/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>

#include "sched_cpupri.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
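
/*
 * Worked example (illustrative; assumes the usual MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140): NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120 and
 * NICE_TO_PRIO(19) == 139, so the nice range maps onto static priorities
 * [100..139]. USER_PRIO() shifts these back down, e.g. USER_PRIO(120)
 * == 20, giving MAX_USER_PRIO == 40 distinct user levels.
 */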

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

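/*
 * Illustrative arithmetic: with HZ == 1000 a jiffy is 1,000,000 ns, so
 * NS_TO_JIFFIES(5000000) == 5; with HZ == 250 a jiffy is 4,000,000 ns
 * and the same 5 ms interval truncates to NS_TO_JIFFIES(5000000) == 1.
 */
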
#define NICE_0_LOAD             SCHED_LOAD_SCALE
#define NICE_0_SHIFT            SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE           (100 * HZ / 1000)
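
/*
 * Worked example (illustrative): DEF_TIMESLICE is 100 ms expressed in
 * jiffies, so it is 100 with HZ == 1000 and 25 with HZ == 250; either
 * way a SCHED_RR task runs ~100 ms before being rotated.
 */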

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF     ((u64)~0ULL)

static inline int rt_policy(int policy)
{
        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
                return 1;
        return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_RT_PRIO];
};
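
/*
 * Lookup sketch (illustrative; the real dequeue path lives in sched_rt.c):
 * one bit per priority level makes picking the highest-priority queued RT
 * entity an O(1) bit search plus a list-head dereference, roughly:
 *
 *      idx = sched_find_first_bit(array->bitmap);
 *      next = list_first_entry(array->queue + idx,
 *                              struct sched_rt_entity, run_list);
 *
 * The extra "delimiter" bit at index MAX_RT_PRIO is kept permanently set
 * so the bit search always terminates.
 */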

struct rt_bandwidth {
        /* nests inside the rq lock: */
        raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

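/*
 * Note on the re-arm loop below (descriptive comment): hrtimer_forward()
 * advances the timer expiry in whole rt_period steps until it lies in the
 * future and returns the number of periods that were skipped. Looping
 * until it returns 0 ensures a timer that fired late still accounts every
 * elapsed period exactly once via do_sched_rt_period_timer().
 */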
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}
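
/*
 * Usage sketch (illustrative): the global default bandwidth is set up at
 * boot from the sysctl defaults further down in this file, roughly:
 *
 *      init_rt_bandwidth(&def_rt_bandwidth,
 *                        global_rt_period(), global_rt_runtime());
 *
 * i.e. RT tasks may consume at most rt_runtime out of every rt_period.
 */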

static inline int rt_bandwidth_enabled(void)
{
        return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        ktime_t now;

        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;

                if (hrtimer_active(&rt_b->rt_period_timer))
                        break;

                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

                soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
                hard = hrtimer_get_expires(&rt_b->rt_period_timer);
                delta = ktime_to_ns(ktime_sub(hard, soft));
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
        struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* schedulable entities of this group on each cpu */
        struct sched_entity **se;
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
        unsigned long shares;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity **rt_se;
        struct rt_rq **rt_rq;

        struct rt_bandwidth rt_bandwidth;
#endif

        struct rcu_head rcu;
        struct list_head list;

        struct task_group *parent;
        struct list_head siblings;
        struct list_head children;
};

#define root_task_group init_task_group

/*
 * task_group_lock serializes add/remove of task groups and also changes to
 * a task group's cpu shares.
 */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
        return list_empty(&root_task_group.children);
}
#endif

# define INIT_TASK_GROUP_LOAD   NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so neither an entity's weight nor a task group's
 * shares value should be too large.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES      2
#define MAX_SHARES      (1UL << 18)

static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif

/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group init_task_group;

/* return group to which a task belongs */
static inline struct task_group *task_group(struct task_struct *p)
{
        struct task_group *tg;

#ifdef CONFIG_CGROUP_SCHED
        tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
                                struct task_group, css);
#else
        tg = &init_task_group;
#endif
        return tg;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
        /*
         * Strictly speaking this rcu_read_lock() is not needed since the
         * task_group is tied to the cgroup, which in turn can never go away
         * as long as there are tasks attached to it.
         *
         * However since task_group() uses task_subsys_state() which is an
         * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
         */
        rcu_read_lock();
#ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
        p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        p->rt.rt_rq = task_group(p)->rt_rq[cpu];
        p->rt.parent = task_group(p)->rt_se[cpu];
#endif
        rcu_read_unlock();
}

#else

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
        return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
        struct load_weight load;
        unsigned long nr_running;

        u64 exec_clock;
        u64 min_vruntime;

        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;

        struct list_head tasks;
        struct list_head *balance_iterator;

        /*
         * 'curr' points to the currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e. when none are currently running).
         */
        struct sched_entity *curr, *next, *last;

        unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */

        /*
         * leaf cfs_rqs are those that hold tasks (lowest schedulable entity
         * in a hierarchy). Non-leaf cfs_rqs hold other higher schedulable
         * entities (like users, containers etc.)
         *
         * leaf_cfs_rq_list ties together the list of leaf cfs_rq's on a cpu.
         * This list is used during load balance.
         */
        struct list_head leaf_cfs_rq_list;
        struct task_group *tg;  /* group that "owns" this runqueue */

#ifdef CONFIG_SMP
        /*
         * the part of load.weight contributed by tasks
         */
        unsigned long task_weight;

        /*
         *   h_load = weight * f(tg)
         *
         * Where f(tg) is the recursive weight fraction assigned to
         * this group.
         */
        unsigned long h_load;

        /*
         * this cpu's part of tg->shares
         */
        unsigned long shares;

        /*
         * load.weight at the time we set shares
         */
        unsigned long rq_weight;
#endif
#endif
};
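
/*
 * Worked example for h_load (illustrative): if group A is entitled to 1/2
 * of the root's weight and group B to 1/2 of A's, then f(B) == 1/4, so a
 * task of load.weight 1024 queued on B's cfs_rq contributes an effective
 * hierarchical load of about 256 to this cpu.
 */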

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
        struct rt_prio_array active;
        unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        struct {
                int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
                int next; /* next highest */
#endif
        } highest_prio;
#endif
#ifdef CONFIG_SMP
        unsigned long rt_nr_migratory;
        unsigned long rt_nr_total;
        int overloaded;
        struct plist_head pushable_tasks;
#endif
        int rt_throttled;
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
        raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;

        struct rq *rq;
        struct list_head leaf_rt_rq_list;
        struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
        atomic_t refcount;
        cpumask_var_t span;
        cpumask_var_t online;

        /*
         * The "RT overload" flag: it gets set if a CPU has more than
         * one runnable RT task.
         */
        cpumask_var_t rto_mask;
        atomic_t rto_count;
#ifdef CONFIG_SMP
        struct cpupri cpupri;
#endif
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks in
 * ascending &runqueue order.
 */
struct rq {
        /* runqueue lock: */
        raw_spinlock_t lock;

        /*
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned long nr_running;
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ
        unsigned char in_nohz_recently;
#endif
        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;

        struct cfs_rq cfs;
        struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* list of leaf cfs_rq on this cpu: */
        struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        struct list_head leaf_rt_rq_list;
#endif

        /*
         * This is part of a global counter where only the total sum
         * over all CPUs matters. A task can increase this counter on
         * one CPU and if it got migrated afterwards it may decrease
         * it on another CPU. Always updated under the runqueue lock:
         */
        unsigned long nr_uninterruptible;

        struct task_struct *curr, *idle;
        unsigned long next_balance;
        struct mm_struct *prev_mm;

        u64 clock;

        atomic_t nr_iowait;

#ifdef CONFIG_SMP
        struct root_domain *rd;
        struct sched_domain *sd;

        unsigned char idle_at_tick;
        /* For active balancing */
        int post_schedule;
        int active_balance;
        int push_cpu;
        /* cpu of this runqueue: */
        int cpu;
        int online;

        unsigned long avg_load_per_task;

        struct task_struct *migration_thread;
        struct list_head migration_queue;

        u64 rt_avg;
        u64 age_stamp;
        u64 idle_stamp;
        u64 avg_idle;
#endif

        /* calc_load related fields */
        unsigned long calc_load_update;
        long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
        int hrtick_csd_pending;
        struct call_single_data hrtick_csd;
#endif
        struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;
        unsigned long long rq_cpu_time;
        /* could the above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

        /* sys_sched_yield() stats */
        unsigned int yld_count;

        /* schedule() stats */
        unsigned int sched_switch;
        unsigned int sched_count;
        unsigned int sched_goidle;

        /* try_to_wake_up() stats */
        unsigned int ttwu_count;
        unsigned int ttwu_local;

        /* BKL stats */
        unsigned int bkl_count;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static inline
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
        rq->curr->sched_class->check_preempt_curr(rq, p, flags);
}

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;
#else
        return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
        rcu_dereference_check((p), \
                              rcu_read_lock_sched_held() || \
                              lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
#define this_rq()               (&__get_cpu_var(runqueues))
#define task_rq(p)              cpu_rq(task_cpu(p))
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
#define raw_rq()                (&__raw_get_cpu_var(runqueues))
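
/*
 * Usage sketch (illustrative): walking a CPU's sched-domain hierarchy
 * from the base domain up to the root; per the rules above this must run
 * inside a preempt-disabled section:
 *
 *      struct sched_domain *sd;
 *
 *      for_each_domain(cpu, sd) {
 *              if (sd->flags & SD_LOAD_BALANCE)
 *                      ...;
 *      }
 */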

inline void update_rq_clock(struct rq *rq)
{
        rq->clock = sched_clock_cpu(cpu_of(rq));
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 * @cpu: the processor in question.
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
        return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)       \
        __SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
        0;

#undef SCHED_FEAT
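
/*
 * Expansion sketch (illustrative, for a hypothetical SCHED_FEAT(FOO, 1)
 * line in sched_features.h): the first expansion adds __SCHED_FEAT_FOO to
 * the enum above, and the second contributes (1UL << __SCHED_FEAT_FOO) * 1
 * to the OR-chain that initializes sysctl_sched_features, so the default
 * feature bitmask is built at compile time from the same list.
 */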

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)       \
        #name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
        NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; sched_feat_names[i]; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp = buf;
        int neg = 0;
        int i;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        if (strncmp(buf, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        for (i = 0; sched_feat_names[i]; i++) {
                int len = strlen(sched_feat_names[i]);

                if (strncmp(cmp, sched_feat_names[i], len) == 0) {
                        if (neg)
                                sysctl_sched_features &= ~(1UL << i);
                        else
                                sysctl_sched_features |= (1UL << i);
                        break;
                }
        }

        if (!sched_feat_names[i])
                return -EINVAL;

        *ppos += cnt;

        return cnt;
}
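
/*
 * Usage sketch (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *      # cat /sys/kernel/debug/sched_features
 *      # echo NO_HRTICK > /sys/kernel/debug/sched_features
 *
 * Reads list every feature, with disabled ones prefixed "NO_"; a write
 * toggles a single bit via the parser above (HRTICK being one of the
 * names generated from sched_features.h).
 */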

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * ratelimit for updating the group shares.
 * default: 0.25ms
 */
unsigned int sysctl_sched_shares_ratelimit = 250000;
unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;

/*
 * Inject some fuzziness into changing the per-cpu group shares;
 * this avoids remote rq-locks at the expense of fairness.
 * default: 4
 */
unsigned int sysctl_sched_shares_thresh = 4;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
        return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
        if (sysctl_sched_rt_runtime < 0)
                return RUNTIME_INF;

        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
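
/*
 * Worked defaults (illustrative): sysctl_sched_rt_period == 1,000,000 us
 * and sysctl_sched_rt_runtime == 950,000 us let RT tasks consume at most
 * 0.95 s of every 1 s, reserving ~5% of CPU time for fair-class tasks
 * even under a runaway RT load. Setting the runtime sysctl to -1 maps to
 * RUNTIME_INF and disables the throttling entirely.
 */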

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)      do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)       do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
        return rq->curr == p;
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
        return task_current(rq, p);
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
#endif
        /*
         * If we are tracking spinlock dependencies then we have to
         * fix up the runqueue lock - which gets 'carried over' from
         * prev into current:
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

        raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
        return p->oncpu;
#else
        return task_current(rq, p);
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
        /*
         * We can optimise this out completely for !SMP, because the
         * SMP rebalancing from interrupt is the only thing that cares
         * here.
         */
        next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        raw_spin_unlock_irq(&rq->lock);
#else
        raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
        /*
         * After ->oncpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
         */
        smp_wmb();
        prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * Check whether the task is waking; we use this to synchronize against
 * ttwu() so that task_cpu() reports a stable number.
 *
 * We need to make an exception for PF_STARTING tasks because the fork
 * path might require task_rq_lock() to work, eg. it can call
 * set_cpus_allowed_ptr() from the cpuset clone_ns code.
 */
static inline int task_is_waking(struct task_struct *p)
{
        return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING));
}

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                while (task_is_waking(p))
                        cpu_relax();
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p) && !task_is_waking(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
        }
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                while (task_is_waking(p))
                        cpu_relax();
                local_irq_save(*flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p) && !task_is_waking(p)))
                        return rq;
                raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
}

void task_rq_unlock_wait(struct task_struct *p)
{
        struct rq *rq = task_rq(p);

        smp_mb(); /* spin-unlock-wait is not a full memory barrier */
        raw_spin_unlock_wait(&rq->lock);
}

static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        raw_spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
{
        raw_spin_unlock_irqrestore(&rq->lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        raw_spin_lock(&rq->lock);

        return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for
 * a reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
        if (!sched_feat(HRTICK))
                return 0;
        if (!cpu_active(cpu_of(rq)))
                return 0;
        return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static void hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);

        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        raw_spin_unlock(&rq->lock);

        return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;

        raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
        raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

        hrtimer_set_expires(timer, time);

        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
                __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                hrtick_clear(cpu_rq(cpu));
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
        hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
        rq->hrtick_csd_pending = 0;

        rq->hrtick_csd.flags = 0;
        rq->hrtick_csd.func = __hrtick_start;
        rq->hrtick_csd.info = rq;
#endif

        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
        int cpu;

        assert_raw_spin_locked(&task_rq(p)->lock);

        if (test_tsk_need_resched(p))
                return;

        set_tsk_need_resched(p);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}

static void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (cpu == smp_processor_id())
                return;

        /*
         * This is safe, as this function is called with the timer
         * wheel base lock of (cpu) held. When the CPU is on the way
         * to idle and has not yet set rq->curr to idle then it will
         * be serialized on the timer wheel base lock and take the new
         * timer into account automatically.
         */
        if (rq->curr != rq->idle)
                return;

        /*
         * We can set TIF_RESCHED on the idle task of the other CPU
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
        set_tsk_need_resched(rq->idle);

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(rq->idle))
                smp_send_reschedule(cpu);
}
#endif /* CONFIG_NO_HZ */

static u64 sched_avg_period(void)
{
        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

static void sched_avg_update(struct rq *rq)
{
        s64 period = sched_avg_period();

        while ((s64)(rq->clock - rq->age_stamp) > period) {
                rq->age_stamp += period;
                rq->rt_avg /= 2;
        }
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
        rq->rt_avg += rt_delta;
        sched_avg_update(rq);
}
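
/*
 * Worked example (illustrative): with the default sysctl_sched_time_avg
 * of 1000 ms, sched_avg_period() is 500 ms, so rt_avg is halved for every
 * 500 ms of rq->clock progress. A runqueue that stops running RT tasks
 * sees its RT average decay geometrically: 1/2 after one period, 1/4
 * after two, and so on.
 */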

#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
        assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
#endif /* CONFIG_SMP */

Ingo Molnar45bf76d2007-07-09 18:51:59 +02001277#if BITS_PER_LONG == 32
1278# define WMULT_CONST (~0UL)
1279#else
1280# define WMULT_CONST (1UL << 32)
1281#endif
1282
1283#define WMULT_SHIFT 32
1284
Ingo Molnar194081e2007-08-09 11:16:51 +02001285/*
1286 * Shift right and round:
1287 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001288#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
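
/* Illustration: SRR(7, 1) == (7 + 1) >> 1 == 4, i.e. 3.5 rounds to 4 instead of truncating to 3. */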
Ingo Molnar194081e2007-08-09 11:16:51 +02001289
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001290/*
1291 * delta *= weight / lw
1292 */
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001293static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001294calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1295 struct load_weight *lw)
1296{
1297 u64 tmp;
1298
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001299 if (!lw->inv_weight) {
1300 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1301 lw->inv_weight = 1;
1302 else
1303 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1304 / (lw->weight+1);
1305 }
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001306
1307 tmp = (u64)delta_exec * weight;
1308 /*
1309 * Check whether we'd overflow the 64-bit multiplication:
1310 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001311 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001312 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001313 WMULT_SHIFT/2);
1314 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001315 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001316
Ingo Molnarecf691d2007-08-02 17:41:40 +02001317 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001318}
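
/*
 * Worked example (values illustrative): delta_exec = 1000000 (1ms),
 * weight = 1024 (nice 0) and lw->weight = 2048 gives
 * inv_weight ~= 2^32 / 2048, tmp = 1024000000 (no overflow), and
 * SRR(tmp * lw->inv_weight, 32) ~= 500000 - i.e. delta * weight / lw
 * up to fixed-point rounding, computed without a 64-bit division.
 */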
1319
Ingo Molnar10919852007-10-15 17:00:04 +02001320static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001321{
1322 lw->weight += inc;
Ingo Molnare89996a2008-03-14 23:48:28 +01001323 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001324}
1325
Ingo Molnar10919852007-10-15 17:00:04 +02001326static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001327{
1328 lw->weight -= dec;
Ingo Molnare89996a2008-03-14 23:48:28 +01001329 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001330}
1331
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001333 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1334 * of tasks with abnormal "nice" values across CPUs the contribution that
1335 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001336 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001337 * scaled version of the new time slice allocation that they receive on time
1338 * slice expiry etc.
1339 */
1340
Peter Zijlstracce7ade2009-01-15 14:53:37 +01001341#define WEIGHT_IDLEPRIO 3
1342#define WMULT_IDLEPRIO 1431655765
Ingo Molnardd41f592007-07-09 18:51:59 +02001343
1344/*
1345 * Nice levels are multiplicative, with a gentle 10% change for every
1346 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1347 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1348 * that remained on nice 0.
1349 *
1350 * The "10% effect" is relative and cumulative: from _any_ nice level,
1351 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001352 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1353 * If a task goes up by ~10% and another task goes down by ~10% then
1354 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001355 */
1356static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001357 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1358 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1359 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1360 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1361 /* 0 */ 1024, 820, 655, 526, 423,
1362 /* 5 */ 335, 272, 215, 172, 137,
1363 /* 10 */ 110, 87, 70, 56, 45,
1364 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001365};
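
/*
 * Illustration of the table: two CPU-bound tasks at nice 0 weigh 1024
 * each and split the CPU 50%/50%. Renicing one to nice 1 (weight 820)
 * shifts the split to 1024/1844 ~= 55.5% vs 820/1844 ~= 44.5% - the
 * documented ~10% swing, obtained by scaling each step by ~1.25.
 */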
1366
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001367/*
1368 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1369 *
1370 * In cases where the weight does not change often, we can use the
1371 * precalculated inverse to speed up arithmetics by turning divisions
1372 * into multiplications:
1373 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001374static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001375 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1376 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1377 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1378 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1379 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1380 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1381 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1382 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001383};
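
/*
 * Sanity check (illustrative): each entry is ~2^32 / prio_to_weight[i];
 * e.g. for nice 0, 2^32 / 1024 == 4194304, matching the table, so
 * calc_delta_mine() can multiply by inv_weight and shift right by
 * WMULT_SHIFT instead of dividing.
 */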
Peter Williams2dd73a42006-06-27 02:54:34 -07001384
Bharata B Raoef12fef2009-03-31 10:02:22 +05301385/* Time spent by the tasks of the cpu accounting group executing in ... */
1386enum cpuacct_stat_index {
1387 CPUACCT_STAT_USER, /* ... user mode */
1388 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1389
1390 CPUACCT_STAT_NSTATS,
1391};
1392
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001393#ifdef CONFIG_CGROUP_CPUACCT
1394static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
Bharata B Raoef12fef2009-03-31 10:02:22 +05301395static void cpuacct_update_stats(struct task_struct *tsk,
1396 enum cpuacct_stat_index idx, cputime_t val);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001397#else
1398static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
Bharata B Raoef12fef2009-03-31 10:02:22 +05301399static inline void cpuacct_update_stats(struct task_struct *tsk,
1400 enum cpuacct_stat_index idx, cputime_t val) {}
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001401#endif
1402
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001403static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1404{
1405 update_load_add(&rq->load, load);
1406}
1407
1408static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1409{
1410 update_load_sub(&rq->load, load);
1411}
1412
Ingo Molnar7940ca32008-08-19 13:40:47 +02001413#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
Peter Zijlstraeb755802008-08-19 12:33:05 +02001414typedef int (*tg_visitor)(struct task_group *, void *);
1415
1416/*
1417 * Iterate the full tree, calling @down when first entering a node and @up when
1418 * leaving it for the final time.
1419 */
1420static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1421{
1422 struct task_group *parent, *child;
1423 int ret;
1424
1425 rcu_read_lock();
1426 parent = &root_task_group;
1427down:
1428 ret = (*down)(parent, data);
1429 if (ret)
1430 goto out_unlock;
1431 list_for_each_entry_rcu(child, &parent->children, siblings) {
1432 parent = child;
1433 goto down;
1434
1435up:
1436 continue;
1437 }
1438 ret = (*up)(parent, data);
1439 if (ret)
1440 goto out_unlock;
1441
1442 child = parent;
1443 parent = parent->parent;
1444 if (parent)
1445 goto up;
1446out_unlock:
1447 rcu_read_unlock();
1448
1449 return ret;
1450}
1451
1452static int tg_nop(struct task_group *tg, void *data)
1453{
1454 return 0;
1455}
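
/*
 * Usage sketch (both callers appear below): walk_tg_tree(tg_nop,
 * tg_shares_up, sd) does its work bottom-up in @up, while
 * walk_tg_tree(tg_load_down, tg_nop, (void *)cpu) works top-down in
 * @down. The gotos above form an iterative depth-first traversal,
 * which keeps kernel stack usage bounded for deep group hierarchies.
 */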
1456#endif
1457
Gregory Haskinse7693a32008-01-25 21:08:09 +01001458#ifdef CONFIG_SMP
Peter Zijlstraf5f08f32009-09-10 13:35:28 +02001459/* Used instead of source_load when we know the type == 0 */
1460static unsigned long weighted_cpuload(const int cpu)
1461{
1462 return cpu_rq(cpu)->load.weight;
1463}
1464
1465/*
1466 * Return a low guess at the load of a migration-source cpu weighted
1467 * according to the scheduling class and "nice" value.
1468 *
1469 * We want to under-estimate the load of migration sources, to
1470 * balance conservatively.
1471 */
1472static unsigned long source_load(int cpu, int type)
1473{
1474 struct rq *rq = cpu_rq(cpu);
1475 unsigned long total = weighted_cpuload(cpu);
1476
1477 if (type == 0 || !sched_feat(LB_BIAS))
1478 return total;
1479
1480 return min(rq->cpu_load[type-1], total);
1481}
1482
1483/*
1484 * Return a high guess at the load of a migration-target cpu weighted
1485 * according to the scheduling class and "nice" value.
1486 */
1487static unsigned long target_load(int cpu, int type)
1488{
1489 struct rq *rq = cpu_rq(cpu);
1490 unsigned long total = weighted_cpuload(cpu);
1491
1492 if (type == 0 || !sched_feat(LB_BIAS))
1493 return total;
1494
1495 return max(rq->cpu_load[type-1], total);
1496}
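
/*
 * Illustration: with a decayed cpu_load[type-1] of 2048 and an
 * instantaneous weight of 1024, source_load() reports 1024 (the low
 * guess) and target_load() 2048 (the high guess). Under-estimating
 * sources and over-estimating targets biases the balancer against
 * migrations that only a transient load spike would justify.
 */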
1497
Peter Zijlstraae154be2009-09-10 14:40:57 +02001498static struct sched_group *group_of(int cpu)
1499{
Paul E. McKenneyd11c5632010-02-22 17:04:50 -08001500 struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
Peter Zijlstraae154be2009-09-10 14:40:57 +02001501
1502 if (!sd)
1503 return NULL;
1504
1505 return sd->groups;
1506}
1507
1508static unsigned long power_of(int cpu)
1509{
1510 struct sched_group *group = group_of(cpu);
1511
1512 if (!group)
1513 return SCHED_LOAD_SCALE;
1514
1515 return group->cpu_power;
1516}
1517
Gregory Haskinse7693a32008-01-25 21:08:09 +01001518static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001519
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001520static unsigned long cpu_avg_load_per_task(int cpu)
1521{
1522 struct rq *rq = cpu_rq(cpu);
Ingo Molnaraf6d5962008-11-29 20:45:15 +01001523 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001524
Steven Rostedt4cd42622008-11-26 21:04:24 -05001525 if (nr_running)
1526 rq->avg_load_per_task = rq->load.weight / nr_running;
Balbir Singha2d47772008-11-12 16:19:00 +05301527 else
1528 rq->avg_load_per_task = 0;
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001529
1530 return rq->avg_load_per_task;
1531}
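
/* E.g. a runqueue weight of 3072 spread over 3 runnable tasks yields an average load of 1024 per task. */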
1532
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001533#ifdef CONFIG_FAIR_GROUP_SCHED
1534
Tejun Heo43cf38e2010-02-02 14:38:57 +09001535static __read_mostly unsigned long __percpu *update_shares_data;
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001536
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001537static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1538
1539/*
1540 * Calculate and set the cpu's group shares.
1541 */
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001542static void update_group_shares_cpu(struct task_group *tg, int cpu,
1543 unsigned long sd_shares,
1544 unsigned long sd_rq_weight,
Jiri Kosina4a6cc4b2009-10-29 00:26:00 +09001545 unsigned long *usd_rq_weight)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001546{
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001547 unsigned long shares, rq_weight;
Peter Zijlstraa5004272009-07-27 14:04:49 +02001548 int boost = 0;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001549
Jiri Kosina4a6cc4b2009-10-29 00:26:00 +09001550 rq_weight = usd_rq_weight[cpu];
Peter Zijlstraa5004272009-07-27 14:04:49 +02001551 if (!rq_weight) {
1552 boost = 1;
1553 rq_weight = NICE_0_LOAD;
1554 }
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001555
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001556 /*
Peter Zijlstraa8af7242009-08-21 13:58:54 +02001557 * \Sum_j shares_j * rq_weight_i
1558 * shares_i = -----------------------------
1559 * \Sum_j rq_weight_j
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001560 */
Ken Chenec4e0e22008-11-18 22:41:57 -08001561 shares = (sd_shares * rq_weight) / sd_rq_weight;
Peter Zijlstraffda12a2008-10-17 19:27:02 +02001562 shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001563
Peter Zijlstraffda12a2008-10-17 19:27:02 +02001564 if (abs(shares - tg->se[cpu]->load.weight) >
1565 sysctl_sched_shares_thresh) {
1566 struct rq *rq = cpu_rq(cpu);
1567 unsigned long flags;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001568
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001569 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001570 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
Peter Zijlstraa5004272009-07-27 14:04:49 +02001571 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
Peter Zijlstraffda12a2008-10-17 19:27:02 +02001572 __set_se_shares(tg->se[cpu], shares);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001573 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstraffda12a2008-10-17 19:27:02 +02001574 }
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001575}
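
/*
 * Worked example (values illustrative): for tg->shares = 1024 over a
 * two-cpu domain with rq weights 3072 and 1024 (sd_rq_weight = 4096),
 * the formula above yields 1024 * 3072 / 4096 = 768 for the first cpu
 * and 1024 * 1024 / 4096 = 256 for the second: each cpu's group weight
 * is proportional to the runnable weight it hosts.
 */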
1576
1577/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001578 * Re-compute each task group's per-cpu shares over the given domain.
1579 * This needs to be done in a bottom-up fashion because the rq weight of a
1580 * parent group depends on the shares of its child groups.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001581 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001582static int tg_shares_up(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001583{
Peter Zijlstracd8ad402009-12-03 18:00:07 +01001584 unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
Jiri Kosina4a6cc4b2009-10-29 00:26:00 +09001585 unsigned long *usd_rq_weight;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001586 struct sched_domain *sd = data;
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001587 unsigned long flags;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001588 int i;
1589
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001590 if (!tg->se[0])
1591 return 0;
1592
1593 local_irq_save(flags);
Jiri Kosina4a6cc4b2009-10-29 00:26:00 +09001594 usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001595
Rusty Russell758b2cd2008-11-25 02:35:04 +10301596 for_each_cpu(i, sched_domain_span(sd)) {
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001597 weight = tg->cfs_rq[i]->load.weight;
Jiri Kosina4a6cc4b2009-10-29 00:26:00 +09001598 usd_rq_weight[i] = weight;
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001599
Peter Zijlstracd8ad402009-12-03 18:00:07 +01001600 rq_weight += weight;
Ken Chenec4e0e22008-11-18 22:41:57 -08001601 /*
1602	 * If there are currently no tasks on the cpu, pretend there
1603 * is one of average load so that when a new task gets to
1604 * run here it will not get delayed by group starvation.
1605 */
Ken Chenec4e0e22008-11-18 22:41:57 -08001606 if (!weight)
1607 weight = NICE_0_LOAD;
1608
Peter Zijlstracd8ad402009-12-03 18:00:07 +01001609 sum_weight += weight;
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001610 shares += tg->cfs_rq[i]->shares;
1611 }
1612
Peter Zijlstracd8ad402009-12-03 18:00:07 +01001613 if (!rq_weight)
1614 rq_weight = sum_weight;
1615
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001616 if ((!shares && rq_weight) || shares > tg->shares)
1617 shares = tg->shares;
1618
1619 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
1620 shares = tg->shares;
1621
Rusty Russell758b2cd2008-11-25 02:35:04 +10301622 for_each_cpu(i, sched_domain_span(sd))
Jiri Kosina4a6cc4b2009-10-29 00:26:00 +09001623 update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
Peter Zijlstra34d76c42009-08-27 13:08:56 +02001624
1625 local_irq_restore(flags);
Peter Zijlstraeb755802008-08-19 12:33:05 +02001626
1627 return 0;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001628}
1629
1630/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001631 * Compute the cpu's hierarchical load factor for each task group.
1632 * This needs to be done in a top-down fashion because the load of a child
1633	 * group is a fraction of its parent's load.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001634 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001635static int tg_load_down(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001636{
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001637 unsigned long load;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001638 long cpu = (long)data;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001639
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001640 if (!tg->parent) {
1641 load = cpu_rq(cpu)->load.weight;
1642 } else {
1643 load = tg->parent->cfs_rq[cpu]->h_load;
1644 load *= tg->cfs_rq[cpu]->shares;
1645 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1646 }
1647
1648 tg->cfs_rq[cpu]->h_load = load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001649
Peter Zijlstraeb755802008-08-19 12:33:05 +02001650 return 0;
Peter Zijlstra4d8d5952008-06-27 13:41:19 +02001651}
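
/*
 * Illustration: a child holding 512 of its parent's 1024 cfs_rq weight
 * on this cpu, with a parent h_load of 2048, gets
 * h_load = 2048 * 512 / (1024 + 1) ~= 1023 - about half the parent's
 * hierarchical load; the "+ 1" merely guards against division by zero.
 */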
1652
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001653static void update_shares(struct sched_domain *sd)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001654{
Peter Zijlstrae7097152009-06-03 15:41:20 +02001655 s64 elapsed;
1656 u64 now;
1657
1658 if (root_task_group_empty())
1659 return;
1660
1661 now = cpu_clock(raw_smp_processor_id());
1662 elapsed = now - sd->last_update;
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02001663
1664 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
1665 sd->last_update = now;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001666 walk_tg_tree(tg_nop, tg_shares_up, sd);
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02001667 }
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001668}
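
/*
 * Rate-limiting sketch (ratelimit value illustrative): were
 * sysctl_sched_shares_ratelimit 250000ns, a domain poked every 100us
 * would walk the group tree at most once per 250us, turning an
 * O(groups x cpus) recomputation into a bounded background cost.
 */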
1669
Peter Zijlstraeb755802008-08-19 12:33:05 +02001670static void update_h_load(long cpu)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001671{
Peter Zijlstrae7097152009-06-03 15:41:20 +02001672 if (root_task_group_empty())
1673 return;
1674
Peter Zijlstraeb755802008-08-19 12:33:05 +02001675 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001676}
1677
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001678#else
1679
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001680static inline void update_shares(struct sched_domain *sd)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001681{
1682}
1683
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001684#endif
1685
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001686#ifdef CONFIG_PREEMPT
1687
Peter Zijlstrab78bb862009-09-15 14:23:18 +02001688static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1689
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001690/*
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001691 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1692 * way at the expense of forcing extra atomic operations in all
1693 * invocations. This assures that the double_lock is acquired using the
1694 * same underlying policy as the spinlock_t on this architecture, which
1695 * reduces latency compared to the unfair variant below. However, it
1696 * also adds more overhead and therefore may reduce throughput.
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001697 */
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001698static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1699 __releases(this_rq->lock)
1700 __acquires(busiest->lock)
1701 __acquires(this_rq->lock)
1702{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001703 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001704 double_rq_lock(this_rq, busiest);
1705
1706 return 1;
1707}
1708
1709#else
1710/*
1711 * Unfair double_lock_balance: Optimizes throughput at the expense of
1712 * latency by eliminating extra atomic operations when the locks are
1713 * already in proper order on entry. This favors lower cpu-ids and will
1714 * grant the double lock to lower cpus over higher ids under contention,
1715 * regardless of entry order into the function.
1716 */
1717static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001718 __releases(this_rq->lock)
1719 __acquires(busiest->lock)
1720 __acquires(this_rq->lock)
1721{
1722 int ret = 0;
1723
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001724 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001725 if (busiest < this_rq) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001726 raw_spin_unlock(&this_rq->lock);
1727 raw_spin_lock(&busiest->lock);
1728 raw_spin_lock_nested(&this_rq->lock,
1729 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001730 ret = 1;
1731 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001732 raw_spin_lock_nested(&busiest->lock,
1733 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001734 }
1735 return ret;
1736}
1737
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001738#endif /* CONFIG_PREEMPT */
1739
1740/*
1741 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1742 */
1743static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1744{
1745 if (unlikely(!irqs_disabled())) {
1746	 /* printk() doesn't work well under rq->lock */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001747 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001748 BUG_ON(1);
1749 }
1750
1751 return _double_lock_balance(this_rq, busiest);
1752}
1753
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001754static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1755 __releases(busiest->lock)
1756{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001757 raw_spin_unlock(&busiest->lock);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001758 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1759}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001760
1761/*
1762 * double_rq_lock - safely lock two runqueues
1763 *
1764 * Note this does not disable interrupts like task_rq_lock,
1765 * you need to do so manually before calling.
1766 */
1767static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1768 __acquires(rq1->lock)
1769 __acquires(rq2->lock)
1770{
1771 BUG_ON(!irqs_disabled());
1772 if (rq1 == rq2) {
1773 raw_spin_lock(&rq1->lock);
1774 __acquire(rq2->lock); /* Fake it out ;) */
1775 } else {
1776 if (rq1 < rq2) {
1777 raw_spin_lock(&rq1->lock);
1778 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1779 } else {
1780 raw_spin_lock(&rq2->lock);
1781 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1782 }
1783 }
1784 update_rq_clock(rq1);
1785 update_rq_clock(rq2);
1786}
1787
1788/*
1789 * double_rq_unlock - safely unlock two runqueues
1790 *
1791 * Note this does not restore interrupts like task_rq_unlock,
1792 * you need to do so manually after calling.
1793 */
1794static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1795 __releases(rq1->lock)
1796 __releases(rq2->lock)
1797{
1798 raw_spin_unlock(&rq1->lock);
1799 if (rq1 != rq2)
1800 raw_spin_unlock(&rq2->lock);
1801 else
1802 __release(rq2->lock);
1803}
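
/*
 * Lock-ordering sketch: both runqueue locks are always taken in
 * ascending address order (rq1 < rq2 above, busiest < this_rq in the
 * unfair balance path), so two cpus double-locking the same pair in
 * opposite argument order can never deadlock - whichever cpu gets the
 * lower-addressed lock first wins.
 */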
1804
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001805#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001806
1807#ifdef CONFIG_FAIR_GROUP_SCHED
1808static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1809{
Vegard Nossum30432092008-06-27 21:35:50 +02001810#ifdef CONFIG_SMP
Ingo Molnar34e83e82008-06-27 15:42:36 +02001811 cfs_rq->shares = shares;
1812#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001813}
1814#endif
1815
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001816static void calc_load_account_active(struct rq *this_rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01001817static void update_sysctl(void);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01001818static int get_update_sysctl_factor(void);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001819
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001820static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1821{
1822 set_task_rq(p, cpu);
1823#ifdef CONFIG_SMP
1824 /*
1825 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1826	 * successfully executed on another CPU. We must ensure that updates of
1827 * per-task data have been completed by this moment.
1828 */
1829 smp_wmb();
1830 task_thread_info(p)->cpu = cpu;
1831#endif
1832}
Gregory Haskinse7693a32008-01-25 21:08:09 +01001833
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001834static const struct sched_class rt_sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02001835
1836#define sched_class_highest (&rt_sched_class)
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001837#define for_each_class(class) \
1838 for (class = sched_class_highest; class; class = class->next)
Ingo Molnardd41f592007-07-09 18:51:59 +02001839
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001840#include "sched_stats.h"
1841
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001842static void inc_nr_running(struct rq *rq)
Ingo Molnar6363ca52008-05-29 11:28:57 +02001843{
1844 rq->nr_running++;
Ingo Molnar6363ca52008-05-29 11:28:57 +02001845}
1846
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001847static void dec_nr_running(struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001848{
1849 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001850}
1851
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001852static void set_load_weight(struct task_struct *p)
1853{
1854 if (task_has_rt_policy(p)) {
Ingo Molnardd41f592007-07-09 18:51:59 +02001855 p->se.load.weight = prio_to_weight[0] * 2;
1856 p->se.load.inv_weight = prio_to_wmult[0] >> 1;
1857 return;
1858 }
1859
1860 /*
1861 * SCHED_IDLE tasks get minimal weight:
1862 */
1863 if (p->policy == SCHED_IDLE) {
1864 p->se.load.weight = WEIGHT_IDLEPRIO;
1865 p->se.load.inv_weight = WMULT_IDLEPRIO;
1866 return;
1867 }
1868
1869 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1870 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001871}
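
/*
 * Illustration: a nice 0 task gets weight 1024 (prio_to_weight[20]),
 * SCHED_IDLE tasks the token weight 3 (WEIGHT_IDLEPRIO), and RT tasks
 * a fixed 2 * 88761 = 177522 so they dominate any fair-class load sums
 * they appear in.
 */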
1872
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001873static void update_avg(u64 *avg, u64 sample)
1874{
1875 s64 diff = sample - *avg;
1876 *avg += diff >> 3;
1877}
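
/*
 * update_avg() is an exponential moving average with alpha = 1/8:
 * avg' = avg + (sample - avg) / 8. E.g. starting from 0 and feeding a
 * constant sample of 800 gives 100, 187, 263, ..., reaching ~88% of
 * the sample after 16 updates.
 */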
1878
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00001879static void
1880enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001881{
Peter Zijlstra831451a2009-01-14 12:39:18 +01001882 if (wakeup)
1883 p->se.start_runtime = p->se.sum_exec_runtime;
1884
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001885 sched_info_queued(p);
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00001886 p->sched_class->enqueue_task(rq, p, wakeup, head);
Ingo Molnardd41f592007-07-09 18:51:59 +02001887 p->se.on_rq = 1;
1888}
1889
Ingo Molnar69be72c2007-08-09 11:16:49 +02001890static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
Ingo Molnardd41f592007-07-09 18:51:59 +02001891{
Peter Zijlstra831451a2009-01-14 12:39:18 +01001892 if (sleep) {
1893 if (p->se.last_wakeup) {
1894 update_avg(&p->se.avg_overlap,
1895 p->se.sum_exec_runtime - p->se.last_wakeup);
1896 p->se.last_wakeup = 0;
1897 } else {
1898 update_avg(&p->se.avg_wakeup,
1899 sysctl_sched_wakeup_granularity);
1900 }
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001901 }
1902
Ankita Garg46ac22b2008-07-01 14:30:06 +05301903 sched_info_dequeued(p);
Ingo Molnarf02231e2007-08-09 11:16:48 +02001904 p->sched_class->dequeue_task(rq, p, sleep);
Ingo Molnardd41f592007-07-09 18:51:59 +02001905 p->se.on_rq = 0;
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001906}
1907
1908/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001909 * activate_task - move a task to the runqueue.
1910 */
1911static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
1912{
1913 if (task_contributes_to_load(p))
1914 rq->nr_uninterruptible--;
1915
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00001916 enqueue_task(rq, p, wakeup, false);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001917 inc_nr_running(rq);
1918}
1919
1920/*
1921 * deactivate_task - remove a task from the runqueue.
1922 */
1923static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1924{
1925 if (task_contributes_to_load(p))
1926 rq->nr_uninterruptible++;
1927
1928 dequeue_task(rq, p, sleep);
1929 dec_nr_running(rq);
1930}
1931
1932#include "sched_idletask.c"
1933#include "sched_fair.c"
1934#include "sched_rt.c"
1935#ifdef CONFIG_SCHED_DEBUG
1936# include "sched_debug.c"
1937#endif
1938
1939/*
Ingo Molnardd41f592007-07-09 18:51:59 +02001940 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001941 */
Ingo Molnar14531182007-07-09 18:51:59 +02001942static inline int __normal_prio(struct task_struct *p)
1943{
Ingo Molnardd41f592007-07-09 18:51:59 +02001944 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02001945}
1946
1947/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07001948 * Calculate the expected normal priority: i.e. priority
1949 * without taking RT-inheritance into account. Might be
1950 * boosted by interactivity modifiers. Changes upon fork,
1951 * setprio syscalls, and whenever the interactivity
1952 * estimator recalculates.
1953 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001954static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07001955{
1956 int prio;
1957
Ingo Molnare05606d2007-07-09 18:51:59 +02001958 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07001959 prio = MAX_RT_PRIO-1 - p->rt_priority;
1960 else
1961 prio = __normal_prio(p);
1962 return prio;
1963}
1964
1965/*
1966 * Calculate the current priority, i.e. the priority
1967 * taken into account by the scheduler. This value might
1968 * be boosted by RT tasks, or might be boosted by
1969 * interactivity modifiers. Will be RT if the task got
1970 * RT-boosted. If not then it returns p->normal_prio.
1971 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001972static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07001973{
1974 p->normal_prio = normal_prio(p);
1975 /*
1976 * If we are RT tasks or we were boosted to RT priority,
1977 * keep the priority unchanged. Otherwise, update priority
1978 * to the normal priority:
1979 */
1980 if (!rt_prio(p->prio))
1981 return p->normal_prio;
1982 return p->prio;
1983}
1984
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985/**
1986 * task_curr - is this task currently executing on a CPU?
1987 * @p: the task in question.
1988 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001989inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990{
1991 return cpu_curr(task_cpu(p)) == p;
1992}
1993
Steven Rostedtcb469842008-01-25 21:08:22 +01001994static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1995 const struct sched_class *prev_class,
1996 int oldprio, int running)
1997{
1998 if (prev_class != p->sched_class) {
1999 if (prev_class->switched_from)
2000 prev_class->switched_from(rq, p, running);
2001 p->sched_class->switched_to(rq, p, running);
2002 } else
2003 p->sched_class->prio_changed(rq, p, oldprio, running);
2004}
2005
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006#ifdef CONFIG_SMP
Ingo Molnarcc367732007-10-15 17:00:18 +02002007/*
2008 * Is this task likely cache-hot:
2009 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002010static int
Ingo Molnarcc367732007-10-15 17:00:18 +02002011task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2012{
2013 s64 delta;
2014
Peter Zijlstrae6c8fba2009-12-16 18:04:33 +01002015 if (p->sched_class != &fair_sched_class)
2016 return 0;
2017
Ingo Molnarf540a602008-03-15 17:10:34 +01002018 /*
2019 * Buddy candidates are cache hot:
2020 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002021 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
Peter Zijlstra47932412008-11-04 21:25:09 +01002022 (&p->se == cfs_rq_of(&p->se)->next ||
2023 &p->se == cfs_rq_of(&p->se)->last))
Ingo Molnarf540a602008-03-15 17:10:34 +01002024 return 1;
2025
Ingo Molnar6bc16652007-10-15 17:00:18 +02002026 if (sysctl_sched_migration_cost == -1)
2027 return 1;
2028 if (sysctl_sched_migration_cost == 0)
2029 return 0;
2030
Ingo Molnarcc367732007-10-15 17:00:18 +02002031 delta = now - p->se.exec_start;
2032
2033 return delta < (s64)sysctl_sched_migration_cost;
2034}
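
/*
 * Example (default value assumed): with sysctl_sched_migration_cost at
 * its usual 500000ns (0.5ms), a task that last executed 200us ago is
 * considered cache hot and the balancer prefers to leave it in place;
 * -1 makes every task hot, 0 makes none.
 */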
2035
Ingo Molnardd41f592007-07-09 18:51:59 +02002036void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02002037{
Peter Zijlstrae2912002009-12-16 18:04:36 +01002038#ifdef CONFIG_SCHED_DEBUG
2039 /*
2040 * We should never call set_task_cpu() on a blocked task,
2041 * ttwu() will sort out the placement.
2042 */
Peter Zijlstra077614e2009-12-17 13:16:31 +01002043 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2044 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
Peter Zijlstrae2912002009-12-16 18:04:36 +01002045#endif
2046
Mathieu Desnoyersde1d7282009-05-05 16:49:59 +08002047 trace_sched_migrate_task(p, new_cpu);
Peter Zijlstracbc34ed2008-12-10 08:08:22 +01002048
Peter Zijlstra0c697742009-12-22 15:43:19 +01002049 if (task_cpu(p) != new_cpu) {
2050 p->se.nr_migrations++;
2051 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2052 }
Ingo Molnardd41f592007-07-09 18:51:59 +02002053
2054 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02002055}
2056
Ingo Molnar70b97a72006-07-03 00:25:42 -07002057struct migration_req {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
Ingo Molnar36c8b582006-07-03 00:25:41 -07002060 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 int dest_cpu;
2062
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 struct completion done;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002064};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
2066/*
2067 * The task's runqueue lock must be held.
2068	 * Returns true if you have to wait for the migration thread.
2069 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002070static int
Ingo Molnar70b97a72006-07-03 00:25:42 -07002071migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002073 struct rq *rq = task_rq(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
2075 /*
2076 * If the task is not on a runqueue (and not running), then
Peter Zijlstrae2912002009-12-16 18:04:36 +01002077 * the next wake-up will properly place the task.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 */
Peter Zijlstrae2912002009-12-16 18:04:36 +01002079 if (!p->se.on_rq && !task_running(rq, p))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
2082 init_completion(&req->done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 req->task = p;
2084 req->dest_cpu = dest_cpu;
2085 list_add(&req->list, &rq->migration_queue);
Ingo Molnar48f24c42006-07-03 00:25:40 -07002086
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 return 1;
2088}
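
/*
 * Usage sketch (illustrative, mirroring set_cpus_allowed_ptr()): with
 * the rq lock held,
 *
 *	if (migrate_task(p, dest_cpu, &req)) {
 *		task_rq_unlock(rq, &flags);
 *		wake_up_process(rq->migration_thread);
 *		wait_for_completion(&req.done);
 *	}
 *
 * hands the actual move over to the per-cpu migration thread.
 */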
2089
2090/*
Markus Metzgera26b89f2009-04-03 16:43:34 +02002091 * wait_task_context_switch - wait for a thread to complete at least one
2092 * context switch.
2093 *
2094 * @p must not be current.
2095 */
2096void wait_task_context_switch(struct task_struct *p)
2097{
2098 unsigned long nvcsw, nivcsw, flags;
2099 int running;
2100 struct rq *rq;
2101
2102 nvcsw = p->nvcsw;
2103 nivcsw = p->nivcsw;
2104 for (;;) {
2105 /*
2106 * The runqueue is assigned before the actual context
2107 * switch. We need to take the runqueue lock.
2108 *
2109 * We could check initially without the lock but it is
2110 * very likely that we need to take the lock in every
2111 * iteration.
2112 */
2113 rq = task_rq_lock(p, &flags);
2114 running = task_running(rq, p);
2115 task_rq_unlock(rq, &flags);
2116
2117 if (likely(!running))
2118 break;
2119 /*
2120 * The switch count is incremented before the actual
2121 * context switch. We thus wait for two switches to be
2122 * sure at least one completed.
2123 */
2124 if ((p->nvcsw - nvcsw) > 1)
2125 break;
2126 if ((p->nivcsw - nivcsw) > 1)
2127 break;
2128
2129 cpu_relax();
2130 }
2131}
2132
2133/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 * wait_task_inactive - wait for a thread to unschedule.
2135 *
Roland McGrath85ba2d82008-07-25 19:45:58 -07002136 * If @match_state is nonzero, it's the @p->state value just checked and
2137 * not expected to change. If it changes, i.e. @p might have woken up,
2138 * then return zero. When we succeed in waiting for @p to be off its CPU,
2139 * we return a positive number (its total switch count). If a second call
2140 * a short while later returns the same number, the caller can be sure that
2141 * @p has remained unscheduled the whole time.
2142 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 * The caller must ensure that the task *will* unschedule sometime soon,
2144 * else this function might spin for a *long* time. This function can't
2145 * be called with interrupts off, or it may introduce deadlock with
2146 * smp_call_function() if an IPI is sent by the same process we are
2147 * waiting to become inactive.
2148 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002149unsigned long wait_task_inactive(struct task_struct *p, long match_state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150{
2151 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002152 int running, on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002153 unsigned long ncsw;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002154 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
Andi Kleen3a5c3592007-10-15 17:00:14 +02002156 for (;;) {
2157 /*
2158 * We do the initial early heuristics without holding
2159 * any task-queue locks at all. We'll only try to get
2160 * the runqueue lock when things look like they will
2161 * work out!
2162 */
2163 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002164
Andi Kleen3a5c3592007-10-15 17:00:14 +02002165 /*
2166 * If the task is actively running on another CPU
2167 * still, just relax and busy-wait without holding
2168 * any locks.
2169 *
2170 * NOTE! Since we don't hold any locks, it's not
2171 * even sure that "rq" stays as the right runqueue!
2172 * But we don't care, since "task_running()" will
2173 * return false if the runqueue has changed and p
2174 * is actually now running somewhere else!
2175 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002176 while (task_running(rq, p)) {
2177 if (match_state && unlikely(p->state != match_state))
2178 return 0;
Andi Kleen3a5c3592007-10-15 17:00:14 +02002179 cpu_relax();
Roland McGrath85ba2d82008-07-25 19:45:58 -07002180 }
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002181
Andi Kleen3a5c3592007-10-15 17:00:14 +02002182 /*
2183 * Ok, time to look more closely! We need the rq
2184 * lock now, to be *sure*. If we're wrong, we'll
2185 * just go back and repeat.
2186 */
2187 rq = task_rq_lock(p, &flags);
Mathieu Desnoyers0a16b602008-07-18 12:16:17 -04002188 trace_sched_wait_task(rq, p);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002189 running = task_running(rq, p);
2190 on_rq = p->se.on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002191 ncsw = 0;
Oleg Nesterovf31e11d2008-08-20 16:54:44 -07002192 if (!match_state || p->state == match_state)
Oleg Nesterov93dcf552008-08-20 16:54:44 -07002193 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
Andi Kleen3a5c3592007-10-15 17:00:14 +02002194 task_rq_unlock(rq, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002195
Andi Kleen3a5c3592007-10-15 17:00:14 +02002196 /*
Roland McGrath85ba2d82008-07-25 19:45:58 -07002197 * If it changed from the expected state, bail out now.
2198 */
2199 if (unlikely(!ncsw))
2200 break;
2201
2202 /*
Andi Kleen3a5c3592007-10-15 17:00:14 +02002203 * Was it really running after all now that we
2204 * checked with the proper locks actually held?
2205 *
2206 * Oops. Go back and try again..
2207 */
2208 if (unlikely(running)) {
2209 cpu_relax();
2210 continue;
2211 }
2212
2213 /*
2214 * It's not enough that it's not actively running,
2215 * it must be off the runqueue _entirely_, and not
2216 * preempted!
2217 *
Luis Henriques80dd99b2009-03-16 19:58:09 +00002218 * So if it was still runnable (but just not actively
Andi Kleen3a5c3592007-10-15 17:00:14 +02002219 * running right now), it's preempted, and we should
2220 * yield - it could be a while.
2221 */
2222 if (unlikely(on_rq)) {
2223 schedule_timeout_uninterruptible(1);
2224 continue;
2225 }
2226
2227 /*
2228 * Ahh, all good. It wasn't running, and it wasn't
2229 * runnable, which means that it will never become
2230 * running in the future either. We're all done!
2231 */
2232 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 }
Roland McGrath85ba2d82008-07-25 19:45:58 -07002234
2235 return ncsw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236}
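
/*
 * Usage sketch (illustrative):
 *
 *	ncsw = wait_task_inactive(p, TASK_TRACED);
 *	if (!ncsw)
 *		... the task changed state under us, bail out ...
 *
 * A later call returning the same nonzero ncsw proves @p was never
 * scheduled in between - this is how ptrace-style callers verify a
 * tracee really stayed stopped.
 */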
2237
2238/***
2239 * kick_process - kick a running thread to enter/exit the kernel
2240 * @p: the to-be-kicked thread
2241 *
2242 * Cause a process which is running on another CPU to enter
2243 * kernel-mode, without any delay. (to get signals handled.)
2244 *
2245	 * NOTE: this function doesn't have to take the runqueue lock,
2246 * because all it wants to ensure is that the remote task enters
2247 * the kernel. If the IPI races and the task has been migrated
2248 * to another CPU then no harm is done and the purpose has been
2249 * achieved as well.
2250 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002251void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252{
2253 int cpu;
2254
2255 preempt_disable();
2256 cpu = task_cpu(p);
2257 if ((cpu != smp_processor_id()) && task_curr(p))
2258 smp_send_reschedule(cpu);
2259 preempt_enable();
2260}
Rusty Russellb43e3522009-06-12 22:27:00 -06002261EXPORT_SYMBOL_GPL(kick_process);
Nick Piggin476d1392005-06-25 14:57:29 -07002262#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
Thomas Gleixner0793a612008-12-04 20:12:29 +01002264/**
2265 * task_oncpu_function_call - call a function on the cpu on which a task runs
2266 * @p: the task to evaluate
2267 * @func: the function to be called
2268 * @info: the function call argument
2269 *
2270 * Calls the function @func when the task is currently running. This might
2271 * be on the current CPU, which just calls the function directly
2272 */
2273void task_oncpu_function_call(struct task_struct *p,
2274 void (*func) (void *info), void *info)
2275{
2276 int cpu;
2277
2278 preempt_disable();
2279 cpu = task_cpu(p);
2280 if (task_curr(p))
2281 smp_call_function_single(cpu, func, info, 1);
2282 preempt_enable();
2283}
2284
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002285#ifdef CONFIG_SMP
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002286static int select_fallback_rq(int cpu, struct task_struct *p)
2287{
2288 int dest_cpu;
2289 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2290
2291 /* Look for allowed, online CPU in same node. */
2292 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2293 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2294 return dest_cpu;
2295
2296 /* Any allowed, online CPU? */
2297 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2298 if (dest_cpu < nr_cpu_ids)
2299 return dest_cpu;
2300
2301 /* No more Mr. Nice Guy. */
2302 if (dest_cpu >= nr_cpu_ids) {
2303 rcu_read_lock();
2304 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
2305 rcu_read_unlock();
2306 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
2307
2308 /*
2309 * Don't tell them about moving exiting tasks or
2310 * kernel threads (both mm NULL), since they never
2311	 * leave the kernel.
2312 */
2313 if (p->mm && printk_ratelimit()) {
2314 printk(KERN_INFO "process %d (%s) no "
2315 "longer affine to cpu%d\n",
2316 task_pid_nr(p), p->comm, cpu);
2317 }
2318 }
2319
2320 return dest_cpu;
2321}
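
/*
 * Fallback order above: (1) an allowed online cpu on the same node,
 * (2) any allowed online cpu, (3) reset the affinity via cpuset and
 * take whatever is online. E.g. a task bound to a cpu that was just
 * hot-unplugged loses its affinity (with the printk notice) rather
 * than becoming unrunnable.
 */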
2322
Peter Zijlstrae2912002009-12-16 18:04:36 +01002323/*
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002324 * Gets called from 3 sites (exec, fork, wakeup). Since it is called without
2325	 * holding rq->lock, we need to ensure ->cpus_allowed is stable; this is done
2326	 * by:
Peter Zijlstrae2912002009-12-16 18:04:36 +01002327 *
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002328 * exec: is unstable, retry loop
2329 * fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
Peter Zijlstrae2912002009-12-16 18:04:36 +01002330 */
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002331static inline
2332int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2333{
Peter Zijlstrae2912002009-12-16 18:04:36 +01002334 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
2335
2336 /*
2337 * In order not to call set_task_cpu() on a blocking task we need
2338 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2339 * cpu.
2340 *
2341 * Since this is common to all placement strategies, this lives here.
2342 *
2343	 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2344 * not worry about this generic constraint ]
2345 */
2346 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
Peter Zijlstra70f11202009-12-20 17:36:27 +01002347 !cpu_online(cpu)))
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002348 cpu = select_fallback_rq(task_cpu(p), p);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002349
2350 return cpu;
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002351}
2352#endif
2353
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354/***
2355 * try_to_wake_up - wake up a thread
2356 * @p: the to-be-woken-up thread
2357 * @state: the mask of task states that can be woken
2358	 * @wake_flags: wake modifier flags (WF_*), e.g. WF_SYNC for a synchronous wakeup
2359 *
2360 * Put it on the run-queue if it's not already there. The "current"
2361 * thread is always on the run-queue (except when the actual
2362 * re-schedule is in progress), and as such you're allowed to do
2363 * the simpler "current->state = TASK_RUNNING" to mark yourself
2364 * runnable without the overhead of this.
2365 *
2366 * returns failure only if the task is already active.
2367 */
Peter Zijlstra7d478722009-09-14 19:55:44 +02002368static int try_to_wake_up(struct task_struct *p, unsigned int state,
2369 int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370{
Ingo Molnarcc367732007-10-15 17:00:18 +02002371 int cpu, orig_cpu, this_cpu, success = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 unsigned long flags;
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002373 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374
Ingo Molnarb85d0662008-03-16 20:03:22 +01002375 if (!sched_feat(SYNC_WAKEUPS))
Peter Zijlstra7d478722009-09-14 19:55:44 +02002376 wake_flags &= ~WF_SYNC;
Ingo Molnarb85d0662008-03-16 20:03:22 +01002377
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002378 this_cpu = get_cpu();
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02002379
Linus Torvalds04e2f172008-02-23 18:05:03 -08002380 smp_wmb();
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002381 rq = task_rq_lock(p, &flags);
Mike Galbraith03e89e42008-12-16 08:45:30 +01002382 update_rq_clock(rq);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002383 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 goto out;
2385
Ingo Molnardd41f592007-07-09 18:51:59 +02002386 if (p->se.on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 goto out_running;
2388
2389 cpu = task_cpu(p);
Ingo Molnarcc367732007-10-15 17:00:18 +02002390 orig_cpu = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391
2392#ifdef CONFIG_SMP
2393 if (unlikely(task_running(rq, p)))
2394 goto out_activate;
2395
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002396 /*
2397 * In order to handle concurrent wakeups and release the rq->lock
2398 * we put the task in TASK_WAKING state.
Ingo Molnareb240732009-09-16 21:09:13 +02002399 *
2400 * First fix up the nr_uninterruptible count:
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002401 */
Ingo Molnareb240732009-09-16 21:09:13 +02002402 if (task_contributes_to_load(p))
2403 rq->nr_uninterruptible--;
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002404 p->state = TASK_WAKING;
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002405
2406 if (p->sched_class->task_waking)
2407 p->sched_class->task_waking(rq, p);
2408
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002409 __task_rq_unlock(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002411 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
Peter Zijlstra0970d292010-02-15 14:45:54 +01002412 if (cpu != orig_cpu) {
2413 /*
2414 * Since we migrate the task without holding any rq->lock,
2415 * we need to be careful with task_rq_lock(), since that
2416 * might end up locking an invalid rq.
2417 */
Mike Galbraith055a0082009-11-12 11:07:44 +01002418 set_task_cpu(p, cpu);
Peter Zijlstra0970d292010-02-15 14:45:54 +01002419 }
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002420
Peter Zijlstra0970d292010-02-15 14:45:54 +01002421 rq = cpu_rq(cpu);
2422 raw_spin_lock(&rq->lock);
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002423 update_rq_clock(rq);
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002424
Peter Zijlstra0970d292010-02-15 14:45:54 +01002425 /*
2426 * We migrated the task without holding either rq->lock, however
2427 * since the task is not on the task list itself, nobody else
2428 * will try and migrate the task, hence the rq should match the
2429 * cpu we just moved it to.
2430 */
2431 WARN_ON(task_cpu(p) != cpu);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002432 WARN_ON(p->state != TASK_WAKING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
Gregory Haskinse7693a32008-01-25 21:08:09 +01002434#ifdef CONFIG_SCHEDSTATS
2435 schedstat_inc(rq, ttwu_count);
2436 if (cpu == this_cpu)
2437 schedstat_inc(rq, ttwu_local);
2438 else {
2439 struct sched_domain *sd;
2440 for_each_domain(this_cpu, sd) {
Rusty Russell758b2cd2008-11-25 02:35:04 +10302441 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Gregory Haskinse7693a32008-01-25 21:08:09 +01002442 schedstat_inc(sd, ttwu_wake_remote);
2443 break;
2444 }
2445 }
2446 }
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002447#endif /* CONFIG_SCHEDSTATS */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002448
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449out_activate:
2450#endif /* CONFIG_SMP */
Ingo Molnarcc367732007-10-15 17:00:18 +02002451 schedstat_inc(p, se.nr_wakeups);
Peter Zijlstra7d478722009-09-14 19:55:44 +02002452 if (wake_flags & WF_SYNC)
Ingo Molnarcc367732007-10-15 17:00:18 +02002453 schedstat_inc(p, se.nr_wakeups_sync);
2454 if (orig_cpu != cpu)
2455 schedstat_inc(p, se.nr_wakeups_migrate);
2456 if (cpu == this_cpu)
2457 schedstat_inc(p, se.nr_wakeups_local);
2458 else
2459 schedstat_inc(p, se.nr_wakeups_remote);
Ingo Molnardd41f592007-07-09 18:51:59 +02002460 activate_task(rq, p, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 success = 1;
2462
Peter Zijlstra831451a2009-01-14 12:39:18 +01002463 /*
2464 * Only attribute actual wakeups done by this task.
2465 */
2466 if (!in_interrupt()) {
2467 struct sched_entity *se = &current->se;
2468 u64 sample = se->sum_exec_runtime;
2469
2470 if (se->last_wakeup)
2471 sample -= se->last_wakeup;
2472 else
2473 sample -= se->start_runtime;
2474 update_avg(&se->avg_wakeup, sample);
2475
2476 se->last_wakeup = se->sum_exec_runtime;
2477 }
2478
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479out_running:
Peter Zijlstra468a15b2008-12-16 08:07:03 +01002480 trace_sched_wakeup(rq, p, success);
Peter Zijlstra7d478722009-09-14 19:55:44 +02002481 check_preempt_curr(rq, p, wake_flags);
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01002482
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 p->state = TASK_RUNNING;
Steven Rostedt9a897c52008-01-25 21:08:22 +01002484#ifdef CONFIG_SMP
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002485 if (p->sched_class->task_woken)
2486 p->sched_class->task_woken(rq, p);
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01002487
2488 if (unlikely(rq->idle_stamp)) {
2489 u64 delta = rq->clock - rq->idle_stamp;
2490 u64 max = 2*sysctl_sched_migration_cost;
2491
2492 if (delta > max)
2493 rq->avg_idle = max;
2494 else
2495 update_avg(&rq->avg_idle, delta);
2496 rq->idle_stamp = 0;
2497 }
Steven Rostedt9a897c52008-01-25 21:08:22 +01002498#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499out:
2500 task_rq_unlock(rq, &flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002501 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
2503 return success;
2504}
2505
David Howells50fa6102009-04-28 15:01:38 +01002506/**
2507 * wake_up_process - Wake up a specific process
2508 * @p: The process to be woken up.
2509 *
2510 * Attempt to wake up the nominated process and move it to the set of runnable
2511 * processes. Returns 1 if the process was woken up, 0 if it was already
2512 * running.
2513 *
2514 * It may be assumed that this function implies a write memory barrier before
2515 * changing the task state if and only if any tasks are woken up.
2516 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002517int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05002519 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521EXPORT_SYMBOL(wake_up_process);
2522
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002523int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524{
2525 return try_to_wake_up(p, state, 0);
2526}
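
/*
 * Illustration: wake_up_process(p) is wake_up_state(p, TASK_ALL), i.e.
 * it wakes @p from any sleep state; a caller that must only wake
 * interruptible sleepers would use
 * wake_up_state(p, TASK_INTERRUPTIBLE) instead.
 */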
2527
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528/*
2529 * Perform scheduler related setup for a newly forked process p.
2530 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02002531 *
2532 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002534static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535{
Ingo Molnardd41f592007-07-09 18:51:59 +02002536 p->se.exec_start = 0;
2537 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02002538 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002539 p->se.nr_migrations = 0;
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01002540 p->se.last_wakeup = 0;
2541 p->se.avg_overlap = 0;
Peter Zijlstra831451a2009-01-14 12:39:18 +01002542 p->se.start_runtime = 0;
2543 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002544
2545#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi77935272009-07-09 13:57:20 +02002546 p->se.wait_start = 0;
2547 p->se.wait_max = 0;
2548 p->se.wait_count = 0;
2549 p->se.wait_sum = 0;
2550
2551 p->se.sleep_start = 0;
2552 p->se.sleep_max = 0;
2553 p->se.sum_sleep_runtime = 0;
2554
2555 p->se.block_start = 0;
2556 p->se.block_max = 0;
2557 p->se.exec_max = 0;
2558 p->se.slice_max = 0;
2559
2560 p->se.nr_migrations_cold = 0;
2561 p->se.nr_failed_migrations_affine = 0;
2562 p->se.nr_failed_migrations_running = 0;
2563 p->se.nr_failed_migrations_hot = 0;
2564 p->se.nr_forced_migrations = 0;
Lucas De Marchi77935272009-07-09 13:57:20 +02002565
2566 p->se.nr_wakeups = 0;
2567 p->se.nr_wakeups_sync = 0;
2568 p->se.nr_wakeups_migrate = 0;
2569 p->se.nr_wakeups_local = 0;
2570 p->se.nr_wakeups_remote = 0;
2571 p->se.nr_wakeups_affine = 0;
2572 p->se.nr_wakeups_affine_attempts = 0;
2573 p->se.nr_wakeups_passive = 0;
2574 p->se.nr_wakeups_idle = 0;
2575
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002576#endif
Nick Piggin476d1392005-06-25 14:57:29 -07002577
Peter Zijlstrafa717062008-01-25 21:08:27 +01002578 INIT_LIST_HEAD(&p->rt.run_list);
Ingo Molnardd41f592007-07-09 18:51:59 +02002579 p->se.on_rq = 0;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02002580 INIT_LIST_HEAD(&p->se.group_node);
Nick Piggin476d1392005-06-25 14:57:29 -07002581
Avi Kivitye107be32007-07-26 13:40:43 +02002582#ifdef CONFIG_PREEMPT_NOTIFIERS
2583 INIT_HLIST_HEAD(&p->preempt_notifiers);
2584#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002585}
2586
2587/*
2588 * fork()/clone()-time setup:
2589 */
2590void sched_fork(struct task_struct *p, int clone_flags)
2591{
2592 int cpu = get_cpu();
2593
2594 __sched_fork(p);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002595 /*
2596 * We mark the process as waking here. This guarantees that
2597 * nobody will actually run it, and a signal or other external
2598 * event cannot wake it up and insert it on the runqueue either.
2599 */
2600 p->state = TASK_WAKING;

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
			p->policy = SCHED_NORMAL;
			p->normal_prio = p->static_prio;
		}

		if (PRIO_TO_NICE(p->static_prio) < 0) {
			p->static_prio = NICE_TO_PRIO(0);
			p->normal_prio = p->static_prio;
			set_load_weight(p);
		}

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	if (!rt_prio(p->prio))
		p->sched_class = &fair_sched_class;

	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);

	set_task_cpu(p, cpu);

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	p->oncpu = 0;
#endif
#ifdef CONFIG_PREEMPT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;
#endif
	plist_node_init(&p->pushable_tasks, MAX_PRIO);

	put_cpu();
}
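
/*
 * Illustrative sketch of the sched_reset_on_fork path above, seen from
 * userspace. Hedged and not part of the build: it assumes a libc whose
 * <sched.h> exposes the SCHED_RESET_ON_FORK flag (2.6.32-era kernels).
 * An RT parent that sets the flag gets children that start out as
 * SCHED_NORMAL with nice 0, exactly as the reversion block in
 * sched_fork() arranges.
 */
#if 0	/* illustrative only, not compiled */
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* RT for this task; children revert to SCHED_NORMAL, nice 0 */
	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp))
		perror("sched_setscheduler");

	if (fork() == 0) {
		/* runs under the default policy, per sched_fork() above */
		printf("child policy: %d\n", sched_getscheduler(0));
		_exit(0);
	}
	return 0;
}
#endif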

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;
	int cpu __maybe_unused = get_cpu();

#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 *
	 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
	 * ->cpus_allowed is stable, we have preemption disabled, meaning
	 * cpu_online_mask is stable.
	 */
	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
	set_task_cpu(p, cpu);
#endif

	/*
	 * Since the task is not on the rq and we still have TASK_WAKING set
	 * nobody else will migrate this task.
	 */
	rq = cpu_rq(cpu);
	raw_spin_lock_irqsave(&rq->lock, flags);

	BUG_ON(p->state != TASK_WAKING);
	p->state = TASK_RUNNING;
	update_rq_clock(rq);
	activate_task(rq, p, 0);
	trace_sched_wakeup_new(rq, p, 1);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, &flags);
	put_cpu();
}

#ifdef CONFIG_PREEMPT_NOTIFIERS

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
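
/*
 * A minimal usage sketch for the notifier API above, modelled loosely
 * on what KVM does; hedged and illustrative only (not compiled here).
 * The sched_out callback runs with the rq lock held and interrupts
 * off, so treat both hooks as atomic context: no sleeping. All my_*
 * names are invented for the demo.
 */
#if 0	/* illustrative only, not compiled */
static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current was just scheduled back in on @cpu: reload state here */
}

static void my_sched_out(struct preempt_notifier *pn,
			 struct task_struct *next)
{
	/* current is being preempted in favour of @next: stash state here */
}

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static struct preempt_notifier my_notifier;

static void my_attach_to_current(void)
{
	preempt_notifier_init(&my_notifier, &my_preempt_ops);
	preempt_notifier_register(&my_notifier);
}
#endif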

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	finish_arch_switch(prev);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_disable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
	perf_event_task_sched_in(current);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
	finish_lock_switch(rq, prev);

	fire_sched_in_preempt_notifiers(current);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}
}
2838
Gregory Haskins3f029d32009-07-29 11:08:47 -04002839#ifdef CONFIG_SMP
2840
2841/* assumes rq->lock is held */
2842static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2843{
2844 if (prev->sched_class->pre_schedule)
2845 prev->sched_class->pre_schedule(rq, prev);
2846}
2847
2848/* rq->lock is NOT held, but preemption is disabled */
2849static inline void post_schedule(struct rq *rq)
2850{
2851 if (rq->post_schedule) {
2852 unsigned long flags;
2853
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002854 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002855 if (rq->curr->sched_class->post_schedule)
2856 rq->curr->sched_class->post_schedule(rq);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002857 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002858
2859 rq->post_schedule = 0;
2860 }
2861}
2862
2863#else
2864
2865static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2866{
2867}
2868
2869static inline void post_schedule(struct rq *rq)
2870{
2871}
2872
2873#endif
2874
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875/**
2876 * schedule_tail - first thing a freshly forked thread must call.
2877 * @prev: the thread we just switched away from.
2878 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002879asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 __releases(rq->lock)
2881{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002882 struct rq *rq = this_rq();
2883
Nick Piggin4866cde2005-06-25 14:57:23 -07002884 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002885
Gregory Haskins3f029d32009-07-29 11:08:47 -04002886 /*
2887 * FIXME: do we need to worry about rq being invalidated by the
2888 * task_switch?
2889 */
2890 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002891
Nick Piggin4866cde2005-06-25 14:57:23 -07002892#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2893 /* In this case, finish_task_switch does not reenable preemption */
2894 preempt_enable();
2895#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002897 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898}

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);
	trace_sched_switch(rq, prev, next);
	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (likely(!mm)) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (likely(!prev->mm)) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * The runqueue lock will be released by the next task (which is
	 * an invalid locking op, but in the case of the scheduler it's
	 * an obvious special-case), so we do an early lockdep release
	 * here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}

/*
 * nr_running, nr_uninterruptible and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, current number of uninterruptible-sleeping threads, total
 * number of context switches performed since bootup.
 */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

unsigned long nr_uninterruptible(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_uninterruptible;

	/*
	 * Since we read the counters lockless, it might be slightly
	 * inaccurate. Do not allow it to go below zero though:
	 */
	if (unlikely((long)sum < 0))
		sum = 0;

	return sum;
}

unsigned long long nr_context_switches(void)
{
	int i;
	unsigned long long sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_switches;

	return sum;
}

unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += atomic_read(&cpu_rq(i)->nr_iowait);

	return sum;
}

unsigned long nr_iowait_cpu(void)
{
	struct rq *this = this_rq();
	return atomic_read(&this->nr_iowait);
}

unsigned long this_cpu_load(void)
{
	struct rq *this = this_rq();
	return this->cpu_load[0];
}

/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun);

/**
 * get_avenrun - get the load average array
 * @loads: pointer to dest load array
 * @offset: offset to add
 * @shift: shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
	loads[0] = (avenrun[0] + offset) << shift;
	loads[1] = (avenrun[1] + offset) << shift;
	loads[2] = (avenrun[2] + offset) << shift;
}

static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}
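
/*
 * A worked, userspace-style sketch of the fixed-point average above;
 * hedged and illustrative only (not compiled here). The constants
 * mirror their definitions in <linux/sched.h> on kernels of this
 * vintage: FSHIFT = 11, FIXED_1 = 1 << FSHIFT = 2048, and
 * EXP_1 = 1884 ~= 2048/e^(5/60), the 1-minute decay factor for
 * 5-second LOAD_FREQ updates. With avenrun[0] == 0 and two runnable
 * tasks, one step gives (0*1884 + 4096*(2048-1884)) >> 11 = 328,
 * i.e. 328/2048 ~= 0.16 -- the familiar slow creep toward 2.00.
 */
#if 0	/* illustrative only, not compiled */
#include <stdio.h>

#define DEMO_FSHIFT	11
#define DEMO_FIXED_1	(1UL << DEMO_FSHIFT)
#define DEMO_EXP_1	1884

int main(void)
{
	unsigned long avg = 0;				/* avenrun[0] */
	unsigned long active = 2 * DEMO_FIXED_1;	/* 2 runnable tasks */
	int tick;

	for (tick = 1; tick <= 12; tick++) {		/* one minute */
		avg = (avg * DEMO_EXP_1 +
		       active * (DEMO_FIXED_1 - DEMO_EXP_1)) >> DEMO_FSHIFT;
		printf("t=%2ds load=%lu.%02lu\n", tick * 5,
		       avg >> DEMO_FSHIFT,
		       (avg & (DEMO_FIXED_1 - 1)) * 100 / DEMO_FIXED_1);
	}
	return 0;
}
#endif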

/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 */
void calc_global_load(void)
{
	unsigned long upd = calc_load_update + 10;
	long active;

	if (time_before(jiffies, upd))
		return;

	active = atomic_long_read(&calc_load_tasks);
	active = active > 0 ? active * FIXED_1 : 0;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	calc_load_update += LOAD_FREQ;
}

/*
 * Either called from update_cpu_load() or from a cpu going idle
 */
static void calc_load_account_active(struct rq *this_rq)
{
	long nr_active, delta;

	nr_active = this_rq->nr_running;
	nr_active += (long) this_rq->nr_uninterruptible;

	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
		atomic_long_add(delta, &calc_load_tasks);
	}
}

/*
 * Update rq->cpu_load[] statistics. This function is usually called every
 * scheduler tick (TICK_NSEC).
 */
static void update_cpu_load(struct rq *this_rq)
{
	unsigned long this_load = this_rq->load.weight;
	int i, scale;

	this_rq->nr_load_updates++;

	/* Update our load: */
	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
		new_load = this_load;
		/*
		 * Round up the averaging division if load is increasing. This
		 * prevents us from getting stuck on 9 if the load is 10, for
		 * example.
		 */
		if (new_load > old_load)
			new_load += scale-1;
		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
	}

	if (time_after_eq(jiffies, this_rq->calc_load_update)) {
		this_rq->calc_load_update += LOAD_FREQ;
		calc_load_account_active(this_rq);
	}
}
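
/*
 * A hedged sketch of the cpu_load[] decay above (illustrative only,
 * not compiled). Index 0 is the instantaneous load; each higher index
 * averages over roughly 2^i ticks, since the update keeps (scale-1)
 * parts old value to 1 part new. Names here are local to the demo.
 */
#if 0	/* illustrative only, not compiled */
#include <stdio.h>

#define DEMO_CPU_LOAD_IDX_MAX	5

int main(void)
{
	unsigned long cpu_load[DEMO_CPU_LOAD_IDX_MAX] = { 0 };
	unsigned long this_load = 1024;		/* one nice-0 task */
	int tick, i, scale;

	for (tick = 0; tick < 3; tick++) {
		for (i = 0, scale = 1; i < DEMO_CPU_LOAD_IDX_MAX;
		     i++, scale += scale) {
			unsigned long old = cpu_load[i], new = this_load;

			if (new > old)		/* round up while rising */
				new += scale - 1;
			cpu_load[i] = (old * (scale - 1) + new) >> i;
		}
		printf("tick %d: %lu %lu %lu %lu %lu\n", tick,
		       cpu_load[0], cpu_load[1], cpu_load[2],
		       cpu_load[3], cpu_load[4]);
	}
	return 0;
}
#endif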

#ifdef CONFIG_SMP

/*
 * sched_exec - execve() is a valuable balancing opportunity, because at
 * this point the task has the smallest effective memory and cache footprint.
 */
void sched_exec(void)
{
	struct task_struct *p = current;
	struct migration_req req;
	int dest_cpu, this_cpu;
	unsigned long flags;
	struct rq *rq;

again:
	this_cpu = get_cpu();
	dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
	if (dest_cpu == this_cpu) {
		put_cpu();
		return;
	}

	rq = task_rq_lock(p, &flags);
	put_cpu();

	/*
	 * select_task_rq() can race against ->cpus_allowed
	 */
	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
	    || unlikely(!cpu_active(dest_cpu))) {
		task_rq_unlock(rq, &flags);
		goto again;
	}

	/* force the process onto the specified CPU */
	if (migrate_task(p, dest_cpu, &req)) {
		/* Need to wait for migration thread (might exit: take ref). */
		struct task_struct *mt = rq->migration_thread;

		get_task_struct(mt);
		task_rq_unlock(rq, &flags);
		wake_up_process(mt);
		put_task_struct(mt);
		wait_for_completion(&req.done);

		return;
	}
	task_rq_unlock(rq, &flags);
}

#endif

DEFINE_PER_CPU(struct kernel_stat, kstat);

EXPORT_PER_CPU_SYMBOL(kstat);

/*
 * Return any ns on the sched_clock that have not yet been accounted in
 * @p in case that task is currently running.
 *
 * Called with task_rq_lock() held on @rq.
 */
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
	u64 ns = 0;

	if (task_current(rq, p)) {
		update_rq_clock(rq);
		ns = rq->clock - p->se.exec_start;
		if ((s64)ns < 0)
			ns = 0;
	}

	return ns;
}

unsigned long long task_delta_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = do_task_delta_exec(p, rq);
	task_rq_unlock(rq, &flags);

	return ns;
}

/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that has not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, &flags);

	return ns;
}

/*
 * Return sum_exec_runtime for the thread group.
 * In case the task is currently running, return the sum plus current's
 * pending runtime that has not been accounted yet.
 *
 * Note that the thread group might have other running tasks as well,
 * so the return value does not include any pending runtime that those
 * other tasks might have.
 */
unsigned long long thread_group_sched_runtime(struct task_struct *p)
{
	struct task_cputime totals;
	unsigned long flags;
	struct rq *rq;
	u64 ns;

	rq = task_rq_lock(p, &flags);
	thread_group_cputime(p, &totals);
	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, &flags);

	return ns;
}
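
/*
 * Hedged usage note: these runtimes are what the POSIX CPU clocks hand
 * back to userspace. A minimal consumer (illustrative only, not
 * compiled here; link with -lrt on older glibc):
 */
#if 0	/* illustrative only, not compiled */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* per-thread runtime, ultimately backed by task_sched_runtime() */
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
	printf("thread:  %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

	/* whole thread group, cf. thread_group_sched_runtime() */
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
	printf("process: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif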

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
	cputime64_t tmp;

	/* Add user time to process. */
	p->utime = cputime_add(p->utime, cputime);
	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
	account_group_user_time(p, cputime);

	/* Add user time to cpustat. */
	tmp = cputime_to_cputime64(cputime);
	if (TASK_NICE(p) > 0)
		cpustat->nice = cputime64_add(cpustat->nice, tmp);
	else
		cpustat->user = cputime64_add(cpustat->user, tmp);

	cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
	/* Account for user time used */
	acct_update_integrals(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	cputime64_t tmp;
	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;

	tmp = cputime_to_cputime64(cputime);

	/* Add guest time to process. */
	p->utime = cputime_add(p->utime, cputime);
	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
	account_group_user_time(p, cputime);
	p->gtime = cputime_add(p->gtime, cputime);

	/* Add guest time to cpustat. */
	if (TASK_NICE(p) > 0) {
		cpustat->nice = cputime64_add(cpustat->nice, tmp);
		cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
	} else {
		cpustat->user = cputime64_add(cpustat->user, tmp);
		cpustat->guest = cputime64_add(cpustat->guest, tmp);
	}
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
	cputime64_t tmp;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	/* Add system time to process. */
	p->stime = cputime_add(p->stime, cputime);
	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	tmp = cputime_to_cputime64(cputime);
	if (hardirq_count() - hardirq_offset)
		cpustat->irq = cputime64_add(cpustat->irq, tmp);
	else if (softirq_count())
		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
	else
		cpustat->system = cputime64_add(cpustat->system, tmp);

	cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);

	/* Account for system time used */
	acct_update_integrals(p);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
	cputime64_t cputime64 = cputime_to_cputime64(cputime);

	cpustat->steal = cputime64_add(cpustat->steal, cputime64);
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
	cputime64_t cputime64 = cputime_to_cputime64(cputime);
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
	else
		cpustat->idle = cputime64_add(cpustat->idle, cputime64);
}

#ifndef CONFIG_VIRT_CPU_ACCOUNTING

/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	account_idle_time(jiffies_to_cputime(ticks));
}

#endif
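
/*
 * Hedged aside: the cpustat buckets filled in above surface in the
 * first line of /proc/stat, in the field order assumed below (true
 * for kernels of this vintage via fs/proc/stat.c; the guest_nice
 * column needs 2.6.33+). Illustrative only, not compiled here.
 */
#if 0	/* illustrative only, not compiled */
#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, system, idle, iowait, irq, softirq,
			   steal, guest, guest_nice;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
	       &user, &nice, &system, &idle, &iowait, &irq, &softirq,
	       &steal, &guest, &guest_nice);
	fclose(f);
	printf("user=%llu system=%llu idle=%llu iowait=%llu steal=%llu\n",
	       user, system, idle, iowait, steal);
	return 0;
}
#endif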

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else

#ifndef nsecs_to_cputime
# define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
#endif

void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);

	/*
	 * Use CFS's precise accounting:
	 */
	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);

	if (total) {
		u64 temp;

		temp = (u64)(rtime * utime);
		do_div(temp, total);
		utime = (cputime_t)temp;
	} else
		utime = rtime;

	/*
	 * Compare with previous values, to keep monotonicity:
	 */
	p->prev_utime = max(p->prev_utime, utime);
	p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));

	*ut = p->prev_utime;
	*st = p->prev_stime;
}
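
/*
 * A worked example of the rescaling above; hedged and illustrative
 * only (not compiled). Tick sampling may split runtime 300/100
 * between user and system while the ns-accurate sum_exec_runtime
 * says 500 total; task_times() keeps the sampled ratio but rescales
 * it onto the precise total, and the prev_* max() calls keep
 * successive readings monotonic.
 */
#if 0	/* illustrative only, not compiled */
#include <stdio.h>

int main(void)
{
	unsigned long long utime = 300, stime = 100;	/* tick samples */
	unsigned long long rtime = 500;	/* precise, from sum_exec_runtime */
	unsigned long long total = utime + stime;
	unsigned long long scaled_utime, scaled_stime;

	/* rescale the tick-based split onto the precise total... */
	scaled_utime = total ? rtime * utime / total : rtime;
	/* ...and give stime whatever remains, as task_times() does */
	scaled_stime = rtime - scaled_utime;

	printf("utime=%llu stime=%llu (sum=%llu == rtime)\n",
	       scaled_utime, scaled_stime, scaled_utime + scaled_stime);
	return 0;
}
#endif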

/*
 * Must be called with siglock held.
 */
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct signal_struct *sig = p->signal;
	struct task_cputime cputime;
	cputime_t rtime, utime, total;

	thread_group_cputime(p, &cputime);

	total = cputime_add(cputime.utime, cputime.stime);
	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);

	if (total) {
		u64 temp;

		temp = (u64)(rtime * cputime.utime);
		do_div(temp, total);
		utime = (cputime_t)temp;
	} else
		utime = rtime;

	sig->prev_utime = max(sig->prev_utime, utime);
	sig->prev_stime = max(sig->prev_stime,
			      cputime_sub(rtime, sig->prev_utime));

	*ut = sig->prev_utime;
	*st = sig->prev_stime;
}
#endif

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 *
 * It also gets called by the fork code, when changing the parent's
 * timeslices.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	sched_clock_tick();

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	update_cpu_load(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick(curr);

#ifdef CONFIG_SMP
	rq->idle_at_tick = idle_cpu(cpu);
	trigger_load_balance(rq, cpu);
#endif
}
3536
Lai Jiangshan132380a2009-04-02 14:18:25 +08003537notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003538{
3539 if (in_lock_functions(addr)) {
3540 addr = CALLER_ADDR2;
3541 if (in_lock_functions(addr))
3542 addr = CALLER_ADDR3;
3543 }
3544 return addr;
3545}

#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_PREEMPT_TRACER))

void __kprobes add_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
		return;
#endif
	preempt_count() += val;
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Spinlock count overflowing soon?
	 */
	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
				PREEMPT_MASK - 10);
#endif
	if (preempt_count() == val)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(add_preempt_count);

void __kprobes sub_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	preempt_count() -= val;
}
EXPORT_SYMBOL(sub_preempt_count);

#endif
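
/*
 * For reference, a hedged sketch of how preempt_count() is carved up;
 * the masks below mirror <linux/hardirq.h> on kernels of this vintage,
 * but the exact widths are arch/version dependent, so take them as
 * assumptions. This layout is why sub_preempt_count() can test the
 * PREEMPT portion separately from the irq nesting bits. Not compiled
 * here.
 */
#if 0	/* illustrative only, not compiled */
#include <stdio.h>

#define DEMO_PREEMPT_MASK	0x000000ffUL	/* preempt disable depth */
#define DEMO_SOFTIRQ_MASK	0x0000ff00UL	/* softirq nesting */
#define DEMO_HARDIRQ_MASK	0x03ff0000UL	/* hardirq nesting */

int main(void)
{
	unsigned long count = 0x00010002UL;	/* 1 hardirq, depth 2 */

	printf("preempt depth: %lu\n", count & DEMO_PREEMPT_MASK);
	printf("in softirq:    %s\n",
	       (count & DEMO_SOFTIRQ_MASK) ? "yes" : "no");
	printf("in hardirq:    %s\n",
	       (count & DEMO_HARDIRQ_MASK) ? "yes" : "no");
	return 0;
}
#endif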

/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
	struct pt_regs *regs = get_irq_regs();

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	print_modules();
	if (irqs_disabled())
		print_irqtrace_events(prev);

	if (regs)
		show_regs(regs);
	else
		dump_stack();
}

/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev)
{
	/*
	 * Test if we are atomic. Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
	 */
	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
		__schedule_bug(prev);

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_count);
#ifdef CONFIG_SCHEDSTATS
	if (unlikely(prev->lock_depth >= 0)) {
		schedstat_inc(this_rq(), bkl_count);
		schedstat_inc(prev, sched_info.bkl_count);
	}
#endif
}

static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	if (prev->state == TASK_RUNNING) {
		u64 runtime = prev->se.sum_exec_runtime;

		runtime -= prev->se.prev_sum_exec_runtime;
		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);

		/*
		 * In order to avoid avg_overlap growing stale when we are
		 * indeed overlapping and hence not getting put to sleep, grow
		 * the avg_overlap on preemption.
		 *
		 * We use the average preemption runtime because that
		 * correlates to the amount of cache footprint a task can
		 * build up.
		 */
		update_avg(&prev->se.avg_overlap, runtime);
	}
	prev->sched_class->put_prev_task(rq, prev);
}

/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	/*
	 * Optimization: we know that if all tasks are in
	 * the fair class we can call that function directly:
	 */
	if (likely(rq->nr_running == rq->cfs.nr_running)) {
		p = fair_sched_class.pick_next_task(rq);
		if (likely(p))
			return p;
	}

	class = sched_class_highest;
	for ( ; ; ) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
		/*
		 * Will never be NULL as the idle class always
		 * returns a non-NULL p:
		 */
		class = class->next;
	}
}

/*
 * schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_sched_qs(cpu);
	prev = rq->curr;
	switch_count = &prev->nivcsw;

	release_kernel_lock(prev);
need_resched_nonpreemptible:

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	raw_spin_lock_irq(&rq->lock);
	update_rq_clock(rq);
	clear_tsk_need_resched(prev);

	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev)))
			prev->state = TASK_RUNNING;
		else
			deactivate_task(rq, prev, 1);
		switch_count = &prev->nvcsw;
	}

	pre_schedule(rq, prev);

	if (unlikely(!rq->nr_running))
		idle_balance(cpu, rq);

	put_prev_task(rq, prev);
	next = pick_next_task(rq);

	if (likely(prev != next)) {
		sched_info_switch(prev, next);
		perf_event_task_sched_out(prev, next);

		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * the context switch might have flipped the stack from under
		 * us, hence refresh the local variables.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	if (unlikely(reacquire_kernel_lock(current) < 0)) {
		prev = rq->curr;
		switch_count = &prev->nivcsw;
		goto need_resched_nonpreemptible;
	}

	preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}
EXPORT_SYMBOL(schedule);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
{
	unsigned int cpu;
	struct rq *rq;

	if (!sched_feat(OWNER_SPIN))
		return 0;

#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Need to access the cpu field knowing that
	 * DEBUG_PAGEALLOC could have unmapped it if
	 * the mutex owner just released it and exited.
	 */
	if (probe_kernel_address(&owner->cpu, cpu))
		return 0;
#else
	cpu = owner->cpu;
#endif

	/*
	 * Even if the access succeeded (likely case),
	 * the cpu field may no longer be valid.
	 */
	if (cpu >= nr_cpumask_bits)
		return 0;

	/*
	 * We need to validate that we can do a
	 * get_cpu() and that we have the percpu area.
	 */
	if (!cpu_online(cpu))
		return 0;

	rq = cpu_rq(cpu);

	for (;;) {
		/*
		 * Owner changed, break to re-assess state.
		 */
		if (lock->owner != owner)
			break;

		/*
		 * Is that owner really running on that cpu?
		 */
		if (task_thread_info(rq->curr) != owner || need_resched())
			return 0;

		cpu_relax();
	}

	return 1;
}
#endif
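
/*
 * Hypothetical caller-side sketch of adaptive spinning, in the spirit
 * of __mutex_lock_common() in kernel/mutex.c: keep trying the lock as
 * long as mutex_spin_on_owner() reports the owner is still on a CPU.
 * Hedged and illustrative only (not compiled); demo_adaptive_lock is
 * an invented name, not a kernel API.
 */
#if 0	/* illustrative only, not compiled */
static int demo_adaptive_lock(struct mutex *lock)
{
	for (;;) {
		struct thread_info *owner;

		/* snapshot the owner; it may change or exit under us */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;		/* owner slept or was preempted */

		if (mutex_trylock(lock))
			return 1;	/* got it without sleeping */

		if (need_resched())
			break;

		cpu_relax();
	}
	return 0;			/* caller falls back to blocking */
}
#endif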

#ifdef CONFIG_PREEMPT
/*
 * This is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemption off of an interrupt-return
 * path is handled by preempt_schedule_irq() below, which calls
 * schedule() directly.
 */
asmlinkage void __sched preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		schedule();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);

/*
 * This is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note that this is called and returns with irqs disabled. This
 * protects us against recursive calls from irq context.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	struct thread_info *ti = current_thread_info();

	/* Catch callers which need to be fixed */
	BUG_ON(ti->preempt_count || !irqs_disabled());

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}

#endif /* CONFIG_PREEMPT */
3894
Peter Zijlstra63859d42009-09-15 19:14:42 +02003895int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07003896 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897{
Peter Zijlstra63859d42009-09-15 19:14:42 +02003898 return try_to_wake_up(curr->private, mode, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900EXPORT_SYMBOL(default_wake_function);
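
/*
 * Sketch of a custom wakeup callback (my_wake_function is hypothetical):
 * wait_queue_t::func can be overridden to layer extra work on top of the
 * default path, the way autoremove_wake_function() adds list removal:
 *
 *	static int my_wake_function(wait_queue_t *wait, unsigned mode,
 *				    int wake_flags, void *key)
 *	{
 *		int ret = default_wake_function(wait, mode, wake_flags, key);
 *
 *		if (ret)
 *			list_del_init(&wait->task_list);
 *		return ret;
 *	}
 *
 * installed with init_waitqueue_func_entry(&wait, my_wake_function).
 */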

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == a small
 * positive number) then we wake all the non-exclusive tasks and one
 * exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_t *curr, *next;

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
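
/*
 * Typical pairing, as a sketch (my_wq, my_cond and both functions are
 * hypothetical): the wait_event()/wake_up() wrappers funnel into
 * __wake_up(q, TASK_NORMAL, 1, NULL):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_cond;
 *
 *	static void my_waiter(void)
 *	{
 *		wait_event(my_wq, my_cond != 0);	// sleeps until woken
 *	}
 *
 *	static void my_waker(void)
 *	{
 *		my_cond = 1;
 *		wake_up(&my_wq);
 *	}
 */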

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
	__wake_up_common(q, mode, 1, 0, NULL);
}

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
	__wake_up_common(q, mode, 1, 0, key);
}

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	int wake_flags = WF_SYNC;

	if (unlikely(!q))
		return;

	if (unlikely(!nr_exclusive))
		wake_flags = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/**
 * complete: - signals a single thread waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);

/**
 * complete_all: - signals all threads waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done += UINT_MAX/2;
	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);

static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue_tail(&x->wait, &wait);
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			spin_unlock_irq(&x->wait.lock);
			timeout = schedule_timeout(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	x->done--;
	return timeout ?: 1;
}

static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	might_sleep();

	spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, timeout, state);
	spin_unlock_irq(&x->wait.lock);
	return timeout;
}

/**
 * wait_for_completion: - waits for completion of a task
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
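
/*
 * Sketch of the usual producer/consumer pairing (my_done and my_thread()
 * are hypothetical):
 *
 *	static DECLARE_COMPLETION(my_done);
 *
 *	static int my_thread(void *unused)
 *	{
 *		// ... do the work ...
 *		complete(&my_done);		// release exactly one waiter
 *		return 0;
 *	}
 *
 *	// caller:
 *	kthread_run(my_thread, NULL, "my_thread");
 *	wait_for_completion(&my_done);		// sleeps uninterruptibly
 */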

/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Returns 0 if timed out, and positive (at least 1, or the number of jiffies
 * left till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
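
/*
 * Sketch of timeout handling (my_done is hypothetical): a zero return
 * means the completion was not signalled in time, a positive return is
 * the remaining jiffies (at least 1):
 *
 *	unsigned long left = wait_for_completion_timeout(&my_done, HZ);
 *	if (!left)
 *		return -ETIMEDOUT;	// nothing happened within one second
 */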

/**
 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 * @x: holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 */
int __sched wait_for_completion_interruptible(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);

/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 */
unsigned long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);

/**
 * wait_for_completion_killable: - waits for completion of a task (killable)
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
 */
int __sched wait_for_completion_killable(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);

/**
 * try_wait_for_completion - try to decrement a completion without blocking
 * @x: completion structure
 *
 * Returns: 0 if a decrement cannot be done without blocking
 *	    1 if a decrement succeeded.
 *
 * If a completion is being used as a counting completion,
 * attempt to decrement the counter without blocking. This
 * enables us to avoid waiting if the resource the completion
 * is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	else
		x->done--;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
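
/*
 * Sketch of the counting use mentioned above (my_slots is hypothetical,
 * primed by N complete() calls): take a unit only if one is available,
 * never blocking:
 *
 *	if (!try_wait_for_completion(&my_slots))
 *		return -EBUSY;		// would have had to wait
 *	// ... use the resource, then return the unit:
 *	complete(&my_slots);
 */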

/**
 * completion_done - Test to see if a completion has any waiters
 * @x: completion structure
 *
 * Returns: 0 if there are waiters (wait_for_completion() in progress)
 *	    1 if there are no waiters.
 *
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(completion_done);

static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);

	__set_current_state(state);

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);
	timeout = schedule_timeout(timeout);
	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);

	return timeout;
}

void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);

long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);

void __sched sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);

long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);
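
/*
 * Note: the sleep_on() family tests no condition before sleeping, so a
 * wakeup that arrives between the caller's check and the call here is
 * simply lost. New code should prefer the condition-checking wrappers;
 * sketch (wq and resource_ready are hypothetical):
 *
 *	// racy:
 *	if (!resource_ready)
 *		sleep_on(&wq);
 *
 *	// safe equivalent:
 *	wait_event(wq, resource_ready);
 */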

#ifdef CONFIG_RT_MUTEXES

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance logic.
 */
void rt_mutex_setprio(struct task_struct *p, int prio)
{
	unsigned long flags;
	int oldprio, on_rq, running;
	struct rq *rq;
	const struct sched_class *prev_class;

	BUG_ON(prio < 0 || prio > MAX_PRIO);

	rq = task_rq_lock(p, &flags);
	update_rq_clock(rq);

	oldprio = p->prio;
	prev_class = p->sched_class;
	on_rq = p->se.on_rq;
	running = task_current(rq, p);
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	if (rt_prio(prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	p->prio = prio;

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq) {
		enqueue_task(rq, p, 0, oldprio < prio);

		check_class_changed(rq, p, prev_class, oldprio, running);
	}
	task_rq_unlock(rq, &flags);
}

#endif

void set_user_nice(struct task_struct *p, long nice)
{
	int old_prio, delta, on_rq;
	unsigned long flags;
	struct rq *rq;

	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &flags);
	update_rq_clock(rq);
	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is
	 * switched to SCHED_FIFO/SCHED_RR:
	 */
	if (task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	on_rq = p->se.on_rq;
	if (on_rq)
		dequeue_task(rq, p, 0);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p);
	old_prio = p->prio;
	p->prio = effective_prio(p);
	delta = p->prio - old_prio;

	if (on_rq) {
		enqueue_task(rq, p, 0, false);
		/*
		 * If the task increased its priority or is running and
		 * lowered its priority, then reschedule its CPU:
		 */
		if (delta < 0 || (delta > 0 && task_running(rq, p)))
			resched_task(rq->curr);
	}
out_unlock:
	task_rq_unlock(rq, &flags);
}
EXPORT_SYMBOL(set_user_nice);
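
/*
 * Sketch of typical in-kernel use (my_fn and the thread name are
 * hypothetical; cf. how the kernel demotes its own housekeeping
 * threads): create a kthread and push it to the weakest nice level
 * before letting it run:
 *
 *	struct task_struct *tsk = kthread_create(my_fn, NULL, "my_kthread");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_user_nice(tsk, 19);
 *		wake_up_process(tsk);
 *	}
 */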

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	/* convert nice value [19,-20] to rlimit style value [1,40] */
	int nice_rlim = 20 - nice;

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
		capable(CAP_SYS_NICE));
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	if (increment < -40)
		increment = -40;
	if (increment > 40)
		increment = 40;

	nice = TASK_NICE(current) + increment;
	if (nice < -20)
		nice = -20;
	if (nice > 19)
		nice = 19;

	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(const struct task_struct *p)
{
	return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
int idle_cpu(int cpu)
{
	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}

/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
	BUG_ON(p->se.on_rq);

	p->policy = policy;
	p->rt_priority = prio;
	p->normal_prio = normal_prio(p);
	/* we are holding p->pi_lock already */
	p->prio = rt_mutex_getprio(p);
	if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;
	set_load_weight(p);
}

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (cred->euid == pcred->euid ||
		 cred->euid == pcred->uid);
	rcu_read_unlock();
	return match;
}

static int __sched_setscheduler(struct task_struct *p, int policy,
				struct sched_param *param, bool user)
{
	int retval, oldprio, oldpolicy = -1, on_rq, running;
	unsigned long flags;
	const struct sched_class *prev_class;
	struct rq *rq;
	int reset_on_fork;

	/* may grab non-irq protected spin_locks */
	BUG_ON(in_interrupt());
recheck:
	/* double check policy once rq lock held */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
		policy &= ~SCHED_RESET_ON_FORK;

		if (policy != SCHED_FIFO && policy != SCHED_RR &&
				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
				policy != SCHED_IDLE)
			return -EINVAL;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (param->sched_priority < 0 ||
	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
		return -EINVAL;
	if (rt_policy(policy) != (param->sched_priority != 0))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (user && !capable(CAP_SYS_NICE)) {
		if (rt_policy(policy)) {
			unsigned long rlim_rtprio;

			if (!lock_task_sighand(p, &flags))
				return -ESRCH;
			rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
			unlock_task_sighand(p, &flags);

			/* can't set/change the rt policy */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* can't increase priority */
			if (param->sched_priority > p->rt_priority &&
			    param->sched_priority > rlim_rtprio)
				return -EPERM;
		}
		/*
		 * Like positive nice levels, don't allow tasks to
		 * move out of SCHED_IDLE either:
		 */
		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
			return -EPERM;

		/* can't change other user's priorities */
		if (!check_same_owner(p))
			return -EPERM;

		/* Normal users shall not reset the sched_reset_on_fork flag */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;
	}

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0)
			return -EPERM;
#endif

		retval = security_task_setscheduler(p, policy, param);
		if (retval)
			return retval;
	}

	/*
	 * make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/*
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = __task_rq_lock(p);
	/* recheck policy now with rq lock held */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		__task_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		goto recheck;
	}
	update_rq_clock(rq);
	on_rq = p->se.on_rq;
	running = task_current(rq, p);
	if (on_rq)
		deactivate_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	p->sched_reset_on_fork = reset_on_fork;

	oldprio = p->prio;
	prev_class = p->sched_class;
	__setscheduler(rq, p, policy, param->sched_priority);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq) {
		activate_task(rq, p, 0);

		check_class_changed(rq, p, prev_class, oldprio, running);
	}
	__task_rq_unlock(rq);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	rt_mutex_adjust_pi(p);

	return 0;
}

/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       struct sched_param *param)
{
	return __sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
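
/*
 * Sketch of kernel-side use (tsk is hypothetical; cf. how per-cpu helper
 * threads are promoted): switch a task to SCHED_FIFO at the highest
 * user-visible RT priority:
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	sched_setscheduler(tsk, SCHED_FIFO, &param);
 */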

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       struct sched_param *param)
{
	return __sched_setscheduler(p, policy, param, false);
}

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setscheduler(p, policy, &lparam);
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
		struct sched_param __user *, param)
{
	/* negative values for policy are not valid */
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, -1, param);
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
	}
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
	int retval;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval)
		goto out_unlock;

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);
again:
	retval = set_cpus_allowed_ptr(p, new_mask);

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}
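
/*
 * Sketch of the kernel-side equivalent (tsk is hypothetical): pinning a
 * task to CPU 0 uses the same cpumask plumbing as the syscall above:
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(0, mask);
 *		set_cpus_allowed_ptr(tsk, mask);
 *		free_cpumask_var(mask);
 *	}
 */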

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	struct rq *rq;
	int retval;

	get_online_cpus();
	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	rq = task_rq_lock(p, &flags);
	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
	task_rq_unlock(rq, &flags);

out_unlock:
	rcu_read_unlock();
	put_online_cpus();

	return retval;
}

/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 */
SYSCALL_DEFINE0(sched_yield)
{
	struct rq *rq = this_rq_lock();

	schedstat_inc(rq, yld_count);
	current->sched_class->yield_task(rq);

	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt or enable interrupts:
	 */
	__release(rq->lock);
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
	do_raw_spin_unlock(&rq->lock);
	preempt_enable_no_resched();

	schedule();

	return 0;
}

static inline int should_resched(void)
{
	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
}

static void __cond_resched(void)
{
	add_preempt_count(PREEMPT_ACTIVE);
	schedule();
	sub_preempt_count(PREEMPT_ACTIVE);
}

int __sched _cond_resched(void)
{
	if (should_resched()) {
		__cond_resched();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_cond_resched);

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched();
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (resched)
			__cond_resched();
		else
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
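
/*
 * Sketch of the intended pattern (more_work()/do_one_chunk() and my_lock
 * are hypothetical): a long scan under a spinlock yields at every
 * rescheduling point without open-coding the unlock/schedule/lock dance:
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_one_chunk();
 *		cond_resched_lock(&my_lock);	// may drop and retake my_lock
 *	}
 *	spin_unlock(&my_lock);
 */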

int __sched __cond_resched_softirq(void)
{
	BUG_ON(!in_softirq());

	if (should_resched()) {
		local_bh_enable();
		__cond_resched();
		local_bh_disable();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);

/**
 * yield - yield the current processor to other threads.
 *
 * This is a shortcut for kernel-space yielding - it marks the
 * thread runnable and calls sys_sched_yield().
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
EXPORT_SYMBOL(yield);

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
void __sched io_schedule(void)
{
	struct rq *rq = raw_rq();

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	current->in_iowait = 1;
	schedule();
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);
5059
5060long __sched io_schedule_timeout(long timeout)
5061{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005062 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063 long ret;
5064
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005065 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005067 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005069 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005071 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072 return ret;
5073}
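/*
 * Callers normally reach io_schedule() through higher-level wait
 * primitives (the block layer, mutexes, wait_event and friends);
 * a hand-rolled sketch, with wq and io_done hypothetical:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *	if (!io_done)
 *		io_schedule();		(sleep is accounted as iowait)
 *	finish_wait(&wq, &wait);
 *
 * The nr_iowait/in_iowait bookkeeping above is what lets tools such
 * as top and vmstat report an "iowait" percentage.
 */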
5074
5075/**
5076 * sys_sched_get_priority_max - return maximum RT priority.
5077 * @policy: scheduling class.
5078 *
 5079 * This syscall returns the maximum rt_priority that can be used
5080 * by a given scheduling class.
5081 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005082SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083{
5084 int ret = -EINVAL;
5085
5086 switch (policy) {
5087 case SCHED_FIFO:
5088 case SCHED_RR:
5089 ret = MAX_USER_RT_PRIO-1;
5090 break;
5091 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005092 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005093 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005094 ret = 0;
5095 break;
5096 }
5097 return ret;
5098}
5099
5100/**
5101 * sys_sched_get_priority_min - return minimum RT priority.
5102 * @policy: scheduling class.
5103 *
 5104 * This syscall returns the minimum rt_priority that can be used
5105 * by a given scheduling class.
5106 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005107SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108{
5109 int ret = -EINVAL;
5110
5111 switch (policy) {
5112 case SCHED_FIFO:
5113 case SCHED_RR:
5114 ret = 1;
5115 break;
5116 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005117 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005118 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119 ret = 0;
5120 }
5121 return ret;
5122}
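/*
 * Together, these two syscalls bracket the sched_priority range that
 * sched_setscheduler() will accept. Illustrative userspace use
 * (the values follow from MAX_USER_RT_PRIO == 100):
 *
 *	int max = sched_get_priority_max(SCHED_FIFO);	(99)
 *	int min = sched_get_priority_min(SCHED_FIFO);	(1)
 *
 * The non-realtime policies (SCHED_NORMAL, SCHED_BATCH, SCHED_IDLE)
 * report 0 for both bounds, since they do not use rt_priority.
 */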
5123
5124/**
5125 * sys_sched_rr_get_interval - return the default timeslice of a process.
5126 * @pid: pid of the process.
5127 * @interval: userspace pointer to the timeslice value.
5128 *
 5129 * This syscall writes the default timeslice value of a given process
5130 * into the user-space timespec buffer. A value of '0' means infinity.
5131 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01005132SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01005133 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005135 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005136 unsigned int time_slice;
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005137 unsigned long flags;
5138 struct rq *rq;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005139 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141
5142 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005143 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144
5145 retval = -ESRCH;
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005146 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147 p = find_process_by_pid(pid);
5148 if (!p)
5149 goto out_unlock;
5150
5151 retval = security_task_getscheduler(p);
5152 if (retval)
5153 goto out_unlock;
5154
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005155 rq = task_rq_lock(p, &flags);
5156 time_slice = p->sched_class->get_rr_interval(rq, p);
5157 task_rq_unlock(rq, &flags);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005158
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005159 rcu_read_unlock();
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005160 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005163
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164out_unlock:
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005165 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166 return retval;
5167}
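/*
 * Illustrative userspace use (pid 0 queries the calling task):
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n",
 *			(long)ts.tv_sec, ts.tv_nsec);
 *
 * For SCHED_RR tasks this is the round-robin slice; for other
 * policies the value comes from the class's get_rr_interval() hook.
 */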
5168
Steven Rostedt7c731e02008-05-12 21:20:41 +02005169static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005170
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005171void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005174 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005175
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176 state = p->state ? __ffs(p->state) + 1 : 0;
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005177 printk(KERN_INFO "%-13.13s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005178 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02005179#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005181 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005183 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184#else
5185 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005186 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005187 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005188 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005189#endif
5190#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05005191 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192#endif
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005193 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
David Rientjesaa47b7e2009-05-04 01:38:05 -07005194 task_pid_nr(p), task_pid_nr(p->real_parent),
5195 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01005197 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198}
5199
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005200void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005201{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005202 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203
Ingo Molnar4bd77322007-07-11 21:21:47 +02005204#if BITS_PER_LONG == 32
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005205 printk(KERN_INFO
5206 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207#else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005208 printk(KERN_INFO
5209 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210#endif
5211 read_lock(&tasklist_lock);
5212 do_each_thread(g, p) {
5213 /*
 5214 * reset the NMI-timeout; listing all tasks on a slow
 5215 * console might take a lot of time:
5216 */
5217 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07005218 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005219 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220 } while_each_thread(g, p);
5221
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07005222 touch_all_softlockup_watchdogs();
5223
Ingo Molnardd41f592007-07-09 18:51:59 +02005224#ifdef CONFIG_SCHED_DEBUG
5225 sysrq_sched_debug_show();
5226#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005228 /*
5229 * Only show locks if all tasks are dumped:
5230 */
Shmulik Ladkani93335a22009-11-25 15:23:41 +02005231 if (!state_filter)
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005232 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233}
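/*
 * These dumps are typically reached via magic SysRq rather than
 * called directly; roughly (see drivers/char/sysrq.c):
 *
 *	show_state_filter(0);			 (SysRq-t: all tasks)
 *	show_state_filter(TASK_UNINTERRUPTIBLE); (SysRq-w: blocked tasks)
 */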
5234
Ingo Molnar1df21052007-07-09 18:51:58 +02005235void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5236{
Ingo Molnardd41f592007-07-09 18:51:59 +02005237 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02005238}
5239
Ingo Molnarf340c0d2005-06-28 16:40:42 +02005240/**
5241 * init_idle - set up an idle thread for a given CPU
5242 * @idle: task in question
5243 * @cpu: cpu the idle task belongs to
5244 *
5245 * NOTE: this function does not set the idle thread's NEED_RESCHED
5246 * flag, to make booting more robust.
5247 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07005248void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005250 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251 unsigned long flags;
5252
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005253 raw_spin_lock_irqsave(&rq->lock, flags);
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01005254
Ingo Molnardd41f592007-07-09 18:51:59 +02005255 __sched_fork(idle);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01005256 idle->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02005257 idle->se.exec_start = sched_clock();
5258
Rusty Russell96f874e2008-11-25 02:35:14 +10305259 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
Ingo Molnardd41f592007-07-09 18:51:59 +02005260 __set_task_cpu(idle, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262 rq->curr = rq->idle = idle;
Nick Piggin4866cde2005-06-25 14:57:23 -07005263#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5264 idle->oncpu = 1;
5265#endif
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005266 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267
5268 /* Set the preempt count _outside_ the spinlocks! */
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005269#if defined(CONFIG_PREEMPT)
5270 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5271#else
Al Viroa1261f52005-11-13 16:06:55 -08005272 task_thread_info(idle)->preempt_count = 0;
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005273#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02005274 /*
5275 * The idle tasks have their own, simple scheduling class:
5276 */
5277 idle->sched_class = &idle_sched_class;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01005278 ftrace_graph_init_task(idle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005279}
5280
5281/*
 5282 * In a system that switches off the HZ timer, nohz_cpu_mask
 5283 * indicates which CPUs entered this state. This is used
 5284 * by the RCU update code to wait only for active CPUs. For systems
 5285 * which do not switch off the HZ timer, nohz_cpu_mask should
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305286 * always be CPU_BITS_NONE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005287 */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305288cpumask_var_t nohz_cpu_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289
Ingo Molnar19978ca2007-11-09 22:39:38 +01005290/*
5291 * Increase the granularity value when there are more CPUs,
5292 * because with more CPUs the 'effective latency' as visible
5293 * to users decreases. But the relationship is not linear,
5294 * so pick a second-best guess by going with the log2 of the
5295 * number of CPUs.
5296 *
5297 * This idea comes from the SD scheduler of Con Kolivas:
5298 */
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005299static int get_update_sysctl_factor(void)
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005300{
Mike Galbraith4ca3ef72009-12-10 09:25:53 +01005301 unsigned int cpus = min_t(int, num_online_cpus(), 8);
Christian Ehrhardt1983a922009-11-30 12:16:47 +01005302 unsigned int factor;
5303
5304 switch (sysctl_sched_tunable_scaling) {
5305 case SCHED_TUNABLESCALING_NONE:
5306 factor = 1;
5307 break;
5308 case SCHED_TUNABLESCALING_LINEAR:
5309 factor = cpus;
5310 break;
5311 case SCHED_TUNABLESCALING_LOG:
5312 default:
5313 factor = 1 + ilog2(cpus);
5314 break;
5315 }
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005316
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005317 return factor;
5318}
5319
5320static void update_sysctl(void)
5321{
5322 unsigned int factor = get_update_sysctl_factor();
5323
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005324#define SET_SYSCTL(name) \
5325 (sysctl_##name = (factor) * normalized_sysctl_##name)
5326 SET_SYSCTL(sched_min_granularity);
5327 SET_SYSCTL(sched_latency);
5328 SET_SYSCTL(sched_wakeup_granularity);
5329 SET_SYSCTL(sched_shares_ratelimit);
5330#undef SET_SYSCTL
5331}
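/*
 * Worked example of the factor above, assuming 16 online CPUs
 * (cpus is clamped to 8):
 *
 *	SCHED_TUNABLESCALING_NONE:	factor = 1
 *	SCHED_TUNABLESCALING_LINEAR:	factor = 8
 *	SCHED_TUNABLESCALING_LOG:	factor = 1 + ilog2(8) = 4
 *
 * So with the default (LOG) scaling, a normalized sched_latency of
 * 6ms would be published as 24ms (24000000 ns) via
 * /proc/sys/kernel/sched_latency_ns.
 */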
5332
Ingo Molnar19978ca2007-11-09 22:39:38 +01005333static inline void sched_init_granularity(void)
5334{
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005335 update_sysctl();
Ingo Molnar19978ca2007-11-09 22:39:38 +01005336}
5337
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338#ifdef CONFIG_SMP
5339/*
5340 * This is how migration works:
5341 *
Ingo Molnar70b97a72006-07-03 00:25:42 -07005342 * 1) we queue a struct migration_req structure in the source CPU's
Linus Torvalds1da177e2005-04-16 15:20:36 -07005343 * runqueue and wake up that CPU's migration thread.
5344 * 2) we down() the locked semaphore => thread blocks.
5345 * 3) migration thread wakes up (implicitly it forces the migrated
5346 * thread off the CPU)
5347 * 4) it gets the migration request and checks whether the migrated
5348 * task is still in the wrong runqueue.
5349 * 5) if it's in the wrong runqueue then the migration thread removes
5350 * it and puts it into the right queue.
5351 * 6) migration thread up()s the semaphore.
5352 * 7) we wake up and the migration is done.
5353 */
5354
5355/*
5356 * Change a given task's CPU affinity. Migrate the thread to a
5357 * proper CPU and schedule it away if the CPU it's executing on
5358 * is removed from the allowed bitmask.
5359 *
5360 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005361 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07005362 * call is not atomic; no spinlocks may be held.
5363 */
Rusty Russell96f874e2008-11-25 02:35:14 +10305364int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005365{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005366 struct migration_req req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005368 struct rq *rq;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005369 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370
5371 rq = task_rq_lock(p, &flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005372
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005373 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374 ret = -EINVAL;
5375 goto out;
5376 }
5377
David Rientjes9985b0b2008-06-05 12:57:11 -07005378 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
Rusty Russell96f874e2008-11-25 02:35:14 +10305379 !cpumask_equal(&p->cpus_allowed, new_mask))) {
David Rientjes9985b0b2008-06-05 12:57:11 -07005380 ret = -EINVAL;
5381 goto out;
5382 }
5383
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005384 if (p->sched_class->set_cpus_allowed)
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005385 p->sched_class->set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005386 else {
Rusty Russell96f874e2008-11-25 02:35:14 +10305387 cpumask_copy(&p->cpus_allowed, new_mask);
5388 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005389 }
5390
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10305392 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005393 goto out;
5394
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005395 if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005396 /* Need help from migration thread: drop lock and wait. */
Peter Zijlstra693525e2009-07-21 13:56:38 +02005397 struct task_struct *mt = rq->migration_thread;
5398
5399 get_task_struct(mt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005400 task_rq_unlock(rq, &flags);
Oleg Nesterov47a70982010-03-30 18:58:29 +02005401 wake_up_process(mt);
Peter Zijlstra693525e2009-07-21 13:56:38 +02005402 put_task_struct(mt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403 wait_for_completion(&req.done);
5404 tlb_migrate_finish(p->mm);
5405 return 0;
5406 }
5407out:
5408 task_rq_unlock(rq, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005409
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410 return ret;
5411}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005412EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
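/*
 * Typical caller sketch (p and target_cpu are illustrative): pin a
 * task to a single CPU. This may block in wait_for_completion(), so
 * it must not be called from atomic context.
 *
 *	ret = set_cpus_allowed_ptr(p, cpumask_of(target_cpu));
 *	if (ret)
 *		...	(CPU not active, or task is PF_THREAD_BOUND)
 */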
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413
5414/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005415 * Move a (non-current) task off this CPU, onto the dest CPU. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416 * this because either it can't run here any more (set_cpus_allowed()
5417 * away from this CPU, or CPU going down), or because we're
5418 * attempting to rebalance this task on exec (sched_exec).
5419 *
5420 * So we race with normal scheduler movements, but that's OK, as long
5421 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07005422 *
5423 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07005425static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005427 struct rq *rq_dest, *rq_src;
Peter Zijlstrae2912002009-12-16 18:04:36 +01005428 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005429
Max Krasnyanskye761b772008-07-15 04:43:49 -07005430 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07005431 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432
5433 rq_src = cpu_rq(src_cpu);
5434 rq_dest = cpu_rq(dest_cpu);
5435
5436 double_rq_lock(rq_src, rq_dest);
5437 /* Already moved. */
5438 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005439 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440 /* Affinity changed (again). */
Rusty Russell96f874e2008-11-25 02:35:14 +10305441 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005442 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443
Peter Zijlstrae2912002009-12-16 18:04:36 +01005444 /*
5445 * If we're not on a rq, the next wake-up will ensure we're
5446 * placed properly.
5447 */
5448 if (p->se.on_rq) {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005449 deactivate_task(rq_src, p, 0);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005450 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005451 activate_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02005452 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005454done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07005455 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005456fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005457 double_rq_unlock(rq_src, rq_dest);
Kirill Korotaevefc30812006-06-27 02:54:32 -07005458 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005459}
5460
Paul E. McKenney03b042b2009-06-25 09:08:16 -07005461#define RCU_MIGRATION_IDLE 0
5462#define RCU_MIGRATION_NEED_QS 1
5463#define RCU_MIGRATION_GOT_QS 2
5464#define RCU_MIGRATION_MUST_SYNC 3
5465
Linus Torvalds1da177e2005-04-16 15:20:36 -07005466/*
5467 * migration_thread - this is a highprio system thread that performs
5468 * thread migration by bumping thread off CPU then 'pushing' onto
5469 * another runqueue.
5470 */
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005471static int migration_thread(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472{
Paul E. McKenney03b042b2009-06-25 09:08:16 -07005473 int badcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005474 int cpu = (long)data;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005475 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005476
5477 rq = cpu_rq(cpu);
5478 BUG_ON(rq->migration_thread != current);
5479
5480 set_current_state(TASK_INTERRUPTIBLE);
5481 while (!kthread_should_stop()) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07005482 struct migration_req *req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005483 struct list_head *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005484
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005485 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005486
5487 if (cpu_is_offline(cpu)) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005488 raw_spin_unlock_irq(&rq->lock);
Oleg Nesterov371cbb32009-06-17 16:27:45 -07005489 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005490 }
5491
5492 if (rq->active_balance) {
5493 active_load_balance(rq, cpu);
5494 rq->active_balance = 0;
5495 }
5496
5497 head = &rq->migration_queue;
5498
5499 if (list_empty(head)) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005500 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005501 schedule();
5502 set_current_state(TASK_INTERRUPTIBLE);
5503 continue;
5504 }
Ingo Molnar70b97a72006-07-03 00:25:42 -07005505 req = list_entry(head->next, struct migration_req, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506 list_del_init(head->next);
5507
Paul E. McKenney03b042b2009-06-25 09:08:16 -07005508 if (req->task != NULL) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005509 raw_spin_unlock(&rq->lock);
Paul E. McKenney03b042b2009-06-25 09:08:16 -07005510 __migrate_task(req->task, cpu, req->dest_cpu);
5511 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
5512 req->dest_cpu = RCU_MIGRATION_GOT_QS;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005513 raw_spin_unlock(&rq->lock);
Paul E. McKenney03b042b2009-06-25 09:08:16 -07005514 } else {
5515 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005516 raw_spin_unlock(&rq->lock);
Paul E. McKenney03b042b2009-06-25 09:08:16 -07005517 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
5518 }
Nick Piggin674311d2005-06-25 14:57:27 -07005519 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005520
5521 complete(&req->done);
5522 }
5523 __set_current_state(TASK_RUNNING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524
Linus Torvalds1da177e2005-04-16 15:20:36 -07005525 return 0;
5526}
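/*
 * Besides task migration, the thread above doubles as a quiescent-
 * state probe for expedited RCU (the RCU_MIGRATION_* states): a
 * request with req->task == NULL merely forces this kthread to run
 * on its CPU. A sketch of the requester's side, names hypothetical:
 *
 *	req->task = NULL;
 *	req->dest_cpu = RCU_MIGRATION_NEED_QS;
 *	(queue req, wake the migration thread, wait for completion)
 *	if (req->dest_cpu == RCU_MIGRATION_MUST_SYNC)
 *		...	(fall back to a full grace period)
 */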
5527
5528#ifdef CONFIG_HOTPLUG_CPU
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005529
5530static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
5531{
5532 int ret;
5533
5534 local_irq_disable();
5535 ret = __migrate_task(p, src_cpu, dest_cpu);
5536 local_irq_enable();
5537 return ret;
5538}
5539
Kirill Korotaev054b9102006-12-10 02:20:11 -08005540/*
Robert P. J. Day3a4fa0a2007-10-19 23:10:43 +02005541 * Figure out where a task on the dead CPU should go; use force if necessary.
Kirill Korotaev054b9102006-12-10 02:20:11 -08005542 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07005543static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005544{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005545 int dest_cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546
Rusty Russelle76bd8d2008-11-25 02:35:11 +10305547again:
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01005548 dest_cpu = select_fallback_rq(dead_cpu, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549
Rusty Russelle76bd8d2008-11-25 02:35:11 +10305550 /* It can have affinity changed while we were choosing. */
5551 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
5552 goto again;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005553}
5554
5555/*
5556 * While a dead CPU has no uninterruptible tasks queued at this point,
5557 * it might still have a nonzero ->nr_uninterruptible counter, because
 5558 * for performance reasons the counter is not strictly tracking tasks to
5559 * their home CPUs. So we just add the counter to another CPU's counter,
5560 * to keep the global sum constant after CPU-down:
5561 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07005562static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005563{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005564 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565 unsigned long flags;
5566
5567 local_irq_save(flags);
5568 double_rq_lock(rq_src, rq_dest);
5569 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5570 rq_src->nr_uninterruptible = 0;
5571 double_rq_unlock(rq_src, rq_dest);
5572 local_irq_restore(flags);
5573}
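/*
 * Numeric example: if the dying CPU's ->nr_uninterruptible is -3 and
 * the chosen active CPU's is +7, after the transfer they read 0 and
 * +4 respectively; the system-wide sum (what nr_uninterruptible()
 * reports) is 4 both before and after.
 */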
5574
5575/* Run through task list and migrate tasks from the dead cpu. */
5576static void migrate_live_tasks(int src_cpu)
5577{
Ingo Molnar48f24c42006-07-03 00:25:40 -07005578 struct task_struct *p, *t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005579
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005580 read_lock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005581
Ingo Molnar48f24c42006-07-03 00:25:40 -07005582 do_each_thread(t, p) {
5583 if (p == current)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005584 continue;
5585
Ingo Molnar48f24c42006-07-03 00:25:40 -07005586 if (task_cpu(p) == src_cpu)
5587 move_task_off_dead_cpu(src_cpu, p);
5588 } while_each_thread(t, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005589
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005590 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005591}
5592
Ingo Molnardd41f592007-07-09 18:51:59 +02005593/*
5594 * Schedules idle task to be the next runnable task on current CPU.
Dmitry Adamushko94bc9a72007-11-15 20:57:40 +01005595 * It does so by boosting its priority to highest possible.
5596 * Used by CPU offline code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005597 */
5598void sched_idle_next(void)
5599{
Ingo Molnar48f24c42006-07-03 00:25:40 -07005600 int this_cpu = smp_processor_id();
Ingo Molnar70b97a72006-07-03 00:25:42 -07005601 struct rq *rq = cpu_rq(this_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005602 struct task_struct *p = rq->idle;
5603 unsigned long flags;
5604
5605 /* cpu has to be offline */
Ingo Molnar48f24c42006-07-03 00:25:40 -07005606 BUG_ON(cpu_online(this_cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005607
Ingo Molnar48f24c42006-07-03 00:25:40 -07005608 /*
 5609 * Strictly not necessary, since the rest of the CPUs are stopped by now
 5610 * and interrupts are disabled on the current CPU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005611 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005612 raw_spin_lock_irqsave(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005613
Ingo Molnardd41f592007-07-09 18:51:59 +02005614 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005615
Dmitry Adamushko94bc9a72007-11-15 20:57:40 +01005616 update_rq_clock(rq);
5617 activate_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005618
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005619 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005620}
5621
Ingo Molnar48f24c42006-07-03 00:25:40 -07005622/*
5623 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005624 * offline.
5625 */
5626void idle_task_exit(void)
5627{
5628 struct mm_struct *mm = current->active_mm;
5629
5630 BUG_ON(cpu_online(smp_processor_id()));
5631
5632 if (mm != &init_mm)
5633 switch_mm(mm, &init_mm, current);
5634 mmdrop(mm);
5635}
5636
Kirill Korotaev054b9102006-12-10 02:20:11 -08005637/* called under rq->lock with disabled interrupts */
Ingo Molnar36c8b582006-07-03 00:25:41 -07005638static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005639{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005640 struct rq *rq = cpu_rq(dead_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005641
5642 /* Must be exiting, otherwise would be on tasklist. */
Eugene Teo270f7222007-10-18 23:40:38 -07005643 BUG_ON(!p->exit_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005644
5645 /* Cannot have done final schedule yet: would have vanished. */
Oleg Nesterovc394cc92006-09-29 02:01:11 -07005646 BUG_ON(p->state == TASK_DEAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647
Ingo Molnar48f24c42006-07-03 00:25:40 -07005648 get_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005649
5650 /*
5651 * Drop lock around migration; if someone else moves it,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005652 * that's OK. No task can be added to this CPU, so iteration is
Linus Torvalds1da177e2005-04-16 15:20:36 -07005653 * fine.
5654 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005655 raw_spin_unlock_irq(&rq->lock);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005656 move_task_off_dead_cpu(dead_cpu, p);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005657 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658
Ingo Molnar48f24c42006-07-03 00:25:40 -07005659 put_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005660}
5661
5662/* release_task() removes task from tasklist, so we won't find dead tasks. */
5663static void migrate_dead_tasks(unsigned int dead_cpu)
5664{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005665 struct rq *rq = cpu_rq(dead_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005666 struct task_struct *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005667
Ingo Molnardd41f592007-07-09 18:51:59 +02005668 for ( ; ; ) {
5669 if (!rq->nr_running)
5670 break;
Ingo Molnara8e504d2007-08-09 11:16:47 +02005671 update_rq_clock(rq);
Wang Chenb67802e2009-03-02 13:55:26 +08005672 next = pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02005673 if (!next)
5674 break;
Dmitry Adamushko79c53792008-06-29 00:16:56 +02005675 next->sched_class->put_prev_task(rq, next);
Ingo Molnardd41f592007-07-09 18:51:59 +02005676 migrate_dead(dead_cpu, next);
Nick Piggine692ab52007-07-26 13:40:43 +02005677
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678 }
5679}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02005680
5681/*
5682 * remove the tasks which were accounted by rq from calc_load_tasks.
5683 */
5684static void calc_global_load_remove(struct rq *rq)
5685{
5686 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
Thomas Gleixnera468d382009-07-17 14:15:46 +02005687 rq->calc_load_active = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02005688}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005689#endif /* CONFIG_HOTPLUG_CPU */
5690
Nick Piggine692ab52007-07-26 13:40:43 +02005691#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5692
5693static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02005694 {
5695 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005696 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02005697 },
Eric W. Biederman56992302009-11-05 15:38:40 -08005698 {}
Nick Piggine692ab52007-07-26 13:40:43 +02005699};
5700
5701static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02005702 {
5703 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005704 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02005705 .child = sd_ctl_dir,
5706 },
Eric W. Biederman56992302009-11-05 15:38:40 -08005707 {}
Nick Piggine692ab52007-07-26 13:40:43 +02005708};
5709
5710static struct ctl_table *sd_alloc_ctl_entry(int n)
5711{
5712 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02005713 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02005714
Nick Piggine692ab52007-07-26 13:40:43 +02005715 return entry;
5716}
5717
Milton Miller6382bc92007-10-15 17:00:19 +02005718static void sd_free_ctl_entry(struct ctl_table **tablep)
5719{
Milton Millercd790072007-10-17 16:55:11 +02005720 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02005721
Milton Millercd790072007-10-17 16:55:11 +02005722 /*
5723 * In the intermediate directories, both the child directory and
 5724 * procname are dynamically allocated (and may be NULL), but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005725 * will always be set. In the lowest directory the names are
Milton Millercd790072007-10-17 16:55:11 +02005726 * static strings and all have proc handlers.
5727 */
5728 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02005729 if (entry->child)
5730 sd_free_ctl_entry(&entry->child);
Milton Millercd790072007-10-17 16:55:11 +02005731 if (entry->proc_handler == NULL)
5732 kfree(entry->procname);
5733 }
Milton Miller6382bc92007-10-15 17:00:19 +02005734
5735 kfree(*tablep);
5736 *tablep = NULL;
5737}
5738
Nick Piggine692ab52007-07-26 13:40:43 +02005739static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02005740set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02005741 const char *procname, void *data, int maxlen,
5742 mode_t mode, proc_handler *proc_handler)
5743{
Nick Piggine692ab52007-07-26 13:40:43 +02005744 entry->procname = procname;
5745 entry->data = data;
5746 entry->maxlen = maxlen;
5747 entry->mode = mode;
5748 entry->proc_handler = proc_handler;
5749}
5750
5751static struct ctl_table *
5752sd_alloc_ctl_domain_table(struct sched_domain *sd)
5753{
Ingo Molnara5d8c342008-10-09 11:35:51 +02005754 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02005755
Milton Millerad1cdc12007-10-15 17:00:19 +02005756 if (table == NULL)
5757 return NULL;
5758
Alexey Dobriyane0361852007-08-09 11:16:46 +02005759 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02005760 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005761 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02005762 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005763 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005764 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005765 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005766 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005767 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005768 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005769 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005770 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005771 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005772 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005773 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02005774 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005775 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02005776 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02005777 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02005778 &sd->cache_nice_tries,
5779 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02005780 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02005781 sizeof(int), 0644, proc_dointvec_minmax);
Ingo Molnara5d8c342008-10-09 11:35:51 +02005782 set_table_entry(&table[11], "name", sd->name,
5783 CORENAME_MAX_SIZE, 0444, proc_dostring);
5784 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02005785
5786 return table;
5787}
5788
Ingo Molnar9a4e7152007-11-28 15:52:56 +01005789static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02005790{
5791 struct ctl_table *entry, *table;
5792 struct sched_domain *sd;
5793 int domain_num = 0, i;
5794 char buf[32];
5795
5796 for_each_domain(cpu, sd)
5797 domain_num++;
5798 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02005799 if (table == NULL)
5800 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02005801
5802 i = 0;
5803 for_each_domain(cpu, sd) {
5804 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02005805 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005806 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02005807 entry->child = sd_alloc_ctl_domain_table(sd);
5808 entry++;
5809 i++;
5810 }
5811 return table;
5812}
5813
5814static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02005815static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02005816{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005817 int i, cpu_num = num_possible_cpus();
Nick Piggine692ab52007-07-26 13:40:43 +02005818 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5819 char buf[32];
5820
Milton Miller73785472007-10-24 18:23:48 +02005821 WARN_ON(sd_ctl_dir[0].child);
5822 sd_ctl_dir[0].child = entry;
5823
Milton Millerad1cdc12007-10-15 17:00:19 +02005824 if (entry == NULL)
5825 return;
5826
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005827 for_each_possible_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02005828 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02005829 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005830 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02005831 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02005832 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02005833 }
Milton Miller73785472007-10-24 18:23:48 +02005834
5835 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02005836 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5837}
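/*
 * The result is a per-cpu, per-domain tree under /proc/sys, e.g.
 * (the exact layout depends on the machine's topology):
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/flags
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/name
 *	...
 */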
Milton Miller6382bc92007-10-15 17:00:19 +02005838
Milton Miller73785472007-10-24 18:23:48 +02005839/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02005840static void unregister_sched_domain_sysctl(void)
5841{
Milton Miller73785472007-10-24 18:23:48 +02005842 if (sd_sysctl_header)
5843 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02005844 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02005845 if (sd_ctl_dir[0].child)
5846 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02005847}
Nick Piggine692ab52007-07-26 13:40:43 +02005848#else
Milton Miller6382bc92007-10-15 17:00:19 +02005849static void register_sched_domain_sysctl(void)
5850{
5851}
5852static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02005853{
5854}
5855#endif
5856
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005857static void set_rq_online(struct rq *rq)
5858{
5859 if (!rq->online) {
5860 const struct sched_class *class;
5861
Rusty Russellc6c49272008-11-25 02:35:05 +10305862 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005863 rq->online = 1;
5864
5865 for_each_class(class) {
5866 if (class->rq_online)
5867 class->rq_online(rq);
5868 }
5869 }
5870}
5871
5872static void set_rq_offline(struct rq *rq)
5873{
5874 if (rq->online) {
5875 const struct sched_class *class;
5876
5877 for_each_class(class) {
5878 if (class->rq_offline)
5879 class->rq_offline(rq);
5880 }
5881
Rusty Russellc6c49272008-11-25 02:35:05 +10305882 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005883 rq->online = 0;
5884 }
5885}
5886
Linus Torvalds1da177e2005-04-16 15:20:36 -07005887/*
 5888 * migration_call - callback that gets triggered on CPU hotplug events.
5889 * Here we can start up the necessary migration thread for the new CPU.
5890 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07005891static int __cpuinit
5892migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005893{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005894 struct task_struct *p;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005895 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005896 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005897 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005898
5899 switch (action) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07005900
Linus Torvalds1da177e2005-04-16 15:20:36 -07005901 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005902 case CPU_UP_PREPARE_FROZEN:
Ingo Molnardd41f592007-07-09 18:51:59 +02005903 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005904 if (IS_ERR(p))
5905 return NOTIFY_BAD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005906 kthread_bind(p, cpu);
5907 /* Must be high prio: stop_machine expects to yield to it. */
5908 rq = task_rq_lock(p, &flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02005909 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005910 task_rq_unlock(rq, &flags);
Oleg Nesterov371cbb32009-06-17 16:27:45 -07005911 get_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005912 cpu_rq(cpu)->migration_thread = p;
Thomas Gleixnera468d382009-07-17 14:15:46 +02005913 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005914 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005915
Linus Torvalds1da177e2005-04-16 15:20:36 -07005916 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005917 case CPU_ONLINE_FROZEN:
Robert P. J. Day3a4fa0a2007-10-19 23:10:43 +02005918 /* Strictly unnecessary, as the first user will wake it. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005919 wake_up_process(cpu_rq(cpu)->migration_thread);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04005920
5921 /* Update our root-domain */
5922 rq = cpu_rq(cpu);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005923 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04005924 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10305925 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005926
5927 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04005928 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005929 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005930 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005931
Linus Torvalds1da177e2005-04-16 15:20:36 -07005932#ifdef CONFIG_HOTPLUG_CPU
5933 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005934 case CPU_UP_CANCELED_FROZEN:
Heiko Carstensfc75cdf2006-06-25 05:49:10 -07005935 if (!cpu_rq(cpu)->migration_thread)
5936 break;
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005937 /* Unbind it from the offline CPU so it can run. Fall through. */
Heiko Carstensa4c4af72005-11-07 00:58:38 -08005938 kthread_bind(cpu_rq(cpu)->migration_thread,
Rusty Russell1e5ce4f2008-11-25 02:35:03 +10305939 cpumask_any(cpu_online_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005940 kthread_stop(cpu_rq(cpu)->migration_thread);
Oleg Nesterov371cbb32009-06-17 16:27:45 -07005941 put_task_struct(cpu_rq(cpu)->migration_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005942 cpu_rq(cpu)->migration_thread = NULL;
5943 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005944
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005946 case CPU_DEAD_FROZEN:
Cliff Wickman470fd642007-10-18 23:40:46 -07005947 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005948 migrate_live_tasks(cpu);
5949 rq = cpu_rq(cpu);
5950 kthread_stop(rq->migration_thread);
Oleg Nesterov371cbb32009-06-17 16:27:45 -07005951 put_task_struct(rq->migration_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005952 rq->migration_thread = NULL;
5953 /* Idle task back to normal (off runqueue, low prio) */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005954 raw_spin_lock_irq(&rq->lock);
Ingo Molnara8e504d2007-08-09 11:16:47 +02005955 update_rq_clock(rq);
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005956 deactivate_task(rq, rq->idle, 0);
Ingo Molnardd41f592007-07-09 18:51:59 +02005957 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
5958 rq->idle->sched_class = &idle_sched_class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005959 migrate_dead_tasks(cpu);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005960 raw_spin_unlock_irq(&rq->lock);
Cliff Wickman470fd642007-10-18 23:40:46 -07005961 cpuset_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005962 migrate_nr_uninterruptible(rq);
5963 BUG_ON(rq->nr_running != 0);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02005964 calc_global_load_remove(rq);
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005965 /*
5966 * No need to migrate the tasks: it was best-effort if
5967 * they didn't take sched_hotcpu_mutex. Just wake up
5968 * the requestors.
5969 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005970 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005971 while (!list_empty(&rq->migration_queue)) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07005972 struct migration_req *req;
5973
Linus Torvalds1da177e2005-04-16 15:20:36 -07005974 req = list_entry(rq->migration_queue.next,
Ingo Molnar70b97a72006-07-03 00:25:42 -07005975 struct migration_req, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005976 list_del_init(&req->list);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005977 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005978 complete(&req->done);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005979 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005980 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005981 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005982 break;
Gregory Haskins57d885f2008-01-25 21:08:18 +01005983
Gregory Haskins08f503b2008-03-10 17:59:11 -04005984 case CPU_DYING:
5985 case CPU_DYING_FROZEN:
Gregory Haskins57d885f2008-01-25 21:08:18 +01005986 /* Update our root-domain */
5987 rq = cpu_rq(cpu);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005988 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005989 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10305990 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005991 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005992 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005993 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005994 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005995#endif
5996 }
5997 return NOTIFY_OK;
5998}
5999
Paul Mackerrasf38b0822009-06-02 21:05:16 +10006000/*
6001 * Register at high priority so that task migration (migrate_all_tasks)
6002 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006003 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006004 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07006005static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006006 .notifier_call = migration_call,
6007 .priority = 10
6008};
6009
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006010static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006011{
6012 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07006013 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006014
6015 /* Start one for the boot CPU: */
Akinobu Mita07dccf32006-09-29 02:00:22 -07006016 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6017 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006018 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6019 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006020
Thomas Gleixnera004cd42009-07-21 09:54:05 +02006021 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006022}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006023early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006024#endif
6025
6026#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07006027
Ingo Molnar3e9830d2007-10-15 17:00:13 +02006028#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006029
Mike Travisf6630112009-11-17 18:22:15 -06006030static __read_mostly int sched_domain_debug_enabled;
6031
6032static int __init sched_domain_debug_setup(char *str)
6033{
6034 sched_domain_debug_enabled = 1;
6035
6036 return 0;
6037}
6038early_param("sched_debug", sched_domain_debug_setup);
6039
Mike Travis7c16ec52008-04-04 18:11:11 -07006040static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10306041 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006042{
6043 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07006044 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006045
Rusty Russell968ea6d2008-12-13 21:55:51 +10306046 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10306047 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006048
6049 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6050
6051 if (!(sd->flags & SD_LOAD_BALANCE)) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006052 printk("does not load-balance\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006053 if (sd->parent)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006054 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6055 " has parent");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006056 return -1;
6057 }
6058
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006059 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006060
Rusty Russell758b2cd2008-11-25 02:35:04 +10306061 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006062 printk(KERN_ERR "ERROR: domain->span does not contain "
6063 "CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006064 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10306065 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006066 printk(KERN_ERR "ERROR: domain->groups does not contain"
6067 " CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006068 }
6069
6070 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6071 do {
6072 if (!group) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006073 printk("\n");
6074 printk(KERN_ERR "ERROR: group is NULL\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006075 break;
6076 }
6077
Peter Zijlstra18a38852009-09-01 10:34:39 +02006078 if (!group->cpu_power) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006079 printk(KERN_CONT "\n");
6080 printk(KERN_ERR "ERROR: domain->cpu_power not "
6081 "set\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006082 break;
6083 }
6084
Rusty Russell758b2cd2008-11-25 02:35:04 +10306085 if (!cpumask_weight(sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006086 printk(KERN_CONT "\n");
6087 printk(KERN_ERR "ERROR: empty group\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006088 break;
6089 }
6090
Rusty Russell758b2cd2008-11-25 02:35:04 +10306091 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006092 printk(KERN_CONT "\n");
6093 printk(KERN_ERR "ERROR: repeated CPUs\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006094 break;
6095 }
6096
Rusty Russell758b2cd2008-11-25 02:35:04 +10306097 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006098
Rusty Russell968ea6d2008-12-13 21:55:51 +10306099 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306100
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006101 printk(KERN_CONT " %s", str);
Peter Zijlstra18a38852009-09-01 10:34:39 +02006102 if (group->cpu_power != SCHED_LOAD_SCALE) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006103 printk(KERN_CONT " (cpu_power = %d)",
6104 group->cpu_power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306105 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006106
6107 group = group->next;
6108 } while (group != sd->groups);
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006109 printk(KERN_CONT "\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006110
Rusty Russell758b2cd2008-11-25 02:35:04 +10306111 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006112 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006113
Rusty Russell758b2cd2008-11-25 02:35:04 +10306114 if (sd->parent &&
6115 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006116 printk(KERN_ERR "ERROR: parent span is not a superset "
6117 "of domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006118 return 0;
6119}
6120
Linus Torvalds1da177e2005-04-16 15:20:36 -07006121static void sched_domain_debug(struct sched_domain *sd, int cpu)
6122{
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306123 cpumask_var_t groupmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006124 int level = 0;
6125
Mike Travisf6630112009-11-17 18:22:15 -06006126 if (!sched_domain_debug_enabled)
6127 return;
6128
Nick Piggin41c7ce92005-06-25 14:57:24 -07006129 if (!sd) {
6130 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6131 return;
6132 }
6133
Linus Torvalds1da177e2005-04-16 15:20:36 -07006134 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6135
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306136 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006137 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6138 return;
6139 }
6140
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006141 for (;;) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006142 if (sched_domain_debug_one(sd, cpu, level, groupmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006143 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006144 level++;
6145 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08006146 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006147 break;
6148 }
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306149 free_cpumask_var(groupmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006150}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006151#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006152# define sched_domain_debug(sd, cpu) do { } while (0)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006153#endif /* CONFIG_SCHED_DEBUG */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006154
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006155static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006156{
Rusty Russell758b2cd2008-11-25 02:35:04 +10306157 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006158 return 1;
6159
6160 /* Following flags need at least 2 groups */
6161 if (sd->flags & (SD_LOAD_BALANCE |
6162 SD_BALANCE_NEWIDLE |
6163 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006164 SD_BALANCE_EXEC |
6165 SD_SHARE_CPUPOWER |
6166 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006167 if (sd->groups != sd->groups->next)
6168 return 0;
6169 }
6170
6171 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006172 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006173 return 0;
6174
6175 return 1;
6176}
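/*
 * Illustrative sketch of when the checks above fire (hypothetical spans,
 * not compiled): a SIBLING domain on a non-SMT CPU spans only that CPU,
 * so cpumask_weight() == 1 and the domain is degenerate; likewise a
 * single-group domain carrying only balancing flags has nothing left to
 * balance between:
 *
 *	span = { 3 }                                  -> degenerate
 *	span = { 2, 3 }, one group, no SD_WAKE_AFFINE -> degenerate
 *	span = { 2, 3 }, two groups                   -> kept
 *
 * cpu_attach_domain() below uses this to prune such levels.
 */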
6177
Ingo Molnar48f24c42006-07-03 00:25:40 -07006178static int
6179sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006180{
6181 unsigned long cflags = sd->flags, pflags = parent->flags;
6182
6183 if (sd_degenerate(parent))
6184 return 1;
6185
Rusty Russell758b2cd2008-11-25 02:35:04 +10306186 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006187 return 0;
6188
Suresh Siddha245af2c2005-06-25 14:57:25 -07006189 /* Flags needing groups don't count if only 1 group in parent */
6190 if (parent->groups == parent->groups->next) {
6191 pflags &= ~(SD_LOAD_BALANCE |
6192 SD_BALANCE_NEWIDLE |
6193 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006194 SD_BALANCE_EXEC |
6195 SD_SHARE_CPUPOWER |
6196 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08006197 if (nr_node_ids == 1)
6198 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006199 }
6200 if (~cflags & pflags)
6201 return 0;
6202
6203 return 1;
6204}
6205
Rusty Russellc6c49272008-11-25 02:35:05 +10306206static void free_rootdomain(struct root_domain *rd)
6207{
Peter Zijlstra047106a2009-11-16 10:28:09 +01006208 synchronize_sched();
6209
Rusty Russell68e74562008-11-25 02:35:13 +10306210 cpupri_cleanup(&rd->cpupri);
6211
Rusty Russellc6c49272008-11-25 02:35:05 +10306212 free_cpumask_var(rd->rto_mask);
6213 free_cpumask_var(rd->online);
6214 free_cpumask_var(rd->span);
6215 kfree(rd);
6216}
6217
Gregory Haskins57d885f2008-01-25 21:08:18 +01006218static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6219{
Ingo Molnara0490fa2009-02-12 11:35:40 +01006220 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006221 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006222
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006223 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006224
6225 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01006226 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006227
Rusty Russellc6c49272008-11-25 02:35:05 +10306228 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006229 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006230
Rusty Russellc6c49272008-11-25 02:35:05 +10306231 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01006232
Ingo Molnara0490fa2009-02-12 11:35:40 +01006233 /*
6234	 * If we don't want to free the old_rd yet then
6235 * set old_rd to NULL to skip the freeing later
6236 * in this function:
6237 */
6238 if (!atomic_dec_and_test(&old_rd->refcount))
6239 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006240 }
6241
6242 atomic_inc(&rd->refcount);
6243 rq->rd = rd;
6244
Rusty Russellc6c49272008-11-25 02:35:05 +10306245 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04006246 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006247 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006248
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006249 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01006250
6251 if (old_rd)
6252 free_rootdomain(old_rd);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006253}
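/*
 * Refcounting sketch (hypothetical sequence, illustrative only): a
 * root_domain stays alive as long as at least one runqueue points at it.
 *
 *	rq_attach_root(rq0, rd);	refcount 1
 *	rq_attach_root(rq1, rd);	refcount 2
 *	rq_attach_root(rq1, rd2);	refcount 1, rd kept
 *	rq_attach_root(rq0, rd2);	refcount 0, free_rootdomain(rd)
 *
 * The synchronize_sched() at the top of free_rootdomain() is there so
 * that concurrent scheduler-side users of the old rd drain before its
 * cpumasks are freed.
 */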
6254
Li Zefanfd5e1b52009-06-15 13:34:19 +08006255static int init_rootdomain(struct root_domain *rd, bool bootmem)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006256{
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03006257 gfp_t gfp = GFP_KERNEL;
6258
Gregory Haskins57d885f2008-01-25 21:08:18 +01006259 memset(rd, 0, sizeof(*rd));
6260
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03006261 if (bootmem)
6262 gfp = GFP_NOWAIT;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02006263
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03006264 if (!alloc_cpumask_var(&rd->span, gfp))
Li Zefan0c910d22009-01-06 17:39:06 +08006265 goto out;
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03006266 if (!alloc_cpumask_var(&rd->online, gfp))
Rusty Russellc6c49272008-11-25 02:35:05 +10306267 goto free_span;
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03006268 if (!alloc_cpumask_var(&rd->rto_mask, gfp))
Rusty Russellc6c49272008-11-25 02:35:05 +10306269 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02006270
Pekka Enberg0fb53022009-06-11 08:41:22 +03006271 if (cpupri_init(&rd->cpupri, bootmem) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10306272 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10306273 return 0;
6274
Rusty Russell68e74562008-11-25 02:35:13 +10306275free_rto_mask:
6276 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10306277free_online:
6278 free_cpumask_var(rd->online);
6279free_span:
6280 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08006281out:
Rusty Russellc6c49272008-11-25 02:35:05 +10306282 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006283}
6284
6285static void init_defrootdomain(void)
6286{
Rusty Russellc6c49272008-11-25 02:35:05 +10306287 init_rootdomain(&def_root_domain, true);
6288
Gregory Haskins57d885f2008-01-25 21:08:18 +01006289 atomic_set(&def_root_domain.refcount, 1);
6290}
6291
Gregory Haskinsdc938522008-01-25 21:08:26 +01006292static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006293{
6294 struct root_domain *rd;
6295
6296 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6297 if (!rd)
6298 return NULL;
6299
Rusty Russellc6c49272008-11-25 02:35:05 +10306300 if (init_rootdomain(rd, false) != 0) {
6301 kfree(rd);
6302 return NULL;
6303 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006304
6305 return rd;
6306}
6307
Linus Torvalds1da177e2005-04-16 15:20:36 -07006308/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01006309 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07006310 * hold the hotplug lock.
6311 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01006312static void
6313cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006314{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006315 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006316 struct sched_domain *tmp;
6317
6318 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08006319 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006320 struct sched_domain *parent = tmp->parent;
6321 if (!parent)
6322 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08006323
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006324 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006325 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006326 if (parent->parent)
6327 parent->parent->child = tmp;
Li Zefanf29c9b12008-11-06 09:45:16 +08006328 } else
6329 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006330 }
6331
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006332 if (sd && sd_degenerate(sd)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006333 sd = sd->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006334 if (sd)
6335 sd->child = NULL;
6336 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006337
6338 sched_domain_debug(sd, cpu);
6339
Gregory Haskins57d885f2008-01-25 21:08:18 +01006340 rq_attach_root(rq, rd);
Nick Piggin674311d2005-06-25 14:57:27 -07006341 rcu_assign_pointer(rq->sd, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342}
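/*
 * Collapse example for the pruning above (hypothetical topology, sketch
 * only): starting from
 *
 *	SIBLING(span {0}) -> MC(span {0,1}) -> CPU(span {0,1}) -> NODE
 *
 * SIBLING spans a single CPU and is degenerate, so the attached base
 * becomes MC; and if CPU spans the same mask as MC with a single group
 * adding no extra flags, sd_parent_degenerate() may splice it out too,
 * leaving MC -> NODE as the chain published via rcu_assign_pointer().
 */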
6343
6344/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10306345static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006346
6347/* Set up the mask of cpus configured for isolated domains */
6348static int __init isolated_cpu_setup(char *str)
6349{
Rusty Russellbdddd292009-12-02 14:09:16 +10306350 alloc_bootmem_cpumask_var(&cpu_isolated_map);
Rusty Russell968ea6d2008-12-13 21:55:51 +10306351 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352 return 1;
6353}
6354
Ingo Molnar8927f492007-10-15 17:00:13 +02006355__setup("isolcpus=", isolated_cpu_setup);
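/*
 * Usage example: booting with
 *
 *	isolcpus=1,2-3
 *
 * parses the list into cpu_isolated_map, so CPUs 1-3 are left out of the
 * domains built below and never load-balanced; tasks run there only when
 * placed explicitly, e.g. via sched_setaffinity(2).
 */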
Linus Torvalds1da177e2005-04-16 15:20:36 -07006356
6357/*
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006358 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6359 * to a function which identifies what group(along with sched group) a CPU
Rusty Russell96f874e2008-11-25 02:35:14 +10306360 * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
6361 * (due to the fact that we keep track of groups covered with a struct cpumask).
Linus Torvalds1da177e2005-04-16 15:20:36 -07006362 *
6363 * init_sched_build_groups will build a circular linked list of the groups
6364 * covered by the given span, set each group's ->cpumask correctly, and
6365 * initialize each group's ->cpu_power to 0.
6366 */
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006367static void
Rusty Russell96f874e2008-11-25 02:35:14 +10306368init_sched_build_groups(const struct cpumask *span,
6369 const struct cpumask *cpu_map,
6370 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
Mike Travis7c16ec52008-04-04 18:11:11 -07006371 struct sched_group **sg,
Rusty Russell96f874e2008-11-25 02:35:14 +10306372 struct cpumask *tmpmask),
6373 struct cpumask *covered, struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006374{
6375 struct sched_group *first = NULL, *last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006376 int i;
6377
Rusty Russell96f874e2008-11-25 02:35:14 +10306378 cpumask_clear(covered);
Mike Travis7c16ec52008-04-04 18:11:11 -07006379
Rusty Russellabcd0832008-11-25 02:35:02 +10306380 for_each_cpu(i, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006381 struct sched_group *sg;
Mike Travis7c16ec52008-04-04 18:11:11 -07006382 int group = group_fn(i, cpu_map, &sg, tmpmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006383 int j;
6384
Rusty Russell758b2cd2008-11-25 02:35:04 +10306385 if (cpumask_test_cpu(i, covered))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006386 continue;
6387
Rusty Russell758b2cd2008-11-25 02:35:04 +10306388 cpumask_clear(sched_group_cpus(sg));
Peter Zijlstra18a38852009-09-01 10:34:39 +02006389 sg->cpu_power = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006390
Rusty Russellabcd0832008-11-25 02:35:02 +10306391 for_each_cpu(j, span) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006392 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006393 continue;
6394
Rusty Russell96f874e2008-11-25 02:35:14 +10306395 cpumask_set_cpu(j, covered);
Rusty Russell758b2cd2008-11-25 02:35:04 +10306396 cpumask_set_cpu(j, sched_group_cpus(sg));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006397 }
6398 if (!first)
6399 first = sg;
6400 if (last)
6401 last->next = sg;
6402 last = sg;
6403 }
6404 last->next = first;
6405}
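/*
 * A typical invocation, mirroring the SMT case in build_sched_groups()
 * further down (sketch only; there the masks live in struct s_data):
 *
 *	cpumask_and(sibling_map, cpu_map, topology_thread_cpumask(cpu));
 *	init_sched_build_groups(sibling_map, cpu_map, &cpu_to_cpu_group,
 *				covered, tmpmask);
 *
 * Each distinct value returned by group_fn becomes one sched_group, and
 * the groups end up linked into a ring through ->next.
 */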
6406
John Hawkes9c1cfda2005-09-06 15:18:14 -07006407#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07006408
John Hawkes9c1cfda2005-09-06 15:18:14 -07006409#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08006410
John Hawkes9c1cfda2005-09-06 15:18:14 -07006411/**
6412 * find_next_best_node - find the next node to include in a sched_domain
6413 * @node: node whose sched_domain we're building
6414 * @used_nodes: nodes already in the sched_domain
6415 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006416 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07006417 * finds the closest node not already in the @used_nodes map.
6418 *
6419 * Should use nodemask_t.
6420 */
Mike Travisc5f59f02008-04-04 18:11:10 -07006421static int find_next_best_node(int node, nodemask_t *used_nodes)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006422{
6423 int i, n, val, min_val, best_node = 0;
6424
6425 min_val = INT_MAX;
6426
Mike Travis076ac2a2008-05-12 21:21:12 +02006427 for (i = 0; i < nr_node_ids; i++) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07006428 /* Start at @node */
Mike Travis076ac2a2008-05-12 21:21:12 +02006429 n = (node + i) % nr_node_ids;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006430
6431 if (!nr_cpus_node(n))
6432 continue;
6433
6434 /* Skip already used nodes */
Mike Travisc5f59f02008-04-04 18:11:10 -07006435 if (node_isset(n, *used_nodes))
John Hawkes9c1cfda2005-09-06 15:18:14 -07006436 continue;
6437
6438 /* Simple min distance search */
6439 val = node_distance(node, n);
6440
6441 if (val < min_val) {
6442 min_val = val;
6443 best_node = n;
6444 }
6445 }
6446
Mike Travisc5f59f02008-04-04 18:11:10 -07006447 node_set(best_node, *used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006448 return best_node;
6449}
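/*
 * Worked example with hypothetical node_distance() values: for node 0,
 * d(0,1) = 20, d(0,2) = 40, d(0,3) = 20 and used_nodes = {0}, the first
 * call returns node 1 (closest unused; ties go to the candidate scanned
 * first), the second returns node 3, the third node 2.
 */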
6450
6451/**
6452 * sched_domain_node_span - get a cpumask for a node's sched_domain
6453 * @node: node whose cpumask we're constructing
Randy Dunlap73486722008-04-22 10:07:22 -07006454 * @span: resulting cpumask
John Hawkes9c1cfda2005-09-06 15:18:14 -07006455 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006456 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07006457 * should be one that prevents unnecessary balancing, but also spreads tasks
6458 * out optimally.
6459 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306460static void sched_domain_node_span(int node, struct cpumask *span)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006461{
Mike Travisc5f59f02008-04-04 18:11:10 -07006462 nodemask_t used_nodes;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006463 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006464
Mike Travis6ca09df2008-12-31 18:08:45 -08006465 cpumask_clear(span);
Mike Travisc5f59f02008-04-04 18:11:10 -07006466 nodes_clear(used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006467
Mike Travis6ca09df2008-12-31 18:08:45 -08006468 cpumask_or(span, span, cpumask_of_node(node));
Mike Travisc5f59f02008-04-04 18:11:10 -07006469 node_set(node, used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006470
6471 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
Mike Travisc5f59f02008-04-04 18:11:10 -07006472 int next_node = find_next_best_node(node, &used_nodes);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006473
Mike Travis6ca09df2008-12-31 18:08:45 -08006474 cpumask_or(span, span, cpumask_of_node(next_node));
John Hawkes9c1cfda2005-09-06 15:18:14 -07006475 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006476}
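/*
 * Resulting span, sketched for a hypothetical 4-node box: for node 2 the
 * mask starts as cpumask_of_node(2) and then ORs in the CPUs of the next
 * closest nodes, up to SD_NODES_PER_DOMAIN (16) nodes in total; on a
 * small machine the NODE domain simply ends up spanning every node.
 */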
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006477#endif /* CONFIG_NUMA */
John Hawkes9c1cfda2005-09-06 15:18:14 -07006478
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006479int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006480
John Hawkes9c1cfda2005-09-06 15:18:14 -07006481/*
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306482 * The cpus mask in sched_group and sched_domain hangs off the end.
Ingo Molnar4200efd2009-05-19 09:22:19 +02006483 *
6484 * (See the comments in include/linux/sched.h:struct sched_group
6485 * and struct sched_domain.)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306486 */
6487struct static_sched_group {
6488 struct sched_group sg;
6489 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
6490};
6491
6492struct static_sched_domain {
6493 struct sched_domain sd;
6494 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
6495};
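/*
 * Layout sketch: struct sched_group ends in a zero-length cpumask, so the
 * DECLARE_BITMAP() above sits exactly where the trailing mask is expected
 * and supplies its storage. An accessor such as
 *
 *	sched_group_cpus(&per_cpu(sched_group_core, cpu).sg)
 *
 * (defined in include/linux/sched.h) then resolves to the 'cpus' bitmap
 * of the same static_sched_group.
 */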
6496
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006497struct s_data {
6498#ifdef CONFIG_NUMA
6499 int sd_allnodes;
6500 cpumask_var_t domainspan;
6501 cpumask_var_t covered;
6502 cpumask_var_t notcovered;
6503#endif
6504 cpumask_var_t nodemask;
6505 cpumask_var_t this_sibling_map;
6506 cpumask_var_t this_core_map;
6507 cpumask_var_t send_covered;
6508 cpumask_var_t tmpmask;
6509 struct sched_group **sched_group_nodes;
6510 struct root_domain *rd;
6511};
6512
Andreas Herrmann2109b992009-08-18 12:53:00 +02006513enum s_alloc {
6514 sa_sched_groups = 0,
6515 sa_rootdomain,
6516 sa_tmpmask,
6517 sa_send_covered,
6518 sa_this_core_map,
6519 sa_this_sibling_map,
6520 sa_nodemask,
6521 sa_sched_group_nodes,
6522#ifdef CONFIG_NUMA
6523 sa_notcovered,
6524 sa_covered,
6525 sa_domainspan,
6526#endif
6527 sa_none,
6528};
6529
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306530/*
Ingo Molnar48f24c42006-07-03 00:25:40 -07006531 * SMT sched-domains:
John Hawkes9c1cfda2005-09-06 15:18:14 -07006532 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006533#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306534static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
Tejun Heo1871e522009-10-29 22:34:13 +09006535static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006536
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006537static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306538cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
6539 struct sched_group **sg, struct cpumask *unused)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006540{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006541 if (sg)
Tejun Heo1871e522009-10-29 22:34:13 +09006542 *sg = &per_cpu(sched_groups, cpu).sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006543 return cpu;
6544}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006545#endif /* CONFIG_SCHED_SMT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006546
Ingo Molnar48f24c42006-07-03 00:25:40 -07006547/*
6548 * multi-core sched-domains:
6549 */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006550#ifdef CONFIG_SCHED_MC
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306551static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
6552static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006553#endif /* CONFIG_SCHED_MC */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006554
6555#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006556static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306557cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6558 struct sched_group **sg, struct cpumask *mask)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006559{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006560 int group;
Mike Travis7c16ec52008-04-04 18:11:11 -07006561
Rusty Russellc69fc562009-03-13 14:49:46 +10306562 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306563 group = cpumask_first(mask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006564 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306565 *sg = &per_cpu(sched_group_core, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006566 return group;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006567}
6568#elif defined(CONFIG_SCHED_MC)
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006569static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306570cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6571 struct sched_group **sg, struct cpumask *unused)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006572{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006573 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306574 *sg = &per_cpu(sched_group_core, cpu).sg;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006575 return cpu;
6576}
6577#endif
6578
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306579static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
6580static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006581
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006582static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306583cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
6584 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006585{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006586 int group;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006587#ifdef CONFIG_SCHED_MC
Mike Travis6ca09df2008-12-31 18:08:45 -08006588 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306589 group = cpumask_first(mask);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006590#elif defined(CONFIG_SCHED_SMT)
Rusty Russellc69fc562009-03-13 14:49:46 +10306591 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306592 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593#else
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006594 group = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006595#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006596 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306597 *sg = &per_cpu(sched_group_phys, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006598 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006599}
6600
6601#ifdef CONFIG_NUMA
John Hawkes9c1cfda2005-09-06 15:18:14 -07006602/*
6603 * init_sched_build_groups() can't handle what we want to do with node
6604 * groups, so roll our own. Now each node has its own list of groups which
6605 * gets dynamically allocated.
6606 */
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006607static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
Mike Travis434d53b2008-04-04 18:11:04 -07006608static struct sched_group ***sched_group_nodes_bycpu;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006609
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006610static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306611static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006612
Rusty Russell96f874e2008-11-25 02:35:14 +10306613static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
6614 struct sched_group **sg,
6615 struct cpumask *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006616{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006617 int group;
6618
Mike Travis6ca09df2008-12-31 18:08:45 -08006619 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306620 group = cpumask_first(nodemask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006621
6622 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306623 *sg = &per_cpu(sched_group_allnodes, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006624 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625}
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006626
Siddha, Suresh B08069032006-03-27 01:15:23 -08006627static void init_numa_sched_groups_power(struct sched_group *group_head)
6628{
6629 struct sched_group *sg = group_head;
6630 int j;
6631
6632 if (!sg)
6633 return;
Andi Kleen3a5c3592007-10-15 17:00:14 +02006634 do {
Rusty Russell758b2cd2008-11-25 02:35:04 +10306635 for_each_cpu(j, sched_group_cpus(sg)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02006636 struct sched_domain *sd;
Siddha, Suresh B08069032006-03-27 01:15:23 -08006637
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306638 sd = &per_cpu(phys_domains, j).sd;
Miao Xie13318a72009-04-15 09:59:10 +08006639 if (j != group_first_cpu(sd->groups)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02006640 /*
6641 * Only add "power" once for each
6642 * physical package.
6643 */
6644 continue;
6645 }
6646
Peter Zijlstra18a38852009-09-01 10:34:39 +02006647 sg->cpu_power += sd->groups->cpu_power;
Siddha, Suresh B08069032006-03-27 01:15:23 -08006648 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02006649 sg = sg->next;
6650 } while (sg != group_head);
Siddha, Suresh B08069032006-03-27 01:15:23 -08006651}
Andreas Herrmann0601a882009-08-18 13:01:11 +02006652
6653static int build_numa_sched_groups(struct s_data *d,
6654 const struct cpumask *cpu_map, int num)
6655{
6656 struct sched_domain *sd;
6657 struct sched_group *sg, *prev;
6658 int n, j;
6659
6660 cpumask_clear(d->covered);
6661 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
6662 if (cpumask_empty(d->nodemask)) {
6663 d->sched_group_nodes[num] = NULL;
6664 goto out;
6665 }
6666
6667 sched_domain_node_span(num, d->domainspan);
6668 cpumask_and(d->domainspan, d->domainspan, cpu_map);
6669
6670 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
6671 GFP_KERNEL, num);
6672 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006673 printk(KERN_WARNING "Can not alloc domain group for node %d\n",
6674 num);
Andreas Herrmann0601a882009-08-18 13:01:11 +02006675 return -ENOMEM;
6676 }
6677 d->sched_group_nodes[num] = sg;
6678
6679 for_each_cpu(j, d->nodemask) {
6680 sd = &per_cpu(node_domains, j).sd;
6681 sd->groups = sg;
6682 }
6683
Peter Zijlstra18a38852009-09-01 10:34:39 +02006684 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02006685 cpumask_copy(sched_group_cpus(sg), d->nodemask);
6686 sg->next = sg;
6687 cpumask_or(d->covered, d->covered, d->nodemask);
6688
6689 prev = sg;
6690 for (j = 0; j < nr_node_ids; j++) {
6691 n = (num + j) % nr_node_ids;
6692 cpumask_complement(d->notcovered, d->covered);
6693 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
6694 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
6695 if (cpumask_empty(d->tmpmask))
6696 break;
6697 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
6698 if (cpumask_empty(d->tmpmask))
6699 continue;
6700 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
6701 GFP_KERNEL, num);
6702 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006703 printk(KERN_WARNING
6704 "Can not alloc domain group for node %d\n", j);
Andreas Herrmann0601a882009-08-18 13:01:11 +02006705 return -ENOMEM;
6706 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02006707 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02006708 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
6709 sg->next = prev->next;
6710 cpumask_or(d->covered, d->covered, d->tmpmask);
6711 prev->next = sg;
6712 prev = sg;
6713 }
6714out:
6715 return 0;
6716}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006717#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006719#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006720/* Free memory allocated for various sched_group structures */
Rusty Russell96f874e2008-11-25 02:35:14 +10306721static void free_sched_groups(const struct cpumask *cpu_map,
6722 struct cpumask *nodemask)
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006723{
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006724 int cpu, i;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006725
Rusty Russellabcd0832008-11-25 02:35:02 +10306726 for_each_cpu(cpu, cpu_map) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006727 struct sched_group **sched_group_nodes
6728 = sched_group_nodes_bycpu[cpu];
6729
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006730 if (!sched_group_nodes)
6731 continue;
6732
Mike Travis076ac2a2008-05-12 21:21:12 +02006733 for (i = 0; i < nr_node_ids; i++) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006734 struct sched_group *oldsg, *sg = sched_group_nodes[i];
6735
Mike Travis6ca09df2008-12-31 18:08:45 -08006736 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306737 if (cpumask_empty(nodemask))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006738 continue;
6739
6740 if (sg == NULL)
6741 continue;
6742 sg = sg->next;
6743next_sg:
6744 oldsg = sg;
6745 sg = sg->next;
6746 kfree(oldsg);
6747 if (oldsg != sched_group_nodes[i])
6748 goto next_sg;
6749 }
6750 kfree(sched_group_nodes);
6751 sched_group_nodes_bycpu[cpu] = NULL;
6752 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006753}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006754#else /* !CONFIG_NUMA */
Rusty Russell96f874e2008-11-25 02:35:14 +10306755static void free_sched_groups(const struct cpumask *cpu_map,
6756 struct cpumask *nodemask)
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006757{
6758}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006759#endif /* CONFIG_NUMA */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006760
Linus Torvalds1da177e2005-04-16 15:20:36 -07006761/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006762 * Initialize sched groups cpu_power.
6763 *
6764 * cpu_power indicates the capacity of a sched group, which is used while
6765 * distributing the load between different sched groups in a sched domain.
6766 * Typically cpu_power for all the groups in a sched domain will be the same
6767 * unless there are asymmetries in the topology. If there are asymmetries, a
6768 * group having more cpu_power will pick up more load compared to a group
6769 * having less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006770 */
6771static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6772{
6773 struct sched_domain *child;
6774 struct sched_group *group;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006775 long power;
6776 int weight;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006777
6778 WARN_ON(!sd || !sd->groups);
6779
Miao Xie13318a72009-04-15 09:59:10 +08006780 if (cpu != group_first_cpu(sd->groups))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006781 return;
6782
6783 child = sd->child;
6784
Peter Zijlstra18a38852009-09-01 10:34:39 +02006785 sd->groups->cpu_power = 0;
Eric Dumazet5517d862007-05-08 00:32:57 -07006786
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006787 if (!child) {
6788 power = SCHED_LOAD_SCALE;
6789 weight = cpumask_weight(sched_domain_span(sd));
6790 /*
6791 * SMT siblings share the power of a single core.
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02006792 * Usually multiple threads get a better yield out of
6793 * that one core than a single thread would have;
6794 * reflect that in sd->smt_gain.
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006795 */
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02006796 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
6797 power *= sd->smt_gain;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006798 power /= weight;
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02006799 power >>= SCHED_LOAD_SHIFT;
6800 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02006801 sd->groups->cpu_power += power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006802 return;
6803 }
6804
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006805 /*
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006806 * Add cpu_power of each child group to this groups cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006807 */
6808 group = child->groups;
6809 do {
Peter Zijlstra18a38852009-09-01 10:34:39 +02006810 sd->groups->cpu_power += group->cpu_power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006811 group = group->next;
6812 } while (group != child->groups);
6813}
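/*
 * Worked example for the leaf case above (smt_gain value assumed from the
 * SD_SIBLING_INIT template, roughly 1.15 * SCHED_LOAD_SCALE): with two
 * SMT siblings,
 *
 *	power = 1024 * 1178 / 2 >> 10  ~=  589 per single-thread group
 *
 * so the pair together reports ~1178, a bit more than one full core,
 * while a non-SMT leaf contributes a plain SCHED_LOAD_SCALE (1024).
 */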
6814
6815/*
Mike Travis7c16ec52008-04-04 18:11:11 -07006816 * Initializers for schedule domains
6817 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6818 */
6819
Ingo Molnara5d8c342008-10-09 11:35:51 +02006820#ifdef CONFIG_SCHED_DEBUG
6821# define SD_INIT_NAME(sd, type) sd->name = #type
6822#else
6823# define SD_INIT_NAME(sd, type) do { } while (0)
6824#endif
6825
Mike Travis7c16ec52008-04-04 18:11:11 -07006826#define SD_INIT(sd, type) sd_init_##type(sd)
Ingo Molnara5d8c342008-10-09 11:35:51 +02006827
Mike Travis7c16ec52008-04-04 18:11:11 -07006828#define SD_INIT_FUNC(type) \
6829static noinline void sd_init_##type(struct sched_domain *sd) \
6830{ \
6831 memset(sd, 0, sizeof(*sd)); \
6832 *sd = SD_##type##_INIT; \
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006833 sd->level = SD_LV_##type; \
Ingo Molnara5d8c342008-10-09 11:35:51 +02006834 SD_INIT_NAME(sd, type); \
Mike Travis7c16ec52008-04-04 18:11:11 -07006835}
6836
6837SD_INIT_FUNC(CPU)
6838#ifdef CONFIG_NUMA
6839 SD_INIT_FUNC(ALLNODES)
6840 SD_INIT_FUNC(NODE)
6841#endif
6842#ifdef CONFIG_SCHED_SMT
6843 SD_INIT_FUNC(SIBLING)
6844#endif
6845#ifdef CONFIG_SCHED_MC
6846 SD_INIT_FUNC(MC)
6847#endif
6848
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006849static int default_relax_domain_level = -1;
6850
6851static int __init setup_relax_domain_level(char *str)
6852{
Li Zefan30e0e172008-05-13 10:27:17 +08006853 unsigned long val;
6854
6855 val = simple_strtoul(str, NULL, 0);
6856 if (val < SD_LV_MAX)
6857 default_relax_domain_level = val;
6858
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006859 return 1;
6860}
6861__setup("relax_domain_level=", setup_relax_domain_level);
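/*
 * Usage example: booting with
 *
 *	relax_domain_level=1
 *
 * makes set_domain_attribute() below clear SD_BALANCE_WAKE and
 * SD_BALANCE_NEWIDLE on every domain whose level exceeds the request,
 * and switch them on for the rest; with the SD_LV_* numbering that
 * roughly means only the lowest (sibling) level keeps the relatively
 * expensive newidle/wakeup balancing (level values assumed from
 * enum sched_domain_level).
 */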
6862
6863static void set_domain_attribute(struct sched_domain *sd,
6864 struct sched_domain_attr *attr)
6865{
6866 int request;
6867
6868 if (!attr || attr->relax_domain_level < 0) {
6869 if (default_relax_domain_level < 0)
6870 return;
6871 else
6872 request = default_relax_domain_level;
6873 } else
6874 request = attr->relax_domain_level;
6875 if (request < sd->level) {
6876 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006877 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006878 } else {
6879 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006880 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006881 }
6882}
6883
Andreas Herrmann2109b992009-08-18 12:53:00 +02006884static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6885 const struct cpumask *cpu_map)
6886{
6887 switch (what) {
6888 case sa_sched_groups:
6889 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
6890 d->sched_group_nodes = NULL;
6891 case sa_rootdomain:
6892 free_rootdomain(d->rd); /* fall through */
6893 case sa_tmpmask:
6894 free_cpumask_var(d->tmpmask); /* fall through */
6895 case sa_send_covered:
6896 free_cpumask_var(d->send_covered); /* fall through */
6897 case sa_this_core_map:
6898 free_cpumask_var(d->this_core_map); /* fall through */
6899 case sa_this_sibling_map:
6900 free_cpumask_var(d->this_sibling_map); /* fall through */
6901 case sa_nodemask:
6902 free_cpumask_var(d->nodemask); /* fall through */
6903 case sa_sched_group_nodes:
6904#ifdef CONFIG_NUMA
6905 kfree(d->sched_group_nodes); /* fall through */
6906 case sa_notcovered:
6907 free_cpumask_var(d->notcovered); /* fall through */
6908 case sa_covered:
6909 free_cpumask_var(d->covered); /* fall through */
6910 case sa_domainspan:
6911 free_cpumask_var(d->domainspan); /* fall through */
6912#endif
6913 case sa_none:
6914 break;
6915 }
6916}
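/*
 * The deliberate fall-through above makes enum s_alloc double as a
 * high-water mark: teardown is allocation run backwards from the point
 * reached. For example (hypothetical failure while allocating tmpmask):
 *
 *	__free_domain_allocs(&d, sa_send_covered, cpu_map);
 *
 * frees send_covered, this_core_map, this_sibling_map, nodemask and the
 * NUMA allocations, while the never-allocated tmpmask and root domain
 * are left untouched.
 */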
6917
6918static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6919 const struct cpumask *cpu_map)
6920{
6921#ifdef CONFIG_NUMA
6922 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
6923 return sa_none;
6924 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
6925 return sa_domainspan;
6926 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
6927 return sa_covered;
6928 /* Allocate the per-node list of sched groups */
6929 d->sched_group_nodes = kcalloc(nr_node_ids,
6930 sizeof(struct sched_group *), GFP_KERNEL);
6931 if (!d->sched_group_nodes) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006932 printk(KERN_WARNING "Can not alloc sched group node list\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02006933 return sa_notcovered;
6934 }
6935 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
6936#endif
6937 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
6938 return sa_sched_group_nodes;
6939 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
6940 return sa_nodemask;
6941 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
6942 return sa_this_sibling_map;
6943 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
6944 return sa_this_core_map;
6945 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
6946 return sa_send_covered;
6947 d->rd = alloc_rootdomain();
6948 if (!d->rd) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006949 printk(KERN_WARNING "Cannot alloc root domain\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02006950 return sa_tmpmask;
6951 }
6952 return sa_rootdomain;
6953}
6954
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02006955static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
6956 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
6957{
6958 struct sched_domain *sd = NULL;
6959#ifdef CONFIG_NUMA
6960 struct sched_domain *parent;
6961
6962 d->sd_allnodes = 0;
6963 if (cpumask_weight(cpu_map) >
6964 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
6965 sd = &per_cpu(allnodes_domains, i).sd;
6966 SD_INIT(sd, ALLNODES);
6967 set_domain_attribute(sd, attr);
6968 cpumask_copy(sched_domain_span(sd), cpu_map);
6969 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
6970 d->sd_allnodes = 1;
6971 }
6972 parent = sd;
6973
6974 sd = &per_cpu(node_domains, i).sd;
6975 SD_INIT(sd, NODE);
6976 set_domain_attribute(sd, attr);
6977 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
6978 sd->parent = parent;
6979 if (parent)
6980 parent->child = sd;
6981 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
6982#endif
6983 return sd;
6984}
6985
Andreas Herrmann87cce662009-08-18 12:54:55 +02006986static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
6987 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6988 struct sched_domain *parent, int i)
6989{
6990 struct sched_domain *sd;
6991 sd = &per_cpu(phys_domains, i).sd;
6992 SD_INIT(sd, CPU);
6993 set_domain_attribute(sd, attr);
6994 cpumask_copy(sched_domain_span(sd), d->nodemask);
6995 sd->parent = parent;
6996 if (parent)
6997 parent->child = sd;
6998 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
6999 return sd;
7000}
7001
Andreas Herrmann410c4082009-08-18 12:56:14 +02007002static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
7003 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7004 struct sched_domain *parent, int i)
7005{
7006 struct sched_domain *sd = parent;
7007#ifdef CONFIG_SCHED_MC
7008 sd = &per_cpu(core_domains, i).sd;
7009 SD_INIT(sd, MC);
7010 set_domain_attribute(sd, attr);
7011 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
7012 sd->parent = parent;
7013 parent->child = sd;
7014 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
7015#endif
7016 return sd;
7017}
7018
Andreas Herrmannd8173532009-08-18 12:57:03 +02007019static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
7020 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7021 struct sched_domain *parent, int i)
7022{
7023 struct sched_domain *sd = parent;
7024#ifdef CONFIG_SCHED_SMT
7025 sd = &per_cpu(cpu_domains, i).sd;
7026 SD_INIT(sd, SIBLING);
7027 set_domain_attribute(sd, attr);
7028 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
7029 sd->parent = parent;
7030 parent->child = sd;
7031 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
7032#endif
7033 return sd;
7034}
7035
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007036static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
7037 const struct cpumask *cpu_map, int cpu)
7038{
7039 switch (l) {
7040#ifdef CONFIG_SCHED_SMT
7041 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
7042 cpumask_and(d->this_sibling_map, cpu_map,
7043 topology_thread_cpumask(cpu));
7044 if (cpu == cpumask_first(d->this_sibling_map))
7045 init_sched_build_groups(d->this_sibling_map, cpu_map,
7046 &cpu_to_cpu_group,
7047 d->send_covered, d->tmpmask);
7048 break;
7049#endif
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007050#ifdef CONFIG_SCHED_MC
7051 case SD_LV_MC: /* set up multi-core groups */
7052 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
7053 if (cpu == cpumask_first(d->this_core_map))
7054 init_sched_build_groups(d->this_core_map, cpu_map,
7055 &cpu_to_core_group,
7056 d->send_covered, d->tmpmask);
7057 break;
7058#endif
Andreas Herrmann86548092009-08-18 12:59:28 +02007059 case SD_LV_CPU: /* set up physical groups */
7060 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
7061 if (!cpumask_empty(d->nodemask))
7062 init_sched_build_groups(d->nodemask, cpu_map,
7063 &cpu_to_phys_group,
7064 d->send_covered, d->tmpmask);
7065 break;
Andreas Herrmannde616e32009-08-18 13:00:13 +02007066#ifdef CONFIG_NUMA
7067 case SD_LV_ALLNODES:
7068 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
7069 d->send_covered, d->tmpmask);
7070 break;
7071#endif
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007072 default:
7073 break;
7074 }
7075}
7076
Mike Travis7c16ec52008-04-04 18:11:11 -07007077/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007078 * Build sched domains for a given set of cpus and attach the sched domains
7079 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07007080 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307081static int __build_sched_domains(const struct cpumask *cpu_map,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007082 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007083{
Andreas Herrmann2109b992009-08-18 12:53:00 +02007084 enum s_alloc alloc_state = sa_none;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007085 struct s_data d;
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007086 struct sched_domain *sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007087 int i;
John Hawkesd1b55132005-09-06 15:18:14 -07007088#ifdef CONFIG_NUMA
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007089 d.sd_allnodes = 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307090#endif
7091
Andreas Herrmann2109b992009-08-18 12:53:00 +02007092 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7093 if (alloc_state != sa_rootdomain)
7094 goto error;
7095 alloc_state = sa_sched_groups;
Mike Travis7c16ec52008-04-04 18:11:11 -07007096
Linus Torvalds1da177e2005-04-16 15:20:36 -07007097 /*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007098 * Set up domains for cpus specified by the cpu_map.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007099 */
Rusty Russellabcd0832008-11-25 02:35:02 +10307100 for_each_cpu(i, cpu_map) {
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007101 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
7102 cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007103
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007104 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
Andreas Herrmann87cce662009-08-18 12:54:55 +02007105 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmann410c4082009-08-18 12:56:14 +02007106 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmannd8173532009-08-18 12:57:03 +02007107 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007108 }
7109
Rusty Russellabcd0832008-11-25 02:35:02 +10307110 for_each_cpu(i, cpu_map) {
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007111 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007112 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007113 }
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007114
Linus Torvalds1da177e2005-04-16 15:20:36 -07007115 /* Set up physical groups */
Andreas Herrmann86548092009-08-18 12:59:28 +02007116 for (i = 0; i < nr_node_ids; i++)
7117 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007118
7119#ifdef CONFIG_NUMA
7120 /* Set up node groups */
Andreas Herrmannde616e32009-08-18 13:00:13 +02007121 if (d.sd_allnodes)
7122 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007123
Andreas Herrmann0601a882009-08-18 13:01:11 +02007124 for (i = 0; i < nr_node_ids; i++)
7125 if (build_numa_sched_groups(&d, cpu_map, i))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007126 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007127#endif
7128
7129 /* Calculate CPU power for physical packages and nodes */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007130#ifdef CONFIG_SCHED_SMT
Rusty Russellabcd0832008-11-25 02:35:02 +10307131 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007132 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007133 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007134 }
7135#endif
7136#ifdef CONFIG_SCHED_MC
Rusty Russellabcd0832008-11-25 02:35:02 +10307137 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007138 sd = &per_cpu(core_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007139 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007140 }
7141#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007142
Rusty Russellabcd0832008-11-25 02:35:02 +10307143 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007144 sd = &per_cpu(phys_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007145 init_sched_groups_power(i, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007146 }
7147
John Hawkes9c1cfda2005-09-06 15:18:14 -07007148#ifdef CONFIG_NUMA
Mike Travis076ac2a2008-05-12 21:21:12 +02007149 for (i = 0; i < nr_node_ids; i++)
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007150 init_numa_sched_groups_power(d.sched_group_nodes[i]);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007151
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007152 if (d.sd_allnodes) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007153 struct sched_group *sg;
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007154
Rusty Russell96f874e2008-11-25 02:35:14 +10307155 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007156 d.tmpmask);
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007157 init_numa_sched_groups_power(sg);
7158 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07007159#endif
7160
Linus Torvalds1da177e2005-04-16 15:20:36 -07007161 /* Attach the domains */
Rusty Russellabcd0832008-11-25 02:35:02 +10307162 for_each_cpu(i, cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007163#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307164 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007165#elif defined(CONFIG_SCHED_MC)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307166 sd = &per_cpu(core_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007167#else
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307168 sd = &per_cpu(phys_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007169#endif
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007170 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007171 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007172
Andreas Herrmann2109b992009-08-18 12:53:00 +02007173 d.sched_group_nodes = NULL; /* don't free this we still need it */
7174 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
7175 return 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307176
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007177error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02007178 __free_domain_allocs(&d, alloc_state, cpu_map);
7179 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007180}
Paul Jackson029190c2007-10-18 23:40:20 -07007181
Rusty Russell96f874e2008-11-25 02:35:14 +10307182static int build_sched_domains(const struct cpumask *cpu_map)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007183{
7184 return __build_sched_domains(cpu_map, NULL);
7185}
7186
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307187static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07007188static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02007189static struct sched_domain_attr *dattr_cur;
7190 /* attributes of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07007191
7192/*
7193 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10307194 * cpumask) fails, then fall back to a single sched domain,
7195 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07007196 */
Rusty Russell42128232008-11-25 02:35:12 +10307197static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07007198
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007199/*
7200 * arch_update_cpu_topology lets virtualized architectures update the
7201 * cpu core maps. It is supposed to return 1 if the topology changed
7202 * or 0 if it stayed the same.
7203 */
7204int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01007205{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007206 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01007207}
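/*
 * Being weak, the stub above is replaced at link time when an
 * architecture provides its own definition. A sketch of such an override
 * (hypothetical helper, illustrative only):
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return topology_changed_since_last_check() ? 1 : 0;
 *	}
 *
 * Returning 1 makes partition_sched_domains() below skip the "unchanged
 * domain" matching and rebuild everything.
 */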
7208
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307209cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7210{
7211 int i;
7212 cpumask_var_t *doms;
7213
7214 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7215 if (!doms)
7216 return NULL;
7217 for (i = 0; i < ndoms; i++) {
7218 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7219 free_sched_domains(doms, i);
7220 return NULL;
7221 }
7222 }
7223 return doms;
7224}
7225
7226void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7227{
7228 unsigned int i;
7229 for (i = 0; i < ndoms; i++)
7230 free_cpumask_var(doms[i]);
7231 kfree(doms);
7232}
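/*
 * Typical caller pattern (sketch; mask_a/mask_b are hypothetical, and the
 * cpuset code does something along these lines when rebuilding
 * partitions):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * partition_sched_domains() takes ownership and will free_sched_domains()
 * the array itself when done; see its comment below, and call it with the
 * hotplug lock held.
 */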
7233
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007234/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007235 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07007236 * For now this just excludes isolated cpus, but could be used to
7237 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007238 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307239static int arch_init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007240{
Milton Miller73785472007-10-24 18:23:48 +02007241 int err;
7242
Heiko Carstens22e52b02008-03-12 18:31:59 +01007243 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07007244 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307245 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07007246 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307247 doms_cur = &fallback_doms;
7248 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007249 dattr_cur = NULL;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307250 err = build_sched_domains(doms_cur[0]);
Milton Miller6382bc92007-10-15 17:00:19 +02007251 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02007252
7253 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007254}
7255
Rusty Russell96f874e2008-11-25 02:35:14 +10307256static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7257 struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007258{
Mike Travis7c16ec52008-04-04 18:11:11 -07007259 free_sched_groups(cpu_map, tmpmask);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007260}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007261
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007262/*
7263 * Detach sched domains from a group of cpus specified in cpu_map
7264 * These cpus will now be attached to the NULL domain
7265 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307266static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007267{
Rusty Russell96f874e2008-11-25 02:35:14 +10307268 /* Save because hotplug lock held. */
7269 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007270 int i;
7271
Rusty Russellabcd0832008-11-25 02:35:02 +10307272 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007273 cpu_attach_domain(NULL, &def_root_domain, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007274 synchronize_sched();
Rusty Russell96f874e2008-11-25 02:35:14 +10307275 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007276}
7277
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007278/* handle null as "default" */
7279static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7280 struct sched_domain_attr *new, int idx_new)
7281{
7282 struct sched_domain_attr tmp;
7283
7284 /* fast path */
7285 if (!new && !cur)
7286 return 1;
7287
7288 tmp = SD_ATTR_INIT;
7289 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7290 new ? (new + idx_new) : &tmp,
7291 sizeof(struct sched_domain_attr));
7292}
7293
Paul Jackson029190c2007-10-18 23:40:20 -07007294/*
7295 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007296 * cpumasks in the array doms_new[]. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07007297 * doms_new[] to the current sched domain partitioning, doms_cur[].
7298 * It destroys each deleted domain and builds each new domain.
7299 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307300 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007301 * The masks don't intersect (don't overlap). We should set up one
7302 * sched domain for each mask. CPUs not in any of the cpumasks will
7303 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07007304 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7305 * it as it is.
7306 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307307 * The passed-in 'doms_new' should be allocated using
7308 * alloc_sched_domains. This routine takes ownership of it and will
7309 * free_sched_domains it when done with it. If the caller failed the
7310 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7311 * and partition_sched_domains() will fall back to the single partition
7312 * 'fallback_doms'; this also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07007313 *
Rusty Russell96f874e2008-11-25 02:35:14 +10307314 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08007315 * ndoms_new == 0 is a special case for destroying existing domains,
7316 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007317 *
Paul Jackson029190c2007-10-18 23:40:20 -07007318 * Call with hotplug lock held
7319 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307320void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007321 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07007322{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007323 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007324 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07007325
Heiko Carstens712555e2008-04-28 11:33:07 +02007326 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007327
Milton Miller73785472007-10-24 18:23:48 +02007328 /* always unregister in case we don't destroy any domains */
7329 unregister_sched_domain_sysctl();
7330
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007331 /* Let architecture update cpu core mappings. */
7332 new_topology = arch_update_cpu_topology();
7333
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007334 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07007335
7336 /* Destroy deleted domains */
7337 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007338 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307339 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007340 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007341 goto match1;
7342 }
7343 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307344 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07007345match1:
7346 ;
7347 }
7348
Max Krasnyanskye761b772008-07-15 04:43:49 -07007349 if (doms_new == NULL) {
7350 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307351 doms_new = &fallback_doms;
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007352 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08007353 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007354 }
7355
Paul Jackson029190c2007-10-18 23:40:20 -07007356 /* Build new domains */
7357 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007358 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307359 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007360 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007361 goto match2;
7362 }
 7363 /* no match - build a new sched domain from doms_new[i] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307364 __build_sched_domains(doms_new[i],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007365 dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07007366match2:
7367 ;
7368 }
7369
7370 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307371 if (doms_cur != &fallback_doms)
7372 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007373 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07007374 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007375 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07007376 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02007377
7378 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007379
Heiko Carstens712555e2008-04-28 11:33:07 +02007380 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07007381}
7382
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007383#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Li Zefanc70f22d2009-01-05 19:07:50 +08007384static void arch_reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007385{
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007386 get_online_cpus();
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007387
7388 /* Destroy domains first to force the rebuild */
7389 partition_sched_domains(0, NULL, NULL);
7390
Max Krasnyanskye761b772008-07-15 04:43:49 -07007391 rebuild_sched_domains();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007392 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007393}
7394
7395static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7396{
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307397 unsigned int level = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007398
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307399 if (sscanf(buf, "%u", &level) != 1)
7400 return -EINVAL;
7401
7402 /*
 7403	 * level is unsigned, so there is no need to check for
 7404	 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
 7405	 * XXX: what happens on a 0- or 1-byte write? Do we
 7406	 * need to check count as well?
7407 */
7408
7409 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007410 return -EINVAL;
7411
7412 if (smt)
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307413 sched_smt_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007414 else
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307415 sched_mc_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007416
Li Zefanc70f22d2009-01-05 19:07:50 +08007417 arch_reinit_sched_domains();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007418
Li Zefanc70f22d2009-01-05 19:07:50 +08007419 return count;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007420}
7421
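/*
 * Illustrative usage (sketch): the attributes defined below appear
 * under the cpu sysdev class, so an administrator would request a
 * power-savings level with e.g.:
 *
 *	echo 1 > /sys/devices/system/cpu/sched_mc_power_savings
 *	echo 1 > /sys/devices/system/cpu/sched_smt_power_savings
 *
 * where valid levels run from 0 to MAX_POWERSAVINGS_BALANCE_LEVELS - 1.
 */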
Adrian Bunk6707de002007-08-12 18:08:19 +02007422#ifdef CONFIG_SCHED_MC
Andi Kleenf718cd42008-07-29 22:33:52 -07007423static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007424 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007425 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007426{
7427 return sprintf(page, "%u\n", sched_mc_power_savings);
7428}
Andi Kleenf718cd42008-07-29 22:33:52 -07007429static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007430 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007431 const char *buf, size_t count)
7432{
7433 return sched_power_savings_store(buf, count, 0);
7434}
Andi Kleenf718cd42008-07-29 22:33:52 -07007435static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7436 sched_mc_power_savings_show,
7437 sched_mc_power_savings_store);
Adrian Bunk6707de002007-08-12 18:08:19 +02007438#endif
7439
7440#ifdef CONFIG_SCHED_SMT
Andi Kleenf718cd42008-07-29 22:33:52 -07007441static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007442 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007443 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007444{
7445 return sprintf(page, "%u\n", sched_smt_power_savings);
7446}
Andi Kleenf718cd42008-07-29 22:33:52 -07007447static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007448 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007449 const char *buf, size_t count)
7450{
7451 return sched_power_savings_store(buf, count, 1);
7452}
Andi Kleenf718cd42008-07-29 22:33:52 -07007453static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7454 sched_smt_power_savings_show,
Adrian Bunk6707de002007-08-12 18:08:19 +02007455 sched_smt_power_savings_store);
7456#endif
7457
Li Zefan39aac642009-01-05 19:18:02 +08007458int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007459{
7460 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007461
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007462#ifdef CONFIG_SCHED_SMT
7463 if (smt_capable())
7464 err = sysfs_create_file(&cls->kset.kobj,
7465 &attr_sched_smt_power_savings.attr);
7466#endif
7467#ifdef CONFIG_SCHED_MC
7468 if (!err && mc_capable())
7469 err = sysfs_create_file(&cls->kset.kobj,
7470 &attr_sched_mc_power_savings.attr);
7471#endif
7472 return err;
7473}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007474#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007475
Max Krasnyanskye761b772008-07-15 04:43:49 -07007476#ifndef CONFIG_CPUSETS
Linus Torvalds1da177e2005-04-16 15:20:36 -07007477/*
Max Krasnyanskye761b772008-07-15 04:43:49 -07007478 * Add online and remove offline CPUs from the scheduler domains.
7479 * When cpusets are enabled they take over this function.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007480 */
7481static int update_sched_domains(struct notifier_block *nfb,
7482 unsigned long action, void *hcpu)
7483{
Max Krasnyanskye761b772008-07-15 04:43:49 -07007484 switch (action) {
7485 case CPU_ONLINE:
7486 case CPU_ONLINE_FROZEN:
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007487 case CPU_DOWN_PREPARE:
7488 case CPU_DOWN_PREPARE_FROZEN:
7489 case CPU_DOWN_FAILED:
7490 case CPU_DOWN_FAILED_FROZEN:
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007491 partition_sched_domains(1, NULL, NULL);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007492 return NOTIFY_OK;
7493
7494 default:
7495 return NOTIFY_DONE;
7496 }
7497}
7498#endif
7499
7500static int update_runtime(struct notifier_block *nfb,
7501 unsigned long action, void *hcpu)
7502{
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007503 int cpu = (int)(long)hcpu;
7504
Linus Torvalds1da177e2005-04-16 15:20:36 -07007505 switch (action) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007506 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007507 case CPU_DOWN_PREPARE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007508 disable_runtime(cpu_rq(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007509 return NOTIFY_OK;
7510
Linus Torvalds1da177e2005-04-16 15:20:36 -07007511 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007512 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007514 case CPU_ONLINE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007515 enable_runtime(cpu_rq(cpu));
Max Krasnyanskye761b772008-07-15 04:43:49 -07007516 return NOTIFY_OK;
7517
Linus Torvalds1da177e2005-04-16 15:20:36 -07007518 default:
7519 return NOTIFY_DONE;
7520 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007521}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007522
7523void __init sched_init_smp(void)
7524{
Rusty Russelldcc30a32008-11-25 02:35:12 +10307525 cpumask_var_t non_isolated_cpus;
7526
7527 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08007528 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007529
Mike Travis434d53b2008-04-04 18:11:04 -07007530#if defined(CONFIG_NUMA)
7531 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7532 GFP_KERNEL);
7533 BUG_ON(sched_group_nodes_bycpu == NULL);
7534#endif
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007535 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02007536 mutex_lock(&sched_domains_mutex);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007537 arch_init_sched_domains(cpu_active_mask);
Rusty Russelldcc30a32008-11-25 02:35:12 +10307538 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7539 if (cpumask_empty(non_isolated_cpus))
7540 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02007541 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007542 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007543
7544#ifndef CONFIG_CPUSETS
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545 /* XXX: Theoretical race here - CPU may be hotplugged now */
7546 hotcpu_notifier(update_sched_domains, 0);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007547#endif
7548
7549 /* RT runtime code needs to handle some hotplug events */
7550 hotcpu_notifier(update_runtime, 0);
7551
Peter Zijlstrab328ca12008-04-29 10:02:46 +02007552 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07007553
7554 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10307555 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07007556 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01007557 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10307558 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10307559
Rusty Russell0e3900e2008-11-25 02:35:13 +10307560 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007561}
7562#else
7563void __init sched_init_smp(void)
7564{
Ingo Molnar19978ca2007-11-09 22:39:38 +01007565 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007566}
7567#endif /* CONFIG_SMP */
7568
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05307569const_debug unsigned int sysctl_timer_migration = 1;
7570
Linus Torvalds1da177e2005-04-16 15:20:36 -07007571int in_sched_functions(unsigned long addr)
7572{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007573 return in_lock_functions(addr) ||
7574 (addr >= (unsigned long)__sched_text_start
7575 && addr < (unsigned long)__sched_text_end);
7576}
7577
Alexey Dobriyana9957442007-10-15 17:00:13 +02007578static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02007579{
7580 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02007581 INIT_LIST_HEAD(&cfs_rq->tasks);
Ingo Molnardd41f592007-07-09 18:51:59 +02007582#ifdef CONFIG_FAIR_GROUP_SCHED
7583 cfs_rq->rq = rq;
7584#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02007585 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02007586}
7587
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007588static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7589{
7590 struct rt_prio_array *array;
7591 int i;
7592
7593 array = &rt_rq->active;
7594 for (i = 0; i < MAX_RT_PRIO; i++) {
7595 INIT_LIST_HEAD(array->queue + i);
7596 __clear_bit(i, array->bitmap);
7597 }
7598 /* delimiter for bitsearch: */
7599 __set_bit(MAX_RT_PRIO, array->bitmap);
7600
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007601#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -05007602 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins398a1532009-01-14 09:10:04 -05007603#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -05007604 rt_rq->highest_prio.next = MAX_RT_PRIO;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01007605#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007606#endif
7607#ifdef CONFIG_SMP
7608 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007609 rt_rq->overloaded = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007610 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007611#endif
7612
7613 rt_rq->rt_time = 0;
7614 rt_rq->rt_throttled = 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007615 rt_rq->rt_runtime = 0;
Thomas Gleixner0986b112009-11-17 15:32:06 +01007616 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007617
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007618#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01007619 rt_rq->rt_nr_boosted = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007620 rt_rq->rq = rq;
7621#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007622}
7623
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007624#ifdef CONFIG_FAIR_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007625static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7626 struct sched_entity *se, int cpu, int add,
7627 struct sched_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007628{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007629 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007630 tg->cfs_rq[cpu] = cfs_rq;
7631 init_cfs_rq(cfs_rq, rq);
7632 cfs_rq->tg = tg;
7633 if (add)
7634 list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
7635
7636 tg->se[cpu] = se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02007637 /* se could be NULL for init_task_group */
7638 if (!se)
7639 return;
7640
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007641 if (!parent)
7642 se->cfs_rq = &rq->cfs;
7643 else
7644 se->cfs_rq = parent->my_q;
7645
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007646 se->my_q = cfs_rq;
7647 se->load.weight = tg->shares;
Peter Zijlstrae05510d2008-05-05 23:56:17 +02007648 se->load.inv_weight = 0;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007649 se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007650}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007651#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007652
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007653#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007654static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
7655 struct sched_rt_entity *rt_se, int cpu, int add,
7656 struct sched_rt_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007657{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007658 struct rq *rq = cpu_rq(cpu);
7659
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007660 tg->rt_rq[cpu] = rt_rq;
7661 init_rt_rq(rt_rq, rq);
7662 rt_rq->tg = tg;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007663 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007664 if (add)
7665 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
7666
7667 tg->rt_se[cpu] = rt_se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02007668 if (!rt_se)
7669 return;
7670
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007671 if (!parent)
7672 rt_se->rt_rq = &rq->rt;
7673 else
7674 rt_se->rt_rq = parent->my_q;
7675
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007676 rt_se->my_q = rt_rq;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007677 rt_se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007678 INIT_LIST_HEAD(&rt_se->run_list);
7679}
7680#endif
7681
Linus Torvalds1da177e2005-04-16 15:20:36 -07007682void __init sched_init(void)
7683{
Ingo Molnardd41f592007-07-09 18:51:59 +02007684 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07007685 unsigned long alloc_size = 0, ptr;
7686
7687#ifdef CONFIG_FAIR_GROUP_SCHED
7688 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7689#endif
7690#ifdef CONFIG_RT_GROUP_SCHED
7691 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7692#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307693#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10307694 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307695#endif
Mike Travis434d53b2008-04-04 18:11:04 -07007696 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03007697 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07007698
7699#ifdef CONFIG_FAIR_GROUP_SCHED
7700 init_task_group.se = (struct sched_entity **)ptr;
7701 ptr += nr_cpu_ids * sizeof(void **);
7702
7703 init_task_group.cfs_rq = (struct cfs_rq **)ptr;
7704 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02007705
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007706#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07007707#ifdef CONFIG_RT_GROUP_SCHED
7708 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
7709 ptr += nr_cpu_ids * sizeof(void **);
7710
7711 init_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02007712 ptr += nr_cpu_ids * sizeof(void **);
7713
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007714#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307715#ifdef CONFIG_CPUMASK_OFFSTACK
7716 for_each_possible_cpu(i) {
7717 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
7718 ptr += cpumask_size();
7719 }
7720#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07007721 }
Ingo Molnardd41f592007-07-09 18:51:59 +02007722
Gregory Haskins57d885f2008-01-25 21:08:18 +01007723#ifdef CONFIG_SMP
7724 init_defrootdomain();
7725#endif
7726
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007727 init_rt_bandwidth(&def_rt_bandwidth,
7728 global_rt_period(), global_rt_runtime());
7729
7730#ifdef CONFIG_RT_GROUP_SCHED
7731 init_rt_bandwidth(&init_task_group.rt_bandwidth,
7732 global_rt_period(), global_rt_runtime());
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007733#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007734
Dhaval Giani7c941432010-01-20 13:26:18 +01007735#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007736 list_add(&init_task_group.list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02007737 INIT_LIST_HEAD(&init_task_group.children);
7738
Dhaval Giani7c941432010-01-20 13:26:18 +01007739#endif /* CONFIG_CGROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007740
Jiri Kosina4a6cc4b2009-10-29 00:26:00 +09007741#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
7742 update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
7743 __alignof__(unsigned long));
7744#endif
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08007745 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07007746 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007747
7748 rq = cpu_rq(i);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007749 raw_spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07007750 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02007751 rq->calc_load_active = 0;
7752 rq->calc_load_update = jiffies + LOAD_FREQ;
Ingo Molnardd41f592007-07-09 18:51:59 +02007753 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007754 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007755#ifdef CONFIG_FAIR_GROUP_SCHED
7756 init_task_group.shares = init_task_group_load;
7757 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02007758#ifdef CONFIG_CGROUP_SCHED
7759 /*
7760 * How much cpu bandwidth does init_task_group get?
7761 *
 7762	 * In case of task-groups formed through the cgroup filesystem, it
7763 * gets 100% of the cpu resources in the system. This overall
7764 * system cpu resource is divided among the tasks of
7765 * init_task_group and its child task-groups in a fair manner,
7766 * based on each entity's (task or task-group's) weight
7767 * (se->load.weight).
7768 *
 7769	 * In other words, if init_task_group has 10 tasks (each of weight
 7770	 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7771 * then A0's share of the cpu resource is:
7772 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02007773 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02007774 *
7775 * We achieve this by letting init_task_group's tasks sit
 7776	 * directly in rq->cfs (i.e. init_task_group->se[] = NULL).
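	 *
	 * More generally, each entity e on a CFS runqueue receives
	 *
	 *	bandwidth(e) = weight(e) / (sum of weights on that runqueue)
	 *
	 * of its parent's bandwidth, applied recursively down the hierarchy.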
7777 */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007778 init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007779#endif
Dhaval Giani354d60c2008-04-19 19:44:59 +02007780#endif /* CONFIG_FAIR_GROUP_SCHED */
7781
7782 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007783#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007784 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02007785#ifdef CONFIG_CGROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007786 init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02007787#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007788#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007789
Ingo Molnardd41f592007-07-09 18:51:59 +02007790 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7791 rq->cpu_load[j] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007792#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07007793 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007794 rq->rd = NULL;
Gregory Haskins3f029d32009-07-29 11:08:47 -04007795 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007796 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02007797 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007798 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07007799 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04007800 rq->online = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007801 rq->migration_thread = NULL;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01007802 rq->idle_stamp = 0;
7803 rq->avg_idle = 2*sysctl_sched_migration_cost;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007804 INIT_LIST_HEAD(&rq->migration_queue);
Gregory Haskinsdc938522008-01-25 21:08:26 +01007805 rq_attach_root(rq, &def_root_domain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007806#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01007807 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007808 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007809 }
7810
Peter Williams2dd73a42006-06-27 02:54:34 -07007811 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07007812
Avi Kivitye107be32007-07-26 13:40:43 +02007813#ifdef CONFIG_PREEMPT_NOTIFIERS
7814 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7815#endif
7816
Christoph Lameterc9819f42006-12-10 02:20:25 -08007817#ifdef CONFIG_SMP
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007818 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
Christoph Lameterc9819f42006-12-10 02:20:25 -08007819#endif
7820
Heiko Carstensb50f60c2006-07-30 03:03:52 -07007821#ifdef CONFIG_RT_MUTEXES
Thomas Gleixner1d615482009-11-17 14:54:03 +01007822 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07007823#endif
7824
Linus Torvalds1da177e2005-04-16 15:20:36 -07007825 /*
7826 * The boot idle thread does lazy MMU switching as well:
7827 */
7828 atomic_inc(&init_mm.mm_count);
7829 enter_lazy_tlb(&init_mm, current);
7830
7831 /*
7832 * Make us the idle thread. Technically, schedule() should not be
7833 * called from this thread, however somewhere below it might be,
7834 * but because we are the idle thread, we just pick up running again
7835 * when this runqueue becomes "idle".
7836 */
7837 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02007838
7839 calc_load_update = jiffies + LOAD_FREQ;
7840
Ingo Molnardd41f592007-07-09 18:51:59 +02007841 /*
7842 * During early bootup we pretend to be a normal task:
7843 */
7844 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01007845
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10307846 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
Rusty Russell49557e62009-11-02 20:37:20 +10307847 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10307848#ifdef CONFIG_SMP
Rusty Russell7d1e6a92008-11-25 02:35:09 +10307849#ifdef CONFIG_NO_HZ
Rusty Russell49557e62009-11-02 20:37:20 +10307850 zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
Pekka Enberg4bdddf82009-06-11 08:35:27 +03007851 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10307852#endif
Rusty Russellbdddd292009-12-02 14:09:16 +10307853 /* May be allocated at isolcpus cmdline parse time */
7854 if (cpu_isolated_map == NULL)
7855 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10307856#endif /* SMP */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10307857
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007858 perf_event_init();
Ingo Molnar0d905bc2009-05-04 19:13:30 +02007859
Ingo Molnar6892b752008-02-13 14:02:36 +01007860 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007861}
7862
7863#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02007864static inline int preempt_count_equals(int preempt_offset)
7865{
Frederic Weisbecker234da7b2009-12-16 20:21:05 +01007866 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02007867
7868 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
7869}
7870
Simon Kagstromd8948372009-12-23 11:08:18 +01007871void __might_sleep(const char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007872{
Ingo Molnar48f24c42006-07-03 00:25:40 -07007873#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07007874 static unsigned long prev_jiffy; /* ratelimiting */
7875
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02007876 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
7877 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02007878 return;
7879 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7880 return;
7881 prev_jiffy = jiffies;
7882
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007883 printk(KERN_ERR
7884 "BUG: sleeping function called from invalid context at %s:%d\n",
7885 file, line);
7886 printk(KERN_ERR
7887 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7888 in_atomic(), irqs_disabled(),
7889 current->pid, current->comm);
Ingo Molnaraef745f2008-08-28 11:34:43 +02007890
7891 debug_show_held_locks(current);
7892 if (irqs_disabled())
7893 print_irqtrace_events(current);
7894 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007895#endif
7896}
7897EXPORT_SYMBOL(__might_sleep);
7898#endif
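
/*
 * Typical use is via the might_sleep() family of macros rather than a
 * direct call; a sketch (hypothetical caller, real macro):
 *
 *	void my_helper_that_may_block(void)
 *	{
 *		might_sleep();
 *		mutex_lock(&my_mutex);
 *		mutex_unlock(&my_mutex);
 *	}
 *
 * Under CONFIG_DEBUG_SPINLOCK_SLEEP this ends up in __might_sleep()
 * above and complains if the caller is in atomic context.
 */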
7899
7900#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02007901static void normalize_task(struct rq *rq, struct task_struct *p)
7902{
7903 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02007904
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02007905 update_rq_clock(rq);
7906 on_rq = p->se.on_rq;
7907 if (on_rq)
7908 deactivate_task(rq, p, 0);
7909 __setscheduler(rq, p, SCHED_NORMAL, 0);
7910 if (on_rq) {
7911 activate_task(rq, p, 0);
7912 resched_task(rq->curr);
7913 }
7914}
7915
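/*
 * normalize_rt_tasks() is wired to the magic SysRq 'n' key (hence the
 * CONFIG_MAGIC_SYSRQ guard above): e.g. "echo n > /proc/sysrq-trigger"
 * demotes all user-space RT tasks back to SCHED_NORMAL.
 */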
Linus Torvalds1da177e2005-04-16 15:20:36 -07007916void normalize_rt_tasks(void)
7917{
Ingo Molnara0f98a12007-06-17 18:37:45 +02007918 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007919 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07007920 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007921
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01007922 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02007923 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02007924 /*
7925 * Only normalize user tasks:
7926 */
7927 if (!p->mm)
7928 continue;
7929
Ingo Molnardd41f592007-07-09 18:51:59 +02007930 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02007931#ifdef CONFIG_SCHEDSTATS
7932 p->se.wait_start = 0;
7933 p->se.sleep_start = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02007934 p->se.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02007935#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02007936
7937 if (!rt_task(p)) {
7938 /*
7939 * Renice negative nice level userspace
7940 * tasks back to 0:
7941 */
7942 if (TASK_NICE(p) < 0 && p->mm)
7943 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007944 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02007945 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007946
Thomas Gleixner1d615482009-11-17 14:54:03 +01007947 raw_spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07007948 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007949
Ingo Molnar178be792007-10-15 17:00:18 +02007950 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02007951
Ingo Molnarb29739f2006-06-27 02:54:51 -07007952 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01007953 raw_spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02007954 } while_each_thread(g, p);
7955
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01007956 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007957}
7958
7959#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07007960
7961#ifdef CONFIG_IA64
7962/*
7963 * These functions are only useful for the IA64 MCA handling.
7964 *
7965 * They can only be called when the whole system has been
7966 * stopped - every CPU needs to be quiescent, and no scheduling
7967 * activity can take place. Using them for anything else would
7968 * be a serious bug, and as a result, they aren't even visible
7969 * under any other configuration.
7970 */
7971
7972/**
7973 * curr_task - return the current task for a given cpu.
7974 * @cpu: the processor in question.
7975 *
7976 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7977 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07007978struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07007979{
7980 return cpu_curr(cpu);
7981}
7982
7983/**
7984 * set_curr_task - set the current task for a given cpu.
7985 * @cpu: the processor in question.
7986 * @p: the task pointer to set.
7987 *
7988 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007989 * are serviced on a separate stack. It allows the architecture to switch the
7990 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07007991 * must be called with all CPUs synchronized and interrupts disabled; the
 7992 * caller must save the original value of the current task (see
 7993 * curr_task() above) and restore that value before re-enabling interrupts
 7994 * and restarting the system.
7995 *
7996 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7997 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07007998void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07007999{
8000 cpu_curr(cpu) = p;
8001}
8002
8003#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008004
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008005#ifdef CONFIG_FAIR_GROUP_SCHED
8006static void free_fair_sched_group(struct task_group *tg)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008007{
8008 int i;
8009
8010 for_each_possible_cpu(i) {
8011 if (tg->cfs_rq)
8012 kfree(tg->cfs_rq[i]);
8013 if (tg->se)
8014 kfree(tg->se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008015 }
8016
8017 kfree(tg->cfs_rq);
8018 kfree(tg->se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008019}
8020
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008021static
8022int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008023{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008024 struct cfs_rq *cfs_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008025 struct sched_entity *se;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008026 struct rq *rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008027 int i;
8028
Mike Travis434d53b2008-04-04 18:11:04 -07008029 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008030 if (!tg->cfs_rq)
8031 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008032 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008033 if (!tg->se)
8034 goto err;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008035
8036 tg->shares = NICE_0_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008037
8038 for_each_possible_cpu(i) {
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008039 rq = cpu_rq(i);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008040
Li Zefaneab17222008-10-29 17:03:22 +08008041 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8042 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008043 if (!cfs_rq)
8044 goto err;
8045
Li Zefaneab17222008-10-29 17:03:22 +08008046 se = kzalloc_node(sizeof(struct sched_entity),
8047 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008048 if (!se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008049 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008050
Li Zefaneab17222008-10-29 17:03:22 +08008051 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008052 }
8053
8054 return 1;
8055
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008056 err_free_rq:
8057 kfree(cfs_rq);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008058 err:
8059 return 0;
8060}
8061
8062static inline void register_fair_sched_group(struct task_group *tg, int cpu)
8063{
8064 list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
8065 &cpu_rq(cpu)->leaf_cfs_rq_list);
8066}
8067
8068static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8069{
8070 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
8071}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008072#else /* !CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008073static inline void free_fair_sched_group(struct task_group *tg)
8074{
8075}
8076
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008077static inline
8078int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008079{
8080 return 1;
8081}
8082
8083static inline void register_fair_sched_group(struct task_group *tg, int cpu)
8084{
8085}
8086
8087static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8088{
8089}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008090#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008091
8092#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008093static void free_rt_sched_group(struct task_group *tg)
8094{
8095 int i;
8096
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008097 destroy_rt_bandwidth(&tg->rt_bandwidth);
8098
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008099 for_each_possible_cpu(i) {
8100 if (tg->rt_rq)
8101 kfree(tg->rt_rq[i]);
8102 if (tg->rt_se)
8103 kfree(tg->rt_se[i]);
8104 }
8105
8106 kfree(tg->rt_rq);
8107 kfree(tg->rt_se);
8108}
8109
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008110static
8111int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008112{
8113 struct rt_rq *rt_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008114 struct sched_rt_entity *rt_se;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008115 struct rq *rq;
8116 int i;
8117
Mike Travis434d53b2008-04-04 18:11:04 -07008118 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008119 if (!tg->rt_rq)
8120 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008121 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008122 if (!tg->rt_se)
8123 goto err;
8124
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008125 init_rt_bandwidth(&tg->rt_bandwidth,
8126 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008127
8128 for_each_possible_cpu(i) {
8129 rq = cpu_rq(i);
8130
Li Zefaneab17222008-10-29 17:03:22 +08008131 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8132 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008133 if (!rt_rq)
8134 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008135
Li Zefaneab17222008-10-29 17:03:22 +08008136 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8137 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008138 if (!rt_se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008139 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008140
Li Zefaneab17222008-10-29 17:03:22 +08008141 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008142 }
8143
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008144 return 1;
8145
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008146 err_free_rq:
8147 kfree(rt_rq);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008148 err:
8149 return 0;
8150}
8151
8152static inline void register_rt_sched_group(struct task_group *tg, int cpu)
8153{
8154 list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
8155 &cpu_rq(cpu)->leaf_rt_rq_list);
8156}
8157
8158static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
8159{
8160 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
8161}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008162#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008163static inline void free_rt_sched_group(struct task_group *tg)
8164{
8165}
8166
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008167static inline
8168int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008169{
8170 return 1;
8171}
8172
8173static inline void register_rt_sched_group(struct task_group *tg, int cpu)
8174{
8175}
8176
8177static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
8178{
8179}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008180#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008181
Dhaval Giani7c941432010-01-20 13:26:18 +01008182#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008183static void free_sched_group(struct task_group *tg)
8184{
8185 free_fair_sched_group(tg);
8186 free_rt_sched_group(tg);
8187 kfree(tg);
8188}
8189
8190/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008191struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008192{
8193 struct task_group *tg;
8194 unsigned long flags;
8195 int i;
8196
8197 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8198 if (!tg)
8199 return ERR_PTR(-ENOMEM);
8200
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008201 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008202 goto err;
8203
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008204 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008205 goto err;
8206
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008207 spin_lock_irqsave(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008208 for_each_possible_cpu(i) {
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008209 register_fair_sched_group(tg, i);
8210 register_rt_sched_group(tg, i);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008211 }
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008212 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008213
8214 WARN_ON(!parent); /* root should already exist */
8215
8216 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008217 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242008-08-14 15:56:40 +08008218 list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008219 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008220
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008221 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008222
8223err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008224 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008225 return ERR_PTR(-ENOMEM);
8226}
8227
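/*
 * Illustrative lifecycle (sketch of what the cgroup cpu controller
 * does as group directories come and go):
 *
 *	struct task_group *tg = sched_create_group(&init_task_group);
 *
 *	if (IS_ERR(tg))
 *		return PTR_ERR(tg);
 *	sched_destroy_group(tg);
 */
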
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008228/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008229static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008230{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008231 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008232 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008233}
8234
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008235/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008236void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008237{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008238 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008239 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008240
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008241 spin_lock_irqsave(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008242 for_each_possible_cpu(i) {
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008243 unregister_fair_sched_group(tg, i);
8244 unregister_rt_sched_group(tg, i);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008245 }
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008246 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008247 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008248 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008249
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008250 /* wait for possible concurrent references to cfs_rqs to complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008251 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008252}
8253
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008254/* change task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02008255 * The caller of this function should have put the task in its new group
8256 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8257 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008258 */
8259void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008260{
8261 int on_rq, running;
8262 unsigned long flags;
8263 struct rq *rq;
8264
8265 rq = task_rq_lock(tsk, &flags);
8266
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008267 update_rq_clock(rq);
8268
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01008269 running = task_current(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008270 on_rq = tsk->se.on_rq;
8271
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008272 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008273 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008274 if (unlikely(running))
8275 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008276
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008277 set_task_rq(tsk, task_cpu(tsk));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008278
Peter Zijlstra810b3812008-02-29 15:21:01 -05008279#ifdef CONFIG_FAIR_GROUP_SCHED
8280 if (tsk->sched_class->moved_group)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01008281 tsk->sched_class->moved_group(tsk, on_rq);
Peter Zijlstra810b3812008-02-29 15:21:01 -05008282#endif
8283
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008284 if (unlikely(running))
8285 tsk->sched_class->set_curr_task(rq);
8286 if (on_rq)
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00008287 enqueue_task(rq, tsk, 0, false);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008288
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008289 task_rq_unlock(rq, &flags);
8290}
Dhaval Giani7c941432010-01-20 13:26:18 +01008291#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008292
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008293#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008294static void __set_se_shares(struct sched_entity *se, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008295{
8296 struct cfs_rq *cfs_rq = se->cfs_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008297 int on_rq;
8298
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008299 on_rq = se->on_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008300 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008301 dequeue_entity(cfs_rq, se, 0);
8302
8303 se->load.weight = shares;
Peter Zijlstrae05510d2008-05-05 23:56:17 +02008304 se->load.inv_weight = 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008305
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008306 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008307 enqueue_entity(cfs_rq, se, 0);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008308}
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008309
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008310static void set_se_shares(struct sched_entity *se, unsigned long shares)
8311{
8312 struct cfs_rq *cfs_rq = se->cfs_rq;
8313 struct rq *rq = cfs_rq->rq;
8314 unsigned long flags;
8315
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008316 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008317 __set_se_shares(se, shares);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008318 raw_spin_unlock_irqrestore(&rq->lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008319}
8320
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008321static DEFINE_MUTEX(shares_mutex);
8322
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008323int sched_group_set_shares(struct task_group *tg, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008324{
8325 int i;
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008326 unsigned long flags;
Ingo Molnarc61935f2008-01-22 11:24:58 +01008327
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008328 /*
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008329 * We can't change the weight of the root cgroup.
8330 */
8331 if (!tg->se[0])
8332 return -EINVAL;
8333
Peter Zijlstra18d95a22008-04-19 19:45:00 +02008334 if (shares < MIN_SHARES)
8335 shares = MIN_SHARES;
Miao Xiecb4ad1f2008-04-28 12:54:56 +08008336 else if (shares > MAX_SHARES)
8337 shares = MAX_SHARES;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008338
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008339 mutex_lock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008340 if (tg->shares == shares)
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008341 goto done;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008342
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008343 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008344 for_each_possible_cpu(i)
8345 unregister_fair_sched_group(tg, i);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008346 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008347 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008348
8349 /* wait for any ongoing reference to this group to finish */
8350 synchronize_sched();
8351
8352 /*
8353 * Now we are free to modify the group's share on each cpu
8354 * w/o tripping rebalance_share or load_balance_fair.
8355 */
8356 tg->shares = shares;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008357 for_each_possible_cpu(i) {
8358 /*
8359 * force a rebalance
8360 */
8361 cfs_rq_set_shares(tg->cfs_rq[i], 0);
Miao Xiecb4ad1f2008-04-28 12:54:56 +08008362 set_se_shares(tg->se[i], shares);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008363 }
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008364
8365 /*
8366 * Enable load balance activity on this group, by inserting it back on
8367 * each cpu's rq->leaf_cfs_rq_list.
8368 */
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008369 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008370 for_each_possible_cpu(i)
8371 register_fair_sched_group(tg, i);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008372 list_add_rcu(&tg->siblings, &tg->parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008373 spin_unlock_irqrestore(&task_group_lock, flags);
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008374done:
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008375 mutex_unlock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008376 return 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008377}
8378
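/*
 * sched_group_set_shares() above is what the cpu cgroup controller
 * invokes for writes to a group's cpu.shares file; assuming the
 * controller is mounted at /cgroup, doubling a group's weight is:
 *
 *	echo 2048 > /cgroup/A/cpu.shares
 *
 * 2048 being twice NICE_0_LOAD (1024), the default assigned in
 * alloc_fair_sched_group() above.
 */
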
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008379unsigned long sched_group_shares(struct task_group *tg)
8380{
8381 return tg->shares;
8382}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008383#endif
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008384
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008385#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008386/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008387 * Ensure that the real-time constraints are schedulable.
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008388 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008389static DEFINE_MUTEX(rt_constraints_mutex);
8390
8391static unsigned long to_ratio(u64 period, u64 runtime)
8392{
8393 if (runtime == RUNTIME_INF)
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008394 return 1ULL << 20;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008395
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008396 return div64_u64(runtime << 20, period);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008397}
8398
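/*
 * Worked example (illustrative numbers): a runtime of 950000us within
 * a period of 1000000us gives
 *
 *	to_ratio(1000000000, 950000000)
 *		= (950000000 << 20) / 1000000000
 *		= 996147, i.e. roughly 0.95 in 20-bit fixed point,
 *
 * consistent with the 1ULL << 20 (== 1.0) returned for RUNTIME_INF.
 */
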
Dhaval Giani521f1a242008-02-28 15:21:56 +05308399/* Must be called with tasklist_lock held */
8400static inline int tg_has_rt_tasks(struct task_group *tg)
8401{
8402 struct task_struct *g, *p;
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008403
Dhaval Giani521f1a242008-02-28 15:21:56 +05308404 do_each_thread(g, p) {
8405 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8406 return 1;
8407 } while_each_thread(g, p);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008408
Dhaval Giani521f1a242008-02-28 15:21:56 +05308409 return 0;
8410}
8411
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008412struct rt_schedulable_data {
8413 struct task_group *tg;
8414 u64 rt_period;
8415 u64 rt_runtime;
8416};
8417
8418static int tg_schedulable(struct task_group *tg, void *data)
8419{
8420 struct rt_schedulable_data *d = data;
8421 struct task_group *child;
8422 unsigned long total, sum = 0;
8423 u64 period, runtime;
8424
8425 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8426 runtime = tg->rt_bandwidth.rt_runtime;
8427
8428 if (tg == d->tg) {
8429 period = d->rt_period;
8430 runtime = d->rt_runtime;
8431 }
8432
Peter Zijlstra4653f802008-09-23 15:33:44 +02008433 /*
8434 * Cannot have more runtime than the period.
8435 */
8436 if (runtime > period && runtime != RUNTIME_INF)
8437 return -EINVAL;
8438
8439 /*
8440 * Ensure we don't starve existing RT tasks.
8441 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008442 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8443 return -EBUSY;
8444
8445 total = to_ratio(period, runtime);
8446
Peter Zijlstra4653f802008-09-23 15:33:44 +02008447 /*
8448 * Nobody can have more than the global setting allows.
8449 */
8450 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8451 return -EINVAL;
8452
8453 /*
8454 * The sum of our children's runtime should not exceed our own.
8455 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008456 list_for_each_entry_rcu(child, &tg->children, siblings) {
8457 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8458 runtime = child->rt_bandwidth.rt_runtime;
8459
8460 if (child == d->tg) {
8461 period = d->rt_period;
8462 runtime = d->rt_runtime;
8463 }
8464
8465 sum += to_ratio(period, runtime);
8466 }
8467
8468 if (sum > total)
8469 return -EINVAL;
8470
8471 return 0;
8472}
8473
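/*
 * Example of the check above (illustrative numbers, period 1s
 * throughout): a group with rt_runtime 500ms whose two children ask
 * for 300ms and 250ms is rejected: the children sum to
 * to_ratio(1s, 550ms), about 0.55 * 2^20, which exceeds the parent's
 * total of about 0.5 * 2^20.
 */
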
8474static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8475{
8476 struct rt_schedulable_data data = {
8477 .tg = tg,
8478 .rt_period = period,
8479 .rt_runtime = runtime,
8480 };
8481
8482 return walk_tg_tree(tg_schedulable, tg_nop, &data);
8483}
8484
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008485static int tg_set_bandwidth(struct task_group *tg,
8486 u64 rt_period, u64 rt_runtime)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008487{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008488 int i, err = 0;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008489
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008490 mutex_lock(&rt_constraints_mutex);
Dhaval Giani521f1a242008-02-28 15:21:56 +05308491 read_lock(&tasklist_lock);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008492 err = __rt_schedulable(tg, rt_period, rt_runtime);
8493 if (err)
Dhaval Giani521f1a242008-02-28 15:21:56 +05308494 goto unlock;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008495
Thomas Gleixner0986b112009-11-17 15:32:06 +01008496 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008497 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8498 tg->rt_bandwidth.rt_runtime = rt_runtime;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008499
8500 for_each_possible_cpu(i) {
8501 struct rt_rq *rt_rq = tg->rt_rq[i];
8502
Thomas Gleixner0986b112009-11-17 15:32:06 +01008503 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008504 rt_rq->rt_runtime = rt_runtime;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008505 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008506 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008507 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008508 unlock:
Dhaval Giani521f1a242008-02-28 15:21:56 +05308509 read_unlock(&tasklist_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008510 mutex_unlock(&rt_constraints_mutex);
8511
8512 return err;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008513}
8514
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008515int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8516{
8517 u64 rt_runtime, rt_period;
8518
8519 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8520 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8521 if (rt_runtime_us < 0)
8522 rt_runtime = RUNTIME_INF;
8523
8524 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8525}
8526
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008527long sched_group_rt_runtime(struct task_group *tg)
8528{
8529 u64 rt_runtime_us;
8530
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008531 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008532 return -1;
8533
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008534 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008535 do_div(rt_runtime_us, NSEC_PER_USEC);
8536 return rt_runtime_us;
8537}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008538
8539int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8540{
8541 u64 rt_runtime, rt_period;
8542
8543 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8544 rt_runtime = tg->rt_bandwidth.rt_runtime;
8545
Raistlin619b0482008-06-26 18:54:09 +02008546 if (rt_period == 0)
8547 return -EINVAL;
8548
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008549 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8550}
8551
8552long sched_group_rt_period(struct task_group *tg)
8553{
8554 u64 rt_period_us;
8555
8556 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8557 do_div(rt_period_us, NSEC_PER_USEC);
8558 return rt_period_us;
8559}
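/*
 * The four helpers above back the cgroup files cpu.rt_runtime_us and
 * cpu.rt_period_us defined later in this file; values are exchanged in
 * microseconds and stored in nanoseconds, and a runtime of -1 maps to
 * RUNTIME_INF.  A hedged userspace sketch (the mount point /cgroup/cpu
 * and the group name "rtgroup" are assumptions, not part of this file):
 */
#if 0
#include <stdio.h>

static int demo_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* give "rtgroup" 100ms of RT runtime out of every 1s period */
	if (demo_write("/cgroup/cpu/rtgroup/cpu.rt_period_us", "1000000"))
		return 1;
	return demo_write("/cgroup/cpu/rtgroup/cpu.rt_runtime_us", "100000") ? 1 : 0;
}
#endif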
8560
8561static int sched_rt_global_constraints(void)
8562{
Peter Zijlstra4653f802008-09-23 15:33:44 +02008563 u64 runtime, period;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008564 int ret = 0;
8565
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008566 if (sysctl_sched_rt_period <= 0)
8567 return -EINVAL;
8568
Peter Zijlstra4653f802008-09-23 15:33:44 +02008569 runtime = global_rt_runtime();
8570 period = global_rt_period();
8571
8572 /*
8573 * Sanity check on the sysctl variables.
8574 */
8575 if (runtime > period && runtime != RUNTIME_INF)
8576 return -EINVAL;
Peter Zijlstra10b612f2008-06-19 14:22:27 +02008577
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008578 mutex_lock(&rt_constraints_mutex);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008579 read_lock(&tasklist_lock);
Peter Zijlstra4653f802008-09-23 15:33:44 +02008580 ret = __rt_schedulable(NULL, 0, 0);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008581 read_unlock(&tasklist_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008582 mutex_unlock(&rt_constraints_mutex);
8583
8584 return ret;
8585}
Dhaval Giani54e99122009-02-27 15:13:54 +05308586
8587int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8588{
8589 /* Don't accept realtime tasks when there is no way for them to run */
8590 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8591 return 0;
8592
8593 return 1;
8594}
8595
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008596#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008597static int sched_rt_global_constraints(void)
8598{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008599 unsigned long flags;
8600 int i;
8601
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008602 if (sysctl_sched_rt_period <= 0)
8603 return -EINVAL;
8604
Peter Zijlstra60aa6052009-05-05 17:50:21 +02008605 /*
 8606	 * There are always some RT tasks in the root group
 8607	 * -- migration, kstopmachine, etc.
8608 */
8609 if (sysctl_sched_rt_runtime == 0)
8610 return -EBUSY;
8611
Thomas Gleixner0986b112009-11-17 15:32:06 +01008612 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008613 for_each_possible_cpu(i) {
8614 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8615
Thomas Gleixner0986b112009-11-17 15:32:06 +01008616 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008617 rt_rq->rt_runtime = global_rt_runtime();
Thomas Gleixner0986b112009-11-17 15:32:06 +01008618 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008619 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008620 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008621
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008622 return 0;
8623}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008624#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008625
8626int sched_rt_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07008627 void __user *buffer, size_t *lenp,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008628 loff_t *ppos)
8629{
8630 int ret;
8631 int old_period, old_runtime;
8632 static DEFINE_MUTEX(mutex);
8633
8634 mutex_lock(&mutex);
8635 old_period = sysctl_sched_rt_period;
8636 old_runtime = sysctl_sched_rt_runtime;
8637
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07008638 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008639
8640 if (!ret && write) {
8641 ret = sched_rt_global_constraints();
8642 if (ret) {
8643 sysctl_sched_rt_period = old_period;
8644 sysctl_sched_rt_runtime = old_runtime;
8645 } else {
8646 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8647 def_rt_bandwidth.rt_period =
8648 ns_to_ktime(global_rt_period());
8649 }
8650 }
8651 mutex_unlock(&mutex);
8652
8653 return ret;
8654}
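/*
 * sched_rt_handler() is the proc handler behind the sched_rt_period_us and
 * sched_rt_runtime_us sysctls: a write is validated against the RT/group
 * constraints and rolled back if the new pair is not schedulable.  A short
 * userspace sketch of the interface (paths follow the usual /proc/sys
 * layout; the chosen value is only an example):
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "w");

	if (!f)
		return 1;
	/* cap global RT bandwidth at 900ms per (default 1s) period;
	 * -1 would lift the cap, and a rejected value is rolled back */
	fprintf(f, "900000\n");
	return fclose(f) ? 1 : 0;
}
#endif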
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008655
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008656#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008657
 8658/* return the task_group corresponding to a cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +02008659static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008660{
Paul Menage2b01dfe2007-10-24 18:23:50 +02008661 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
8662 struct task_group, css);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008663}
8664
8665static struct cgroup_subsys_state *
Paul Menage2b01dfe2007-10-24 18:23:50 +02008666cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008667{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008668 struct task_group *tg, *parent;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008669
Paul Menage2b01dfe2007-10-24 18:23:50 +02008670 if (!cgrp->parent) {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008671 /* This is early initialization for the top cgroup */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008672 return &init_task_group.css;
8673 }
8674
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008675 parent = cgroup_tg(cgrp->parent);
8676 tg = sched_create_group(parent);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008677 if (IS_ERR(tg))
8678 return ERR_PTR(-ENOMEM);
8679
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008680 return &tg->css;
8681}
8682
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008683static void
8684cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008685{
Paul Menage2b01dfe2007-10-24 18:23:50 +02008686 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008687
8688 sched_destroy_group(tg);
8689}
8690
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008691static int
Ben Blumbe367d02009-09-23 15:56:31 -07008692cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008693{
Peter Zijlstrab68aa232008-02-13 15:45:40 +01008694#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Giani54e99122009-02-27 15:13:54 +05308695 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
Peter Zijlstrab68aa232008-02-13 15:45:40 +01008696 return -EINVAL;
8697#else
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008698 /* We don't support RT-tasks being in separate groups */
8699 if (tsk->sched_class != &fair_sched_class)
8700 return -EINVAL;
Peter Zijlstrab68aa232008-02-13 15:45:40 +01008701#endif
Ben Blumbe367d02009-09-23 15:56:31 -07008702 return 0;
8703}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008704
Ben Blumbe367d02009-09-23 15:56:31 -07008705static int
8706cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
8707 struct task_struct *tsk, bool threadgroup)
8708{
8709 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
8710 if (retval)
8711 return retval;
8712 if (threadgroup) {
8713 struct task_struct *c;
8714 rcu_read_lock();
8715 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
8716 retval = cpu_cgroup_can_attach_task(cgrp, c);
8717 if (retval) {
8718 rcu_read_unlock();
8719 return retval;
8720 }
8721 }
8722 rcu_read_unlock();
8723 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008724 return 0;
8725}
8726
8727static void
Paul Menage2b01dfe2007-10-24 18:23:50 +02008728cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Ben Blumbe367d02009-09-23 15:56:31 -07008729 struct cgroup *old_cont, struct task_struct *tsk,
8730 bool threadgroup)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008731{
8732 sched_move_task(tsk);
Ben Blumbe367d02009-09-23 15:56:31 -07008733 if (threadgroup) {
8734 struct task_struct *c;
8735 rcu_read_lock();
8736 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
8737 sched_move_task(c);
8738 }
8739 rcu_read_unlock();
8740 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008741}
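/*
 * can_attach/attach are invoked by the cgroup core when a PID is written to
 * a group's "tasks" file; with the threadgroup flag the whole thread group
 * is validated and moved.  Illustrative userspace sketch (needs privilege;
 * the mount point /cgroup/cpu and group "mygroup" are assumptions): if the
 * target group has cpu.rt_runtime_us == 0, moving an RT task is refused
 * with -EINVAL via sched_rt_can_attach().
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sched.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };
	FILE *f;

	if (sched_setscheduler(0, SCHED_FIFO, &sp))	/* become an RT task */
		return 1;

	f = fopen("/cgroup/cpu/mygroup/tasks", "w");
	if (!f)
		return 1;
	if (fprintf(f, "%d\n", getpid()) < 0 || fclose(f))
		fprintf(stderr, "attach refused: %s\n", strerror(errno));
	return 0;
}
#endif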
8742
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008743#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagef4c753b2008-04-29 00:59:56 -07008744static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
Paul Menage2b01dfe2007-10-24 18:23:50 +02008745 u64 shareval)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008746{
Paul Menage2b01dfe2007-10-24 18:23:50 +02008747 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008748}
8749
Paul Menagef4c753b2008-04-29 00:59:56 -07008750static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008751{
Paul Menage2b01dfe2007-10-24 18:23:50 +02008752 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008753
8754 return (u64) tg->shares;
8755}
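/*
 * Worked example: cpu.shares is a relative weight, not a cap.  With two
 * busy sibling groups at shares 2048 and 1024, the first receives roughly
 * 2048 / (2048 + 1024) = 2/3 of the contended CPU time.
 */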
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008756#endif /* CONFIG_FAIR_GROUP_SCHED */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008757
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008758#ifdef CONFIG_RT_GROUP_SCHED
Mirco Tischler0c708142008-05-14 16:05:46 -07008759static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
Paul Menage06ecb272008-04-29 01:00:06 -07008760 s64 val)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008761{
Paul Menage06ecb272008-04-29 01:00:06 -07008762 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008763}
8764
Paul Menage06ecb272008-04-29 01:00:06 -07008765static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008766{
Paul Menage06ecb272008-04-29 01:00:06 -07008767 return sched_group_rt_runtime(cgroup_tg(cgrp));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008768}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008769
8770static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
8771 u64 rt_period_us)
8772{
8773 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
8774}
8775
8776static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
8777{
8778 return sched_group_rt_period(cgroup_tg(cgrp));
8779}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008780#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008781
Paul Menagefe5c7cc2007-10-29 21:18:11 +01008782static struct cftype cpu_files[] = {
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008783#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagefe5c7cc2007-10-29 21:18:11 +01008784 {
8785 .name = "shares",
Paul Menagef4c753b2008-04-29 00:59:56 -07008786 .read_u64 = cpu_shares_read_u64,
8787 .write_u64 = cpu_shares_write_u64,
Paul Menagefe5c7cc2007-10-29 21:18:11 +01008788 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008789#endif
8790#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008791 {
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008792 .name = "rt_runtime_us",
Paul Menage06ecb272008-04-29 01:00:06 -07008793 .read_s64 = cpu_rt_runtime_read,
8794 .write_s64 = cpu_rt_runtime_write,
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008795 },
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008796 {
8797 .name = "rt_period_us",
Paul Menagef4c753b2008-04-29 00:59:56 -07008798 .read_u64 = cpu_rt_period_read_uint,
8799 .write_u64 = cpu_rt_period_write_uint,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008800 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008801#endif
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008802};
8803
8804static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
8805{
Paul Menagefe5c7cc2007-10-29 21:18:11 +01008806 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008807}
8808
8809struct cgroup_subsys cpu_cgroup_subsys = {
Ingo Molnar38605ca2007-10-29 21:18:11 +01008810 .name = "cpu",
8811 .create = cpu_cgroup_create,
8812 .destroy = cpu_cgroup_destroy,
8813 .can_attach = cpu_cgroup_can_attach,
8814 .attach = cpu_cgroup_attach,
8815 .populate = cpu_cgroup_populate,
8816 .subsys_id = cpu_cgroup_subsys_id,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07008817 .early_init = 1,
8818};
8819
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008820#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008821
8822#ifdef CONFIG_CGROUP_CPUACCT
8823
8824/*
8825 * CPU accounting code for task groups.
8826 *
8827 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
8828 * (balbir@in.ibm.com).
8829 */
8830
Bharata B Rao934352f2008-11-10 20:41:13 +05308831/* track cpu usage of a group of tasks and its child groups */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008832struct cpuacct {
8833 struct cgroup_subsys_state css;
8834 /* cpuusage holds pointer to a u64-type object on every cpu */
Tejun Heo43cf38e2010-02-02 14:38:57 +09008835 u64 __percpu *cpuusage;
Bharata B Raoef12fef2009-03-31 10:02:22 +05308836 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
Bharata B Rao934352f2008-11-10 20:41:13 +05308837 struct cpuacct *parent;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008838};
8839
8840struct cgroup_subsys cpuacct_subsys;
8841
 8842/* return the cpu accounting group corresponding to this cgroup */
Dhaval Giani32cd7562008-02-29 10:02:43 +05308843static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008844{
Dhaval Giani32cd7562008-02-29 10:02:43 +05308845 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008846 struct cpuacct, css);
8847}
8848
8849/* return cpu accounting group to which this task belongs */
8850static inline struct cpuacct *task_ca(struct task_struct *tsk)
8851{
8852 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
8853 struct cpuacct, css);
8854}
8855
8856/* create a new cpu accounting group */
8857static struct cgroup_subsys_state *cpuacct_create(
Dhaval Giani32cd7562008-02-29 10:02:43 +05308858 struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008859{
8860 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
Bharata B Raoef12fef2009-03-31 10:02:22 +05308861 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008862
8863 if (!ca)
Bharata B Raoef12fef2009-03-31 10:02:22 +05308864 goto out;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008865
8866 ca->cpuusage = alloc_percpu(u64);
Bharata B Raoef12fef2009-03-31 10:02:22 +05308867 if (!ca->cpuusage)
8868 goto out_free_ca;
8869
8870 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
8871 if (percpu_counter_init(&ca->cpustat[i], 0))
8872 goto out_free_counters;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008873
Bharata B Rao934352f2008-11-10 20:41:13 +05308874 if (cgrp->parent)
8875 ca->parent = cgroup_ca(cgrp->parent);
8876
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008877 return &ca->css;
Bharata B Raoef12fef2009-03-31 10:02:22 +05308878
8879out_free_counters:
8880 while (--i >= 0)
8881 percpu_counter_destroy(&ca->cpustat[i]);
8882 free_percpu(ca->cpuusage);
8883out_free_ca:
8884 kfree(ca);
8885out:
8886 return ERR_PTR(-ENOMEM);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008887}
8888
8889/* destroy an existing cpu accounting group */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008890static void
Dhaval Giani32cd7562008-02-29 10:02:43 +05308891cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008892{
Dhaval Giani32cd7562008-02-29 10:02:43 +05308893 struct cpuacct *ca = cgroup_ca(cgrp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05308894 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008895
Bharata B Raoef12fef2009-03-31 10:02:22 +05308896 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
8897 percpu_counter_destroy(&ca->cpustat[i]);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008898 free_percpu(ca->cpuusage);
8899 kfree(ca);
8900}
8901
Ken Chen720f5492008-12-15 22:02:01 -08008902static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
8903{
Rusty Russellb36128c2009-02-20 16:29:08 +09008904 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08008905 u64 data;
8906
8907#ifndef CONFIG_64BIT
8908 /*
8909 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
8910 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008911 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08008912 data = *cpuusage;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008913 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08008914#else
8915 data = *cpuusage;
8916#endif
8917
8918 return data;
8919}
8920
8921static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
8922{
Rusty Russellb36128c2009-02-20 16:29:08 +09008923 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08008924
8925#ifndef CONFIG_64BIT
8926 /*
8927 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
8928 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008929 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08008930 *cpuusage = val;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008931 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08008932#else
8933 *cpuusage = val;
8934#endif
8935}
8936
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008937/* return total cpu usage (in nanoseconds) of a group */
Dhaval Giani32cd7562008-02-29 10:02:43 +05308938static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008939{
Dhaval Giani32cd7562008-02-29 10:02:43 +05308940 struct cpuacct *ca = cgroup_ca(cgrp);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008941 u64 totalcpuusage = 0;
8942 int i;
8943
Ken Chen720f5492008-12-15 22:02:01 -08008944 for_each_present_cpu(i)
8945 totalcpuusage += cpuacct_cpuusage_read(ca, i);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008946
8947 return totalcpuusage;
8948}
8949
Dhaval Giani0297b802008-02-29 10:02:44 +05308950static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
8951 u64 reset)
8952{
8953 struct cpuacct *ca = cgroup_ca(cgrp);
8954 int err = 0;
8955 int i;
8956
8957 if (reset) {
8958 err = -EINVAL;
8959 goto out;
8960 }
8961
Ken Chen720f5492008-12-15 22:02:01 -08008962 for_each_present_cpu(i)
8963 cpuacct_cpuusage_write(ca, i, 0);
Dhaval Giani0297b802008-02-29 10:02:44 +05308964
Dhaval Giani0297b802008-02-29 10:02:44 +05308965out:
8966 return err;
8967}
8968
Ken Chene9515c32008-12-15 22:04:15 -08008969static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
8970 struct seq_file *m)
8971{
8972 struct cpuacct *ca = cgroup_ca(cgroup);
8973 u64 percpu;
8974 int i;
8975
8976 for_each_present_cpu(i) {
8977 percpu = cpuacct_cpuusage_read(ca, i);
8978 seq_printf(m, "%llu ", (unsigned long long) percpu);
8979 }
8980 seq_printf(m, "\n");
8981 return 0;
8982}
8983
Bharata B Raoef12fef2009-03-31 10:02:22 +05308984static const char *cpuacct_stat_desc[] = {
8985 [CPUACCT_STAT_USER] = "user",
8986 [CPUACCT_STAT_SYSTEM] = "system",
8987};
8988
8989static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
8990 struct cgroup_map_cb *cb)
8991{
8992 struct cpuacct *ca = cgroup_ca(cgrp);
8993 int i;
8994
8995 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
8996 s64 val = percpu_counter_read(&ca->cpustat[i]);
8997 val = cputime64_to_clock_t(val);
8998 cb->fill(cb, cpuacct_stat_desc[i], val);
8999 }
9000 return 0;
9001}
9002
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009003static struct cftype files[] = {
9004 {
9005 .name = "usage",
Paul Menagef4c753b2008-04-29 00:59:56 -07009006 .read_u64 = cpuusage_read,
9007 .write_u64 = cpuusage_write,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009008 },
Ken Chene9515c32008-12-15 22:04:15 -08009009 {
9010 .name = "usage_percpu",
9011 .read_seq_string = cpuacct_percpu_seq_read,
9012 },
Bharata B Raoef12fef2009-03-31 10:02:22 +05309013 {
9014 .name = "stat",
9015 .read_map = cpuacct_stats_show,
9016 },
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009017};
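/*
 * Reading the files above from userspace (sketch; the mount point
 * /cgroup/cpuacct and group "mygroup" are assumptions): cpuacct.usage is
 * cumulative nanoseconds, while cpuacct.stat reports "user" and "system"
 * in clock ticks, per the cputime64_to_clock_t() conversion above.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long ns, user, sys;
	FILE *f;

	f = fopen("/cgroup/cpuacct/mygroup/cpuacct.usage", "r");
	if (!f || fscanf(f, "%llu", &ns) != 1)
		return 1;
	fclose(f);

	f = fopen("/cgroup/cpuacct/mygroup/cpuacct.stat", "r");
	if (!f || fscanf(f, "user %llu system %llu", &user, &sys) != 2)
		return 1;
	fclose(f);

	printf("%.3fs total, %llu user ticks, %llu system ticks\n",
	       ns / 1e9, user, sys);
	return 0;
}
#endif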
9018
Dhaval Giani32cd7562008-02-29 10:02:43 +05309019static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009020{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309021 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009022}
9023
9024/*
9025 * charge this task's execution time to its accounting group.
9026 *
9027 * called with rq->lock held.
9028 */
9029static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9030{
9031 struct cpuacct *ca;
Bharata B Rao934352f2008-11-10 20:41:13 +05309032 int cpu;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009033
Li Zefanc40c6f82009-02-26 15:40:15 +08009034 if (unlikely(!cpuacct_subsys.active))
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009035 return;
9036
Bharata B Rao934352f2008-11-10 20:41:13 +05309037 cpu = task_cpu(tsk);
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309038
9039 rcu_read_lock();
9040
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009041 ca = task_ca(tsk);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009042
Bharata B Rao934352f2008-11-10 20:41:13 +05309043 for (; ca; ca = ca->parent) {
Rusty Russellb36128c2009-02-20 16:29:08 +09009044 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009045 *cpuusage += cputime;
9046 }
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309047
9048 rcu_read_unlock();
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009049}
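/*
 * Note the parent walk above: a tick charged to a task in a nested group
 * (e.g. cpuacct:/a/b) is added to b and to every ancestor up to the root,
 * so a parent's cpuacct.usage already includes its children's usage.
 */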
9050
Bharata B Raoef12fef2009-03-31 10:02:22 +05309051/*
Anton Blanchardfa535a72010-02-02 14:46:13 -08009052 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
9053 * in cputime_t units. As a result, cpuacct_update_stats calls
9054 * percpu_counter_add with values large enough to always overflow the
 9055	 * per-cpu batch limit, causing bad SMP scalability.
9056 *
9057 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9058 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9059 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
9060 */
9061#ifdef CONFIG_SMP
9062#define CPUACCT_BATCH \
9063 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9064#else
9065#define CPUACCT_BATCH 0
9066#endif
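/*
 * Worked example (figures illustrative): with CONFIG_VIRT_CPU_ACCOUNTING a
 * single jiffy may be millions of cputime units, so a default
 * percpu_counter_batch of 32 would be exceeded by every update; scaling it
 * to 32 * cputime_one_jiffy (capped at INT_MAX) keeps the rate of
 * global-counter updates roughly the same as in the !VIRT case.
 */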
9067
9068/*
Bharata B Raoef12fef2009-03-31 10:02:22 +05309069 * Charge the system/user time to the task's accounting group.
9070 */
9071static void cpuacct_update_stats(struct task_struct *tsk,
9072 enum cpuacct_stat_index idx, cputime_t val)
9073{
9074 struct cpuacct *ca;
Anton Blanchardfa535a72010-02-02 14:46:13 -08009075 int batch = CPUACCT_BATCH;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309076
9077 if (unlikely(!cpuacct_subsys.active))
9078 return;
9079
9080 rcu_read_lock();
9081 ca = task_ca(tsk);
9082
9083 do {
Anton Blanchardfa535a72010-02-02 14:46:13 -08009084 __percpu_counter_add(&ca->cpustat[idx], val, batch);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309085 ca = ca->parent;
9086 } while (ca);
9087 rcu_read_unlock();
9088}
9089
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009090struct cgroup_subsys cpuacct_subsys = {
9091 .name = "cpuacct",
9092 .create = cpuacct_create,
9093 .destroy = cpuacct_destroy,
9094 .populate = cpuacct_populate,
9095 .subsys_id = cpuacct_subsys_id,
9096};
9097#endif /* CONFIG_CGROUP_CPUACCT */
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009098
9099#ifndef CONFIG_SMP
9100
9101int rcu_expedited_torture_stats(char *page)
9102{
9103 return 0;
9104}
9105EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
9106
9107void synchronize_sched_expedited(void)
9108{
9109}
9110EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
9111
9112#else /* #ifndef CONFIG_SMP */
9113
9114static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
9115static DEFINE_MUTEX(rcu_sched_expedited_mutex);
9116
9117#define RCU_EXPEDITED_STATE_POST -2
9118#define RCU_EXPEDITED_STATE_IDLE -1
9119
9120static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
9121
9122int rcu_expedited_torture_stats(char *page)
9123{
9124 int cnt = 0;
9125 int cpu;
9126
9127 cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
9128 for_each_online_cpu(cpu) {
9129 cnt += sprintf(&page[cnt], " %d:%d",
9130 cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
9131 }
9132 cnt += sprintf(&page[cnt], "\n");
9133 return cnt;
9134}
9135EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
9136
9137static long synchronize_sched_expedited_count;
9138
9139/*
 9140 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 9141 * approach to force the grace period to end quickly. This consumes
9142 * significant time on all CPUs, and is thus not recommended for
9143 * any sort of common-case code.
9144 *
9145 * Note that it is illegal to call this function while holding any
9146 * lock that is acquired by a CPU-hotplug notifier. Failing to
9147 * observe this restriction will result in deadlock.
9148 */
9149void synchronize_sched_expedited(void)
9150{
9151 int cpu;
9152 unsigned long flags;
9153 bool need_full_sync = 0;
9154 struct rq *rq;
9155 struct migration_req *req;
9156 long snap;
9157 int trycount = 0;
9158
9159 smp_mb(); /* ensure prior mod happens before capturing snap. */
9160 snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
9161 get_online_cpus();
9162 while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
9163 put_online_cpus();
9164 if (trycount++ < 10)
9165 udelay(trycount * num_online_cpus());
9166 else {
9167 synchronize_sched();
9168 return;
9169 }
9170 if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
9171 smp_mb(); /* ensure test happens before caller kfree */
9172 return;
9173 }
9174 get_online_cpus();
9175 }
9176 rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
9177 for_each_online_cpu(cpu) {
9178 rq = cpu_rq(cpu);
9179 req = &per_cpu(rcu_migration_req, cpu);
9180 init_completion(&req->done);
9181 req->task = NULL;
9182 req->dest_cpu = RCU_MIGRATION_NEED_QS;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009183 raw_spin_lock_irqsave(&rq->lock, flags);
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009184 list_add(&req->list, &rq->migration_queue);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009185 raw_spin_unlock_irqrestore(&rq->lock, flags);
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009186 wake_up_process(rq->migration_thread);
9187 }
9188 for_each_online_cpu(cpu) {
9189 rcu_expedited_state = cpu;
9190 req = &per_cpu(rcu_migration_req, cpu);
9191 rq = cpu_rq(cpu);
9192 wait_for_completion(&req->done);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009193 raw_spin_lock_irqsave(&rq->lock, flags);
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009194 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
9195 need_full_sync = 1;
9196 req->dest_cpu = RCU_MIGRATION_IDLE;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009197 raw_spin_unlock_irqrestore(&rq->lock, flags);
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009198 }
9199 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
Paul E. McKenney956539b2009-11-10 13:37:20 -08009200 synchronize_sched_expedited_count++;
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009201 mutex_unlock(&rcu_sched_expedited_mutex);
9202 put_online_cpus();
9203 if (need_full_sync)
9204 synchronize_sched();
9205}
9206EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
9207
9208#endif /* #else #ifndef CONFIG_SMP */
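/*
 * Hypothetical caller sketch (demo_obj and demo_retire are illustrative
 * names, not kernel APIs): the expedited variant trades a burst of
 * system-wide work for much lower latency than synchronize_sched(), so it
 * suits rare slow paths such as teardown, never hot paths, and must not be
 * called under locks taken by CPU-hotplug notifiers (see comment above).
 */
#if 0
struct demo_obj;				/* hypothetical object type */

static void demo_retire(struct demo_obj *old)
{
	synchronize_sched_expedited();	/* all preempt-disabled readers done */
	kfree(old);
}
#endif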