blob: 7c5494dccd39416dddccd87b07b64631537e21c6 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
Ingo Molnarc31f2e82007-07-09 18:52:01 +020019 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
Ingo Molnarb9131762008-01-25 21:08:19 +010025 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
Linus Torvalds1da177e2005-04-16 15:20:36 -070027 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020033#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/highmem.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080037#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <linux/completion.h>
39#include <linux/kernel_stat.h>
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070040#include <linux/debug_locks.h>
Ingo Molnarcdd6c482009-09-21 12:02:48 +020041#include <linux/perf_event.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080045#include <linux/freezer.h>
akpm@osdl.org198e2f12006-01-12 01:05:30 -080046#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <linux/blkdev.h>
48#include <linux/delay.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070049#include <linux/pid_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
Alexey Dobriyanb5aadf72008-10-06 13:23:43 +040057#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#include <linux/seq_file.h>
Tejun Heo969c7922010-05-06 18:49:21 +020059#include <linux/stop_machine.h>
Nick Piggine692ab52007-07-26 13:40:43 +020060#include <linux/sysctl.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070061#include <linux/syscalls.h>
62#include <linux/times.h>
Jay Lan8f0ab512006-09-30 23:28:59 -070063#include <linux/tsacct_kern.h>
bibo maoc6fd91f2006-03-26 01:38:20 -080064#include <linux/kprobes.h>
Shailabh Nagar0ff92242006-07-14 00:24:37 -070065#include <linux/delayacct.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020066#include <linux/unistd.h>
Jens Axboef5ff8422007-09-21 09:19:54 +020067#include <linux/pagemap.h>
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +010068#include <linux/hrtimer.h>
Reynes Philippe30914a52008-03-17 16:19:05 -070069#include <linux/tick.h>
Peter Zijlstraf00b45c2008-04-19 19:45:00 +020070#include <linux/debugfs.h>
71#include <linux/ctype.h>
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +020072#include <linux/ftrace.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090073#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
Eric Dumazet5517d862007-05-08 00:32:57 -070075#include <asm/tlb.h>
Satyam Sharma838225b2007-10-24 18:23:50 +020076#include <asm/irq_regs.h>
Gerald Schaefer335d7af2010-11-22 15:47:36 +010077#include <asm/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070078
Gregory Haskins6e0534f2008-05-12 21:21:01 +020079#include "sched_cpupri.h"
Tejun Heo21aa9af2010-06-08 21:40:37 +020080#include "workqueue_sched.h"
Mike Galbraith5091faa2010-11-30 14:18:03 +010081#include "sched_autogroup.h"
Gregory Haskins6e0534f2008-05-12 21:21:01 +020082
Steven Rostedta8d154b2009-04-10 09:36:00 -040083#define CREATE_TRACE_POINTS
Steven Rostedtad8d75f2009-04-14 19:39:12 -040084#include <trace/events/sched.h>
Steven Rostedta8d154b2009-04-10 09:36:00 -040085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086/*
87 * Convert user-nice values [ -20 ... 0 ... 19 ]
88 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
89 * and back.
90 */
91#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
92#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
93#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
94
95/*
96 * 'User priority' is the nice value converted to something we
97 * can work with better when scaling various scheduler parameters,
98 * it's a [ 0 ... 39 ] range.
99 */
100#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
101#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
102#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
103
104/*
Ingo Molnard7876a02008-01-25 21:08:19 +0100105 * Helpers for converting nanosecond timing to jiffy resolution
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106 */
Eric Dumazetd6322fa2007-11-09 22:39:38 +0100107#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200109#define NICE_0_LOAD SCHED_LOAD_SCALE
110#define NICE_0_SHIFT SCHED_LOAD_SHIFT
111
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112/*
113 * These are the 'tuning knobs' of the scheduler:
114 *
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +0200115 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116 * Timeslices get refilled after they expire.
117 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118#define DEF_TIMESLICE (100 * HZ / 1000)
Peter Williams2dd73a42006-06-27 02:54:34 -0700119
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200120/*
121 * single value that denotes runtime == period, ie unlimited time.
122 */
123#define RUNTIME_INF ((u64)~0ULL)
124
Ingo Molnare05606d2007-07-09 18:51:59 +0200125static inline int rt_policy(int policy)
126{
Roel Kluin3f33a7c2008-05-13 23:44:11 +0200127 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
Ingo Molnare05606d2007-07-09 18:51:59 +0200128 return 1;
129 return 0;
130}
131
132static inline int task_has_rt_policy(struct task_struct *p)
133{
134 return rt_policy(p->policy);
135}
136
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137/*
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200138 * This is the priority-queue data structure of the RT scheduling class:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139 */
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200140struct rt_prio_array {
141 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
142 struct list_head queue[MAX_RT_PRIO];
143};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200145struct rt_bandwidth {
Ingo Molnarea736ed2008-03-25 13:51:45 +0100146 /* nests inside the rq lock: */
Thomas Gleixner0986b112009-11-17 15:32:06 +0100147 raw_spinlock_t rt_runtime_lock;
Ingo Molnarea736ed2008-03-25 13:51:45 +0100148 ktime_t rt_period;
149 u64 rt_runtime;
150 struct hrtimer rt_period_timer;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200151};
152
153static struct rt_bandwidth def_rt_bandwidth;
154
155static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
156
157static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
158{
159 struct rt_bandwidth *rt_b =
160 container_of(timer, struct rt_bandwidth, rt_period_timer);
161 ktime_t now;
162 int overrun;
163 int idle = 0;
164
165 for (;;) {
166 now = hrtimer_cb_get_time(timer);
167 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
168
169 if (!overrun)
170 break;
171
172 idle = do_sched_rt_period_timer(rt_b, overrun);
173 }
174
175 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
176}
177
178static
179void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
180{
181 rt_b->rt_period = ns_to_ktime(period);
182 rt_b->rt_runtime = runtime;
183
Thomas Gleixner0986b112009-11-17 15:32:06 +0100184 raw_spin_lock_init(&rt_b->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200185
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200186 hrtimer_init(&rt_b->rt_period_timer,
187 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
188 rt_b->rt_period_timer.function = sched_rt_period_timer;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200189}
190
Krzysztof Heltc8bfff62008-09-05 23:46:19 +0200191static inline int rt_bandwidth_enabled(void)
192{
193 return sysctl_sched_rt_runtime >= 0;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200194}
195
196static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
197{
198 ktime_t now;
199
Hiroshi Shimamotocac64d02009-02-25 09:59:26 -0800200 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200201 return;
202
203 if (hrtimer_active(&rt_b->rt_period_timer))
204 return;
205
Thomas Gleixner0986b112009-11-17 15:32:06 +0100206 raw_spin_lock(&rt_b->rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200207 for (;;) {
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +0100208 unsigned long delta;
209 ktime_t soft, hard;
210
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200211 if (hrtimer_active(&rt_b->rt_period_timer))
212 break;
213
214 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
215 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +0100216
217 soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
218 hard = hrtimer_get_expires(&rt_b->rt_period_timer);
219 delta = ktime_to_ns(ktime_sub(hard, soft));
220 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
Arun R Bharadwaj5c333862009-04-16 12:14:37 +0530221 HRTIMER_MODE_ABS_PINNED, 0);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200222 }
Thomas Gleixner0986b112009-11-17 15:32:06 +0100223 raw_spin_unlock(&rt_b->rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200224}
225
226#ifdef CONFIG_RT_GROUP_SCHED
227static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
228{
229 hrtimer_cancel(&rt_b->rt_period_timer);
230}
231#endif
232
Heiko Carstens712555e2008-04-28 11:33:07 +0200233/*
234 * sched_domains_mutex serializes calls to arch_init_sched_domains,
235 * detach_destroy_domains and partition_sched_domains.
236 */
237static DEFINE_MUTEX(sched_domains_mutex);
238
Dhaval Giani7c941432010-01-20 13:26:18 +0100239#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200240
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700241#include <linux/cgroup.h>
242
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200243struct cfs_rq;
244
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100245static LIST_HEAD(task_groups);
246
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200247/* task group related information */
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200248struct task_group {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700249 struct cgroup_subsys_state css;
Arun R Bharadwaj6c415b92008-12-01 20:49:05 +0530250
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100251#ifdef CONFIG_FAIR_GROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200252 /* schedulable entities of this group on each cpu */
253 struct sched_entity **se;
254 /* runqueue "owned" by this group on each cpu */
255 struct cfs_rq **cfs_rq;
256 unsigned long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800257
258 atomic_t load_weight;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100259#endif
260
261#ifdef CONFIG_RT_GROUP_SCHED
262 struct sched_rt_entity **rt_se;
263 struct rt_rq **rt_rq;
264
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200265 struct rt_bandwidth rt_bandwidth;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100266#endif
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100267
Srivatsa Vaddagiriae8393e2007-10-29 21:18:11 +0100268 struct rcu_head rcu;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100269 struct list_head list;
Peter Zijlstraf473aa52008-04-19 19:45:00 +0200270
271 struct task_group *parent;
272 struct list_head siblings;
273 struct list_head children;
Mike Galbraith5091faa2010-11-30 14:18:03 +0100274
275#ifdef CONFIG_SCHED_AUTOGROUP
276 struct autogroup *autogroup;
277#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200278};
279
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800280/* task_group_lock serializes the addition/removal of task groups */
Peter Zijlstra8ed36992008-02-13 15:45:39 +0100281static DEFINE_SPINLOCK(task_group_lock);
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +0100282
Cyrill Gorcunove9036b32009-10-26 22:24:14 +0300283#ifdef CONFIG_FAIR_GROUP_SCHED
284
Yong Zhang07e06b02011-01-07 15:17:36 +0800285# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200286
Miao Xiecb4ad1f2008-04-28 12:54:56 +0800287/*
Lai Jiangshan2e084782008-06-12 16:42:58 +0800288 * A weight of 0 or 1 can cause arithmetics problems.
289 * A weight of a cfs_rq is the sum of weights of which entities
290 * are queued on this cfs_rq, so a weight of a entity should not be
291 * too large, so as the shares value of a task group.
Miao Xiecb4ad1f2008-04-28 12:54:56 +0800292 * (The default weight is 1024 - so there's no practical
293 * limitation from this.)
294 */
Peter Zijlstra18d95a22008-04-19 19:45:00 +0200295#define MIN_SHARES 2
Lai Jiangshan2e084782008-06-12 16:42:58 +0800296#define MAX_SHARES (1UL << 18)
Peter Zijlstra18d95a22008-04-19 19:45:00 +0200297
Yong Zhang07e06b02011-01-07 15:17:36 +0800298static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100299#endif
300
301/* Default task group.
302 * Every task in system belong to this group at bootup.
303 */
Yong Zhang07e06b02011-01-07 15:17:36 +0800304struct task_group root_task_group;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200305
Dhaval Giani7c941432010-01-20 13:26:18 +0100306#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200307
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200308/* CFS-related fields in a runqueue */
309struct cfs_rq {
310 struct load_weight load;
311 unsigned long nr_running;
312
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200313 u64 exec_clock;
Ingo Molnare9acbff2007-10-15 17:00:04 +0200314 u64 min_vruntime;
Peter Zijlstra3fe16982011-04-05 17:23:48 +0200315#ifndef CONFIG_64BIT
316 u64 min_vruntime_copy;
317#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200318
319 struct rb_root tasks_timeline;
320 struct rb_node *rb_leftmost;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +0200321
322 struct list_head tasks;
323 struct list_head *balance_iterator;
324
325 /*
326 * 'curr' points to currently running entity on this cfs_rq.
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200327 * It is set to NULL otherwise (i.e when none are currently running).
328 */
Rik van Rielac53db52011-02-01 09:51:03 -0500329 struct sched_entity *curr, *next, *last, *skip;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200330
Peter Zijlstra5ac5c4d2008-11-10 10:46:32 +0100331 unsigned int nr_spread_over;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200332
Ingo Molnar62160e32007-10-15 17:00:03 +0200333#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200334 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
335
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100336 /*
337 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200338 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
339 * (like users, containers etc.)
340 *
341 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
342 * list is used during load balance.
343 */
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800344 int on_list;
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100345 struct list_head leaf_cfs_rq_list;
346 struct task_group *tg; /* group that "owns" this runqueue */
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200347
348#ifdef CONFIG_SMP
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200349 /*
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200350 * the part of load.weight contributed by tasks
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200351 */
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200352 unsigned long task_weight;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200353
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200354 /*
355 * h_load = weight * f(tg)
356 *
357 * Where f(tg) is the recursive weight fraction assigned to
358 * this group.
359 */
360 unsigned long h_load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200361
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200362 /*
Paul Turner3b3d1902010-11-15 15:47:08 -0800363 * Maintaining per-cpu shares distribution for group scheduling
364 *
365 * load_stamp is the last time we updated the load average
366 * load_last is the last time we updated the load average and saw load
367 * load_unacc_exec_time is currently unaccounted execution time
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200368 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800369 u64 load_avg;
370 u64 load_period;
Paul Turner3b3d1902010-11-15 15:47:08 -0800371 u64 load_stamp, load_last, load_unacc_exec_time;
Peter Zijlstraf1d239f2008-06-27 13:41:38 +0200372
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800373 unsigned long load_contribution;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200374#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200375#endif
376};
377
378/* Real-Time classes' related field in a runqueue: */
379struct rt_rq {
380 struct rt_prio_array active;
Steven Rostedt63489e42008-01-25 21:08:03 +0100381 unsigned long rt_nr_running;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100382#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -0500383 struct {
384 int curr; /* highest queued rt task prio */
Gregory Haskins398a1532009-01-14 09:10:04 -0500385#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -0500386 int next; /* next highest */
Gregory Haskins398a1532009-01-14 09:10:04 -0500387#endif
Gregory Haskinse864c492008-12-29 09:39:49 -0500388 } highest_prio;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100389#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100390#ifdef CONFIG_SMP
Gregory Haskins73fe6aa2008-01-25 21:08:07 +0100391 unsigned long rt_nr_migratory;
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +0200392 unsigned long rt_nr_total;
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +0100393 int overloaded;
Gregory Haskins917b6272008-12-29 09:39:53 -0500394 struct plist_head pushable_tasks;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100395#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100396 int rt_throttled;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100397 u64 rt_time;
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200398 u64 rt_runtime;
Ingo Molnarea736ed2008-03-25 13:51:45 +0100399 /* Nests inside the rq lock: */
Thomas Gleixner0986b112009-11-17 15:32:06 +0100400 raw_spinlock_t rt_runtime_lock;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100401
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100402#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100403 unsigned long rt_nr_boosted;
404
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100405 struct rq *rq;
406 struct list_head leaf_rt_rq_list;
407 struct task_group *tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100408#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200409};
410
Gregory Haskins57d885f2008-01-25 21:08:18 +0100411#ifdef CONFIG_SMP
412
413/*
414 * We add the notion of a root-domain which will be used to define per-domain
Ingo Molnar0eab9142008-01-25 21:08:19 +0100415 * variables. Each exclusive cpuset essentially defines an island domain by
416 * fully partitioning the member cpus from any other cpuset. Whenever a new
Gregory Haskins57d885f2008-01-25 21:08:18 +0100417 * exclusive cpuset is created, we also create and attach a new root-domain
418 * object.
419 *
Gregory Haskins57d885f2008-01-25 21:08:18 +0100420 */
421struct root_domain {
422 atomic_t refcount;
Rusty Russellc6c49272008-11-25 02:35:05 +1030423 cpumask_var_t span;
424 cpumask_var_t online;
Gregory Haskins637f5082008-01-25 21:08:18 +0100425
Ingo Molnar0eab9142008-01-25 21:08:19 +0100426 /*
Gregory Haskins637f5082008-01-25 21:08:18 +0100427 * The "RT overload" flag: it gets set if a CPU has more than
428 * one runnable RT task.
429 */
Rusty Russellc6c49272008-11-25 02:35:05 +1030430 cpumask_var_t rto_mask;
Ingo Molnar0eab9142008-01-25 21:08:19 +0100431 atomic_t rto_count;
Gregory Haskins6e0534f2008-05-12 21:21:01 +0200432 struct cpupri cpupri;
Gregory Haskins57d885f2008-01-25 21:08:18 +0100433};
434
Gregory Haskinsdc938522008-01-25 21:08:26 +0100435/*
436 * By default the system creates a single root-domain with all cpus as
437 * members (mimicking the global state we have today).
438 */
Gregory Haskins57d885f2008-01-25 21:08:18 +0100439static struct root_domain def_root_domain;
440
Christian Dietriched2d3722010-09-06 16:37:05 +0200441#endif /* CONFIG_SMP */
Gregory Haskins57d885f2008-01-25 21:08:18 +0100442
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200443/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444 * This is the main, per-CPU runqueue data structure.
445 *
446 * Locking rule: those places that want to lock multiple runqueues
447 * (such as the load balancing or the thread migration code), lock
448 * acquire operations must be ordered by ascending &runqueue.
449 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700450struct rq {
Ingo Molnard8016492007-10-18 21:32:55 +0200451 /* runqueue lock: */
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100452 raw_spinlock_t lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700453
454 /*
455 * nr_running and cpu_load should be in the same cacheline because
456 * remote CPUs use both these fields when doing load calculation.
457 */
458 unsigned long nr_running;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200459 #define CPU_LOAD_IDX_MAX 5
460 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -0700461 unsigned long last_load_update_tick;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -0700462#ifdef CONFIG_NO_HZ
Mike Galbraith39c0cbe2010-03-11 17:17:13 +0100463 u64 nohz_stamp;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -0700464 unsigned char nohz_balance_kick;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -0700465#endif
Mike Galbraitha64692a2010-03-11 17:16:20 +0100466 unsigned int skip_clock_update;
467
Ingo Molnard8016492007-10-18 21:32:55 +0200468 /* capture load from *all* tasks on this cpu: */
469 struct load_weight load;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200470 unsigned long nr_load_updates;
471 u64 nr_switches;
472
473 struct cfs_rq cfs;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100474 struct rt_rq rt;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100475
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200476#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnard8016492007-10-18 21:32:55 +0200477 /* list of leaf cfs_rq on this cpu: */
478 struct list_head leaf_cfs_rq_list;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100479#endif
480#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100481 struct list_head leaf_rt_rq_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483
484 /*
485 * This is part of a global counter where only the total sum
486 * over all CPUs matters. A task can increase this counter on
487 * one CPU and if it got migrated afterwards it may decrease
488 * it on another CPU. Always updated under the runqueue lock:
489 */
490 unsigned long nr_uninterruptible;
491
Peter Zijlstra34f971f2010-09-22 13:53:15 +0200492 struct task_struct *curr, *idle, *stop;
Christoph Lameterc9819f42006-12-10 02:20:25 -0800493 unsigned long next_balance;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 struct mm_struct *prev_mm;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200495
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200496 u64 clock;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700497 u64 clock_task;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200498
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499 atomic_t nr_iowait;
500
501#ifdef CONFIG_SMP
Ingo Molnar0eab9142008-01-25 21:08:19 +0100502 struct root_domain *rd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 struct sched_domain *sd;
504
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +0200505 unsigned long cpu_power;
506
Henrik Austada0a522c2009-02-13 20:35:45 +0100507 unsigned char idle_at_tick;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 /* For active balancing */
Gregory Haskins3f029d32009-07-29 11:08:47 -0400509 int post_schedule;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510 int active_balance;
511 int push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +0200512 struct cpu_stop_work active_balance_work;
Ingo Molnard8016492007-10-18 21:32:55 +0200513 /* cpu of this runqueue: */
514 int cpu;
Gregory Haskins1f11eb62008-06-04 15:04:05 -0400515 int online;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516
Peter Zijlstraa8a51d52008-06-27 13:41:26 +0200517 unsigned long avg_load_per_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518
Peter Zijlstrae9e92502009-09-01 10:34:37 +0200519 u64 rt_avg;
520 u64 age_stamp;
Mike Galbraith1b9508f2009-11-04 17:53:50 +0100521 u64 idle_stamp;
522 u64 avg_idle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523#endif
524
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -0700525#ifdef CONFIG_IRQ_TIME_ACCOUNTING
526 u64 prev_irq_time;
527#endif
528
Thomas Gleixnerdce48a82009-04-11 10:43:41 +0200529 /* calc_load related fields */
530 unsigned long calc_load_update;
531 long calc_load_active;
532
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100533#ifdef CONFIG_SCHED_HRTICK
Peter Zijlstra31656512008-07-18 18:01:23 +0200534#ifdef CONFIG_SMP
535 int hrtick_csd_pending;
536 struct call_single_data hrtick_csd;
537#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100538 struct hrtimer hrtick_timer;
539#endif
540
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541#ifdef CONFIG_SCHEDSTATS
542 /* latency stats */
543 struct sched_info rq_sched_info;
Ken Chen9c2c4802008-12-16 23:41:22 -0800544 unsigned long long rq_cpu_time;
545 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546
547 /* sys_sched_yield() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200548 unsigned int yld_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549
550 /* schedule() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200551 unsigned int sched_switch;
552 unsigned int sched_count;
553 unsigned int sched_goidle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554
555 /* try_to_wake_up() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200556 unsigned int ttwu_count;
557 unsigned int ttwu_local;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558#endif
559};
560
Fenghua Yuf34e3b62007-07-19 01:48:13 -0700561static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562
Mike Galbraitha64692a2010-03-11 17:16:20 +0100563
Peter Zijlstra1e5a7402010-10-31 12:37:04 +0100564static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
Ingo Molnardd41f592007-07-09 18:51:59 +0200565
Christoph Lameter0a2966b2006-09-25 23:30:51 -0700566static inline int cpu_of(struct rq *rq)
567{
568#ifdef CONFIG_SMP
569 return rq->cpu;
570#else
571 return 0;
572#endif
573}
574
Paul E. McKenney497f0ab2010-02-22 17:04:51 -0800575#define rcu_dereference_check_sched_domain(p) \
Paul E. McKenneyd11c5632010-02-22 17:04:50 -0800576 rcu_dereference_check((p), \
577 rcu_read_lock_sched_held() || \
578 lockdep_is_held(&sched_domains_mutex))
579
Ingo Molnar20d315d2007-07-09 18:51:58 +0200580/*
Nick Piggin674311d2005-06-25 14:57:27 -0700581 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -0700582 * See detach_destroy_domains: synchronize_sched for details.
Nick Piggin674311d2005-06-25 14:57:27 -0700583 *
584 * The domain tree of any CPU may only be accessed from within
585 * preempt-disabled sections.
586 */
Ingo Molnar48f24c42006-07-03 00:25:40 -0700587#define for_each_domain(cpu, __sd) \
Paul E. McKenney497f0ab2010-02-22 17:04:51 -0800588 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589
590#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
591#define this_rq() (&__get_cpu_var(runqueues))
592#define task_rq(p) cpu_rq(task_cpu(p))
593#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
Hitoshi Mitake54d35f22009-06-29 14:44:57 +0900594#define raw_rq() (&__raw_get_cpu_var(runqueues))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200596#ifdef CONFIG_CGROUP_SCHED
597
598/*
599 * Return the group to which this tasks belongs.
600 *
601 * We use task_subsys_state_check() and extend the RCU verification
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200602 * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200603 * holds that lock for each task it moves into the cgroup. Therefore
604 * by holding that lock, we pin the task to the current cgroup.
605 */
606static inline struct task_group *task_group(struct task_struct *p)
607{
Mike Galbraith5091faa2010-11-30 14:18:03 +0100608 struct task_group *tg;
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200609 struct cgroup_subsys_state *css;
610
611 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200612 lockdep_is_held(&p->pi_lock));
Mike Galbraith5091faa2010-11-30 14:18:03 +0100613 tg = container_of(css, struct task_group, css);
614
615 return autogroup_task_group(p, tg);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200616}
617
618/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
619static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
620{
621#ifdef CONFIG_FAIR_GROUP_SCHED
622 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
623 p->se.parent = task_group(p)->se[cpu];
624#endif
625
626#ifdef CONFIG_RT_GROUP_SCHED
627 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
628 p->rt.parent = task_group(p)->rt_se[cpu];
629#endif
630}
631
632#else /* CONFIG_CGROUP_SCHED */
633
634static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
635static inline struct task_group *task_group(struct task_struct *p)
636{
637 return NULL;
638}
639
640#endif /* CONFIG_CGROUP_SCHED */
641
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100642static void update_rq_clock_task(struct rq *rq, s64 delta);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700643
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100644static void update_rq_clock(struct rq *rq)
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200645{
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100646 s64 delta;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700647
Mike Galbraithf26f9af2010-12-08 11:05:42 +0100648 if (rq->skip_clock_update)
649 return;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -0700650
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100651 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
652 rq->clock += delta;
653 update_rq_clock_task(rq, delta);
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200654}
655
Ingo Molnare436d802007-07-19 21:28:35 +0200656/*
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200657 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
658 */
659#ifdef CONFIG_SCHED_DEBUG
660# define const_debug __read_mostly
661#else
662# define const_debug static const
663#endif
664
Ingo Molnar017730c2008-05-12 21:20:52 +0200665/**
Randy Dunlap1fd06bb2011-03-15 16:12:30 -0700666 * runqueue_is_locked - Returns true if the current cpu runqueue is locked
Randy Dunlape17b38b2009-10-11 19:12:00 -0700667 * @cpu: the processor in question.
Ingo Molnar017730c2008-05-12 21:20:52 +0200668 *
Ingo Molnar017730c2008-05-12 21:20:52 +0200669 * This interface allows printk to be called with the runqueue lock
670 * held and know whether or not it is OK to wake up the klogd.
671 */
Andrew Morton89f19f02009-09-19 11:55:44 -0700672int runqueue_is_locked(int cpu)
Ingo Molnar017730c2008-05-12 21:20:52 +0200673{
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100674 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
Ingo Molnar017730c2008-05-12 21:20:52 +0200675}
676
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200677/*
678 * Debugging: various feature bits
679 */
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200680
681#define SCHED_FEAT(name, enabled) \
682 __SCHED_FEAT_##name ,
683
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200684enum {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200685#include "sched_features.h"
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200686};
687
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200688#undef SCHED_FEAT
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200689
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200690#define SCHED_FEAT(name, enabled) \
691 (1UL << __SCHED_FEAT_##name) * enabled |
692
693const_debug unsigned int sysctl_sched_features =
694#include "sched_features.h"
695 0;
696
697#undef SCHED_FEAT
698
699#ifdef CONFIG_SCHED_DEBUG
700#define SCHED_FEAT(name, enabled) \
701 #name ,
702
Harvey Harrison983ed7a2008-04-24 18:17:55 -0700703static __read_mostly char *sched_feat_names[] = {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200704#include "sched_features.h"
705 NULL
706};
707
708#undef SCHED_FEAT
709
Li Zefan34f3a812008-10-30 15:23:32 +0800710static int sched_feat_show(struct seq_file *m, void *v)
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200711{
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200712 int i;
713
714 for (i = 0; sched_feat_names[i]; i++) {
Li Zefan34f3a812008-10-30 15:23:32 +0800715 if (!(sysctl_sched_features & (1UL << i)))
716 seq_puts(m, "NO_");
717 seq_printf(m, "%s ", sched_feat_names[i]);
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200718 }
Li Zefan34f3a812008-10-30 15:23:32 +0800719 seq_puts(m, "\n");
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200720
Li Zefan34f3a812008-10-30 15:23:32 +0800721 return 0;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200722}
723
724static ssize_t
725sched_feat_write(struct file *filp, const char __user *ubuf,
726 size_t cnt, loff_t *ppos)
727{
728 char buf[64];
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400729 char *cmp;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200730 int neg = 0;
731 int i;
732
733 if (cnt > 63)
734 cnt = 63;
735
736 if (copy_from_user(&buf, ubuf, cnt))
737 return -EFAULT;
738
739 buf[cnt] = 0;
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400740 cmp = strstrip(buf);
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200741
Hillf Danton524429c2011-01-06 20:58:12 +0800742 if (strncmp(cmp, "NO_", 3) == 0) {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200743 neg = 1;
744 cmp += 3;
745 }
746
747 for (i = 0; sched_feat_names[i]; i++) {
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400748 if (strcmp(cmp, sched_feat_names[i]) == 0) {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200749 if (neg)
750 sysctl_sched_features &= ~(1UL << i);
751 else
752 sysctl_sched_features |= (1UL << i);
753 break;
754 }
755 }
756
757 if (!sched_feat_names[i])
758 return -EINVAL;
759
Jan Blunck42994722009-11-20 17:40:37 +0100760 *ppos += cnt;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200761
762 return cnt;
763}
764
Li Zefan34f3a812008-10-30 15:23:32 +0800765static int sched_feat_open(struct inode *inode, struct file *filp)
766{
767 return single_open(filp, sched_feat_show, NULL);
768}
769
Alexey Dobriyan828c0952009-10-01 15:43:56 -0700770static const struct file_operations sched_feat_fops = {
Li Zefan34f3a812008-10-30 15:23:32 +0800771 .open = sched_feat_open,
772 .write = sched_feat_write,
773 .read = seq_read,
774 .llseek = seq_lseek,
775 .release = single_release,
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200776};
777
778static __init int sched_init_debug(void)
779{
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200780 debugfs_create_file("sched_features", 0644, NULL, NULL,
781 &sched_feat_fops);
782
783 return 0;
784}
785late_initcall(sched_init_debug);
786
787#endif
788
789#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200790
791/*
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +0100792 * Number of tasks to iterate in a single balance run.
793 * Limited because this is done with IRQs disabled.
794 */
795const_debug unsigned int sysctl_sched_nr_migrate = 32;
796
797/*
Peter Zijlstrae9e92502009-09-01 10:34:37 +0200798 * period over which we average the RT time consumption, measured
799 * in ms.
800 *
801 * default: 1s
802 */
803const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
804
805/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100806 * period over which we measure -rt task cpu usage in us.
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100807 * default: 1s
808 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100809unsigned int sysctl_sched_rt_period = 1000000;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100810
Ingo Molnar6892b752008-02-13 14:02:36 +0100811static __read_mostly int scheduler_running;
812
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100813/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100814 * part of the period that we allow rt tasks to run in us.
815 * default: 0.95s
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100816 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100817int sysctl_sched_rt_runtime = 950000;
818
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200819static inline u64 global_rt_period(void)
820{
821 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
822}
823
824static inline u64 global_rt_runtime(void)
825{
roel kluine26873b2008-07-22 16:51:15 -0400826 if (sysctl_sched_rt_runtime < 0)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200827 return RUNTIME_INF;
828
829 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
830}
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100831
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832#ifndef prepare_arch_switch
Nick Piggin4866cde2005-06-25 14:57:23 -0700833# define prepare_arch_switch(next) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700835#ifndef finish_arch_switch
836# define finish_arch_switch(prev) do { } while (0)
837#endif
838
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100839static inline int task_current(struct rq *rq, struct task_struct *p)
840{
841 return rq->curr == p;
842}
843
Ingo Molnar70b97a72006-07-03 00:25:42 -0700844static inline int task_running(struct rq *rq, struct task_struct *p)
Nick Piggin4866cde2005-06-25 14:57:23 -0700845{
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200846#ifdef CONFIG_SMP
847 return p->on_cpu;
848#else
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100849 return task_current(rq, p);
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200850#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700851}
852
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200853#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar70b97a72006-07-03 00:25:42 -0700854static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700855{
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200856#ifdef CONFIG_SMP
857 /*
858 * We can optimise this out completely for !SMP, because the
859 * SMP rebalancing from interrupt is the only thing that cares
860 * here.
861 */
862 next->on_cpu = 1;
863#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700864}
865
Ingo Molnar70b97a72006-07-03 00:25:42 -0700866static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700867{
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200868#ifdef CONFIG_SMP
869 /*
870 * After ->on_cpu is cleared, the task can be moved to a different CPU.
871 * We must ensure this doesn't happen until the switch is completely
872 * finished.
873 */
874 smp_wmb();
875 prev->on_cpu = 0;
876#endif
Ingo Molnarda04c032005-09-13 11:17:59 +0200877#ifdef CONFIG_DEBUG_SPINLOCK
878 /* this is a valid case when another task releases the spinlock */
879 rq->lock.owner = current;
880#endif
Ingo Molnar8a25d5d2006-07-03 00:24:54 -0700881 /*
882 * If we are tracking spinlock dependencies then we have to
883 * fix up the runqueue lock - which gets 'carried over' from
884 * prev into current:
885 */
886 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
887
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100888 raw_spin_unlock_irq(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700889}
890
891#else /* __ARCH_WANT_UNLOCKED_CTXSW */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700892static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700893{
894#ifdef CONFIG_SMP
895 /*
896 * We can optimise this out completely for !SMP, because the
897 * SMP rebalancing from interrupt is the only thing that cares
898 * here.
899 */
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200900 next->on_cpu = 1;
Nick Piggin4866cde2005-06-25 14:57:23 -0700901#endif
902#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100903 raw_spin_unlock_irq(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700904#else
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100905 raw_spin_unlock(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700906#endif
907}
908
Ingo Molnar70b97a72006-07-03 00:25:42 -0700909static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700910{
911#ifdef CONFIG_SMP
912 /*
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200913 * After ->on_cpu is cleared, the task can be moved to a different CPU.
Nick Piggin4866cde2005-06-25 14:57:23 -0700914 * We must ensure this doesn't happen until the switch is completely
915 * finished.
916 */
917 smp_wmb();
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200918 prev->on_cpu = 0;
Nick Piggin4866cde2005-06-25 14:57:23 -0700919#endif
920#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
921 local_irq_enable();
922#endif
923}
924#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700925
926/*
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200927 * __task_rq_lock - lock the rq @p resides on.
Ingo Molnarb29739f2006-06-27 02:54:51 -0700928 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700929static inline struct rq *__task_rq_lock(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700930 __acquires(rq->lock)
931{
Peter Zijlstra0970d292010-02-15 14:45:54 +0100932 struct rq *rq;
933
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200934 lockdep_assert_held(&p->pi_lock);
935
Andi Kleen3a5c3592007-10-15 17:00:14 +0200936 for (;;) {
Peter Zijlstra0970d292010-02-15 14:45:54 +0100937 rq = task_rq(p);
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100938 raw_spin_lock(&rq->lock);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +0100939 if (likely(rq == task_rq(p)))
Andi Kleen3a5c3592007-10-15 17:00:14 +0200940 return rq;
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100941 raw_spin_unlock(&rq->lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -0700942 }
Ingo Molnarb29739f2006-06-27 02:54:51 -0700943}
944
945/*
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200946 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700948static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200949 __acquires(p->pi_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 __acquires(rq->lock)
951{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700952 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700953
Andi Kleen3a5c3592007-10-15 17:00:14 +0200954 for (;;) {
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200955 raw_spin_lock_irqsave(&p->pi_lock, *flags);
Andi Kleen3a5c3592007-10-15 17:00:14 +0200956 rq = task_rq(p);
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100957 raw_spin_lock(&rq->lock);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +0100958 if (likely(rq == task_rq(p)))
Andi Kleen3a5c3592007-10-15 17:00:14 +0200959 return rq;
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200960 raw_spin_unlock(&rq->lock);
961 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963}
964
Alexey Dobriyana9957442007-10-15 17:00:13 +0200965static void __task_rq_unlock(struct rq *rq)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700966 __releases(rq->lock)
967{
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100968 raw_spin_unlock(&rq->lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -0700969}
970
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200971static inline void
972task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973 __releases(rq->lock)
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200974 __releases(p->pi_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975{
Peter Zijlstra0122ec52011-04-05 17:23:51 +0200976 raw_spin_unlock(&rq->lock);
977 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978}
979
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980/*
Robert P. J. Daycc2a73b2006-12-10 02:20:00 -0800981 * this_rq_lock - lock this runqueue and disable interrupts.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 */
Alexey Dobriyana9957442007-10-15 17:00:13 +0200983static struct rq *this_rq_lock(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 __acquires(rq->lock)
985{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700986 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987
988 local_irq_disable();
989 rq = this_rq();
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100990 raw_spin_lock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991
992 return rq;
993}
994
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100995#ifdef CONFIG_SCHED_HRTICK
996/*
997 * Use HR-timers to deliver accurate preemption points.
998 *
999 * Its all a bit involved since we cannot program an hrt while holding the
1000 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
1001 * reschedule event.
1002 *
1003 * When we get rescheduled we reprogram the hrtick_timer outside of the
1004 * rq->lock.
1005 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001006
1007/*
1008 * Use hrtick when:
1009 * - enabled by features
1010 * - hrtimer is actually high res
1011 */
1012static inline int hrtick_enabled(struct rq *rq)
1013{
1014 if (!sched_feat(HRTICK))
1015 return 0;
Ingo Molnarba420592008-07-20 11:02:06 +02001016 if (!cpu_active(cpu_of(rq)))
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001017 return 0;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001018 return hrtimer_is_hres_active(&rq->hrtick_timer);
1019}
1020
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001021static void hrtick_clear(struct rq *rq)
1022{
1023 if (hrtimer_active(&rq->hrtick_timer))
1024 hrtimer_cancel(&rq->hrtick_timer);
1025}
1026
1027/*
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001028 * High-resolution timer tick.
1029 * Runs from hardirq context with interrupts disabled.
1030 */
1031static enum hrtimer_restart hrtick(struct hrtimer *timer)
1032{
1033 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1034
1035 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1036
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001037 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02001038 update_rq_clock(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001039 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001040 raw_spin_unlock(&rq->lock);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001041
1042 return HRTIMER_NORESTART;
1043}
1044
Rabin Vincent95e904c2008-05-11 05:55:33 +05301045#ifdef CONFIG_SMP
Peter Zijlstra31656512008-07-18 18:01:23 +02001046/*
1047 * called from hardirq (IPI) context
1048 */
1049static void __hrtick_start(void *arg)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001050{
Peter Zijlstra31656512008-07-18 18:01:23 +02001051 struct rq *rq = arg;
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001052
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001053 raw_spin_lock(&rq->lock);
Peter Zijlstra31656512008-07-18 18:01:23 +02001054 hrtimer_restart(&rq->hrtick_timer);
1055 rq->hrtick_csd_pending = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001056 raw_spin_unlock(&rq->lock);
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001057}
1058
Peter Zijlstra31656512008-07-18 18:01:23 +02001059/*
1060 * Called to set the hrtick timer state.
1061 *
1062 * called with rq->lock held and irqs disabled
1063 */
1064static void hrtick_start(struct rq *rq, u64 delay)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001065{
Peter Zijlstra31656512008-07-18 18:01:23 +02001066 struct hrtimer *timer = &rq->hrtick_timer;
1067 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001068
Arjan van de Vencc584b22008-09-01 15:02:30 -07001069 hrtimer_set_expires(timer, time);
Peter Zijlstra31656512008-07-18 18:01:23 +02001070
1071 if (rq == this_rq()) {
1072 hrtimer_restart(timer);
1073 } else if (!rq->hrtick_csd_pending) {
Peter Zijlstra6e275632009-02-25 13:59:48 +01001074 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
Peter Zijlstra31656512008-07-18 18:01:23 +02001075 rq->hrtick_csd_pending = 1;
1076 }
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001077}
1078
1079static int
1080hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1081{
1082 int cpu = (int)(long)hcpu;
1083
1084 switch (action) {
1085 case CPU_UP_CANCELED:
1086 case CPU_UP_CANCELED_FROZEN:
1087 case CPU_DOWN_PREPARE:
1088 case CPU_DOWN_PREPARE_FROZEN:
1089 case CPU_DEAD:
1090 case CPU_DEAD_FROZEN:
Peter Zijlstra31656512008-07-18 18:01:23 +02001091 hrtick_clear(cpu_rq(cpu));
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001092 return NOTIFY_OK;
1093 }
1094
1095 return NOTIFY_DONE;
1096}
1097
Rakib Mullickfa748202008-09-22 14:55:45 -07001098static __init void init_hrtick(void)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001099{
1100 hotcpu_notifier(hotplug_hrtick, 0);
1101}
Peter Zijlstra31656512008-07-18 18:01:23 +02001102#else
1103/*
1104 * Called to set the hrtick timer state.
1105 *
1106 * called with rq->lock held and irqs disabled
1107 */
1108static void hrtick_start(struct rq *rq, u64 delay)
1109{
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +01001110 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
Arun R Bharadwaj5c333862009-04-16 12:14:37 +05301111 HRTIMER_MODE_REL_PINNED, 0);
Peter Zijlstra31656512008-07-18 18:01:23 +02001112}
1113
Andrew Morton006c75f2008-09-22 14:55:46 -07001114static inline void init_hrtick(void)
Peter Zijlstra31656512008-07-18 18:01:23 +02001115{
1116}
Rabin Vincent95e904c2008-05-11 05:55:33 +05301117#endif /* CONFIG_SMP */
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001118
1119static void init_rq_hrtick(struct rq *rq)
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001120{
Peter Zijlstra31656512008-07-18 18:01:23 +02001121#ifdef CONFIG_SMP
1122 rq->hrtick_csd_pending = 0;
1123
1124 rq->hrtick_csd.flags = 0;
1125 rq->hrtick_csd.func = __hrtick_start;
1126 rq->hrtick_csd.info = rq;
1127#endif
1128
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001129 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1130 rq->hrtick_timer.function = hrtick;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001131}
Andrew Morton006c75f2008-09-22 14:55:46 -07001132#else /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001133static inline void hrtick_clear(struct rq *rq)
1134{
1135}
1136
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001137static inline void init_rq_hrtick(struct rq *rq)
1138{
1139}
1140
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001141static inline void init_hrtick(void)
1142{
1143}
Andrew Morton006c75f2008-09-22 14:55:46 -07001144#endif /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001145
Ingo Molnar1b9f19c2007-07-09 18:51:59 +02001146/*
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001147 * resched_task - mark a task 'to be rescheduled now'.
1148 *
1149 * On UP this means the setting of the need_resched flag, on SMP it
1150 * might also involve a cross-CPU call to trigger the scheduler on
1151 * the target CPU.
1152 */
1153#ifdef CONFIG_SMP
1154
1155#ifndef tsk_is_polling
1156#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1157#endif
1158
Peter Zijlstra31656512008-07-18 18:01:23 +02001159static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001160{
1161 int cpu;
1162
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001163 assert_raw_spin_locked(&task_rq(p)->lock);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001164
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001165 if (test_tsk_need_resched(p))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001166 return;
1167
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001168 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001169
1170 cpu = task_cpu(p);
1171 if (cpu == smp_processor_id())
1172 return;
1173
1174 /* NEED_RESCHED must be visible before we test polling */
1175 smp_mb();
1176 if (!tsk_is_polling(p))
1177 smp_send_reschedule(cpu);
1178}
1179
1180static void resched_cpu(int cpu)
1181{
1182 struct rq *rq = cpu_rq(cpu);
1183 unsigned long flags;
1184
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001185 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001186 return;
1187 resched_task(cpu_curr(cpu));
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001188 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001189}
Thomas Gleixner06d83082008-03-22 09:20:24 +01001190
1191#ifdef CONFIG_NO_HZ
1192/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07001193 * In the semi idle case, use the nearest busy cpu for migrating timers
1194 * from an idle cpu. This is good for power-savings.
1195 *
1196 * We don't do similar optimization for completely idle system, as
1197 * selecting an idle cpu will add more delays to the timers than intended
1198 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
1199 */
1200int get_nohz_timer_target(void)
1201{
1202 int cpu = smp_processor_id();
1203 int i;
1204 struct sched_domain *sd;
1205
1206 for_each_domain(cpu, sd) {
1207 for_each_cpu(i, sched_domain_span(sd))
1208 if (!idle_cpu(i))
1209 return i;
1210 }
1211 return cpu;
1212}
1213/*
Thomas Gleixner06d83082008-03-22 09:20:24 +01001214 * When add_timer_on() enqueues a timer into the timer wheel of an
1215 * idle CPU then this timer might expire before the next timer event
1216 * which is scheduled to wake up that CPU. In case of a completely
1217 * idle system the next event might even be infinite time into the
1218 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1219 * leaves the inner idle loop so the newly added timer is taken into
1220 * account when the CPU goes back to idle and evaluates the timer
1221 * wheel for the next timer event.
1222 */
1223void wake_up_idle_cpu(int cpu)
1224{
1225 struct rq *rq = cpu_rq(cpu);
1226
1227 if (cpu == smp_processor_id())
1228 return;
1229
1230 /*
1231 * This is safe, as this function is called with the timer
1232 * wheel base lock of (cpu) held. When the CPU is on the way
1233 * to idle and has not yet set rq->curr to idle then it will
1234 * be serialized on the timer wheel base lock and take the new
1235 * timer into account automatically.
1236 */
1237 if (rq->curr != rq->idle)
1238 return;
1239
1240 /*
1241 * We can set TIF_RESCHED on the idle task of the other CPU
1242 * lockless. The worst case is that the other CPU runs the
1243 * idle task through an additional NOOP schedule()
1244 */
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001245 set_tsk_need_resched(rq->idle);
Thomas Gleixner06d83082008-03-22 09:20:24 +01001246
1247 /* NEED_RESCHED must be visible before we test polling */
1248 smp_mb();
1249 if (!tsk_is_polling(rq->idle))
1250 smp_send_reschedule(cpu);
1251}
Mike Galbraith39c0cbe2010-03-11 17:17:13 +01001252
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001253#endif /* CONFIG_NO_HZ */
Thomas Gleixner06d83082008-03-22 09:20:24 +01001254
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001255static u64 sched_avg_period(void)
1256{
1257 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1258}
1259
1260static void sched_avg_update(struct rq *rq)
1261{
1262 s64 period = sched_avg_period();
1263
1264 while ((s64)(rq->clock - rq->age_stamp) > period) {
Will Deacon0d98bb22010-05-24 12:11:43 -07001265 /*
1266 * Inline assembly required to prevent the compiler
1267 * optimising this loop into a divmod call.
1268 * See __iter_div_u64_rem() for another example of this.
1269 */
1270 asm("" : "+rm" (rq->age_stamp));
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001271 rq->age_stamp += period;
1272 rq->rt_avg /= 2;
1273 }
1274}
1275
1276static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1277{
1278 rq->rt_avg += rt_delta;
1279 sched_avg_update(rq);
1280}
1281
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001282#else /* !CONFIG_SMP */
Peter Zijlstra31656512008-07-18 18:01:23 +02001283static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001284{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001285 assert_raw_spin_locked(&task_rq(p)->lock);
Peter Zijlstra31656512008-07-18 18:01:23 +02001286 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001287}
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001288
1289static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1290{
1291}
Suresh Siddhada2b71e2010-08-23 13:42:51 -07001292
1293static void sched_avg_update(struct rq *rq)
1294{
1295}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001296#endif /* CONFIG_SMP */
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001297
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001298#if BITS_PER_LONG == 32
1299# define WMULT_CONST (~0UL)
1300#else
1301# define WMULT_CONST (1UL << 32)
1302#endif
1303
1304#define WMULT_SHIFT 32
1305
Ingo Molnar194081e2007-08-09 11:16:51 +02001306/*
1307 * Shift right and round:
1308 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001309#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
Ingo Molnar194081e2007-08-09 11:16:51 +02001310
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001311/*
1312 * delta *= weight / lw
1313 */
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001314static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001315calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1316 struct load_weight *lw)
1317{
1318 u64 tmp;
1319
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001320 if (!lw->inv_weight) {
1321 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1322 lw->inv_weight = 1;
1323 else
1324 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1325 / (lw->weight+1);
1326 }
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001327
1328 tmp = (u64)delta_exec * weight;
1329 /*
1330 * Check whether we'd overflow the 64-bit multiplication:
1331 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001332 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001333 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001334 WMULT_SHIFT/2);
1335 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001336 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001337
Ingo Molnarecf691d2007-08-02 17:41:40 +02001338 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001339}
1340
Ingo Molnar10919852007-10-15 17:00:04 +02001341static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001342{
1343 lw->weight += inc;
Ingo Molnare89996a2008-03-14 23:48:28 +01001344 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001345}
1346
Ingo Molnar10919852007-10-15 17:00:04 +02001347static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001348{
1349 lw->weight -= dec;
Ingo Molnare89996a2008-03-14 23:48:28 +01001350 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001351}
1352
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001353static inline void update_load_set(struct load_weight *lw, unsigned long w)
1354{
1355 lw->weight = w;
1356 lw->inv_weight = 0;
1357}
1358
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001360 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1361 * of tasks with abnormal "nice" values across CPUs the contribution that
1362 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001363 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001364 * scaled version of the new time slice allocation that they receive on time
1365 * slice expiry etc.
1366 */
1367
Peter Zijlstracce7ade2009-01-15 14:53:37 +01001368#define WEIGHT_IDLEPRIO 3
1369#define WMULT_IDLEPRIO 1431655765
Ingo Molnardd41f592007-07-09 18:51:59 +02001370
1371/*
1372 * Nice levels are multiplicative, with a gentle 10% change for every
1373 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1374 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1375 * that remained on nice 0.
1376 *
1377 * The "10% effect" is relative and cumulative: from _any_ nice level,
1378 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001379 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1380 * If a task goes up by ~10% and another task goes down by ~10% then
1381 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001382 */
1383static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001384 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1385 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1386 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1387 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1388 /* 0 */ 1024, 820, 655, 526, 423,
1389 /* 5 */ 335, 272, 215, 172, 137,
1390 /* 10 */ 110, 87, 70, 56, 45,
1391 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001392};
1393
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001394/*
1395 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1396 *
1397 * In cases where the weight does not change often, we can use the
1398 * precalculated inverse to speed up arithmetics by turning divisions
1399 * into multiplications:
1400 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001401static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001402 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1403 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1404 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1405 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1406 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1407 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1408 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1409 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001410};
Peter Williams2dd73a42006-06-27 02:54:34 -07001411
Bharata B Raoef12fef2009-03-31 10:02:22 +05301412/* Time spent by the tasks of the cpu accounting group executing in ... */
1413enum cpuacct_stat_index {
1414 CPUACCT_STAT_USER, /* ... user mode */
1415 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1416
1417 CPUACCT_STAT_NSTATS,
1418};
1419
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001420#ifdef CONFIG_CGROUP_CPUACCT
1421static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
Bharata B Raoef12fef2009-03-31 10:02:22 +05301422static void cpuacct_update_stats(struct task_struct *tsk,
1423 enum cpuacct_stat_index idx, cputime_t val);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001424#else
1425static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
Bharata B Raoef12fef2009-03-31 10:02:22 +05301426static inline void cpuacct_update_stats(struct task_struct *tsk,
1427 enum cpuacct_stat_index idx, cputime_t val) {}
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001428#endif
1429
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001430static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1431{
1432 update_load_add(&rq->load, load);
1433}
1434
1435static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1436{
1437 update_load_sub(&rq->load, load);
1438}
1439
Ingo Molnar7940ca32008-08-19 13:40:47 +02001440#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
Peter Zijlstraeb755802008-08-19 12:33:05 +02001441typedef int (*tg_visitor)(struct task_group *, void *);
1442
1443/*
1444 * Iterate the full tree, calling @down when first entering a node and @up when
1445 * leaving it for the final time.
1446 */
1447static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1448{
1449 struct task_group *parent, *child;
1450 int ret;
1451
1452 rcu_read_lock();
1453 parent = &root_task_group;
1454down:
1455 ret = (*down)(parent, data);
1456 if (ret)
1457 goto out_unlock;
1458 list_for_each_entry_rcu(child, &parent->children, siblings) {
1459 parent = child;
1460 goto down;
1461
1462up:
1463 continue;
1464 }
1465 ret = (*up)(parent, data);
1466 if (ret)
1467 goto out_unlock;
1468
1469 child = parent;
1470 parent = parent->parent;
1471 if (parent)
1472 goto up;
1473out_unlock:
1474 rcu_read_unlock();
1475
1476 return ret;
1477}
1478
1479static int tg_nop(struct task_group *tg, void *data)
1480{
1481 return 0;
1482}
1483#endif
1484
Gregory Haskinse7693a32008-01-25 21:08:09 +01001485#ifdef CONFIG_SMP
Peter Zijlstraf5f08f32009-09-10 13:35:28 +02001486/* Used instead of source_load when we know the type == 0 */
1487static unsigned long weighted_cpuload(const int cpu)
1488{
1489 return cpu_rq(cpu)->load.weight;
1490}
1491
1492/*
1493 * Return a low guess at the load of a migration-source cpu weighted
1494 * according to the scheduling class and "nice" value.
1495 *
1496 * We want to under-estimate the load of migration sources, to
1497 * balance conservatively.
1498 */
1499static unsigned long source_load(int cpu, int type)
1500{
1501 struct rq *rq = cpu_rq(cpu);
1502 unsigned long total = weighted_cpuload(cpu);
1503
1504 if (type == 0 || !sched_feat(LB_BIAS))
1505 return total;
1506
1507 return min(rq->cpu_load[type-1], total);
1508}
1509
1510/*
1511 * Return a high guess at the load of a migration-target cpu weighted
1512 * according to the scheduling class and "nice" value.
1513 */
1514static unsigned long target_load(int cpu, int type)
1515{
1516 struct rq *rq = cpu_rq(cpu);
1517 unsigned long total = weighted_cpuload(cpu);
1518
1519 if (type == 0 || !sched_feat(LB_BIAS))
1520 return total;
1521
1522 return max(rq->cpu_load[type-1], total);
1523}
1524
Peter Zijlstraae154be2009-09-10 14:40:57 +02001525static unsigned long power_of(int cpu)
1526{
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02001527 return cpu_rq(cpu)->cpu_power;
Peter Zijlstraae154be2009-09-10 14:40:57 +02001528}
1529
Gregory Haskinse7693a32008-01-25 21:08:09 +01001530static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001531
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001532static unsigned long cpu_avg_load_per_task(int cpu)
1533{
1534 struct rq *rq = cpu_rq(cpu);
Ingo Molnaraf6d5962008-11-29 20:45:15 +01001535 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001536
Steven Rostedt4cd42622008-11-26 21:04:24 -05001537 if (nr_running)
1538 rq->avg_load_per_task = rq->load.weight / nr_running;
Balbir Singha2d47772008-11-12 16:19:00 +05301539 else
1540 rq->avg_load_per_task = 0;
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001541
1542 return rq->avg_load_per_task;
1543}
1544
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001545#ifdef CONFIG_FAIR_GROUP_SCHED
1546
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001547/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001548 * Compute the cpu's hierarchical load factor for each task group.
1549 * This needs to be done in a top-down fashion because the load of a child
1550 * group is a fraction of its parents load.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001551 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001552static int tg_load_down(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001553{
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001554 unsigned long load;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001555 long cpu = (long)data;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001556
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001557 if (!tg->parent) {
1558 load = cpu_rq(cpu)->load.weight;
1559 } else {
1560 load = tg->parent->cfs_rq[cpu]->h_load;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001561 load *= tg->se[cpu]->load.weight;
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001562 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1563 }
1564
1565 tg->cfs_rq[cpu]->h_load = load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001566
Peter Zijlstraeb755802008-08-19 12:33:05 +02001567 return 0;
Peter Zijlstra4d8d5952008-06-27 13:41:19 +02001568}
1569
Peter Zijlstraeb755802008-08-19 12:33:05 +02001570static void update_h_load(long cpu)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001571{
Peter Zijlstraeb755802008-08-19 12:33:05 +02001572 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001573}
1574
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001575#endif
1576
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001577#ifdef CONFIG_PREEMPT
1578
Peter Zijlstrab78bb862009-09-15 14:23:18 +02001579static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1580
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001581/*
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001582 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1583 * way at the expense of forcing extra atomic operations in all
1584 * invocations. This assures that the double_lock is acquired using the
1585 * same underlying policy as the spinlock_t on this architecture, which
1586 * reduces latency compared to the unfair variant below. However, it
1587 * also adds more overhead and therefore may reduce throughput.
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001588 */
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001589static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1590 __releases(this_rq->lock)
1591 __acquires(busiest->lock)
1592 __acquires(this_rq->lock)
1593{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001594 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001595 double_rq_lock(this_rq, busiest);
1596
1597 return 1;
1598}
1599
1600#else
1601/*
1602 * Unfair double_lock_balance: Optimizes throughput at the expense of
1603 * latency by eliminating extra atomic operations when the locks are
1604 * already in proper order on entry. This favors lower cpu-ids and will
1605 * grant the double lock to lower cpus over higher ids under contention,
1606 * regardless of entry order into the function.
1607 */
1608static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001609 __releases(this_rq->lock)
1610 __acquires(busiest->lock)
1611 __acquires(this_rq->lock)
1612{
1613 int ret = 0;
1614
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001615 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001616 if (busiest < this_rq) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001617 raw_spin_unlock(&this_rq->lock);
1618 raw_spin_lock(&busiest->lock);
1619 raw_spin_lock_nested(&this_rq->lock,
1620 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001621 ret = 1;
1622 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001623 raw_spin_lock_nested(&busiest->lock,
1624 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001625 }
1626 return ret;
1627}
1628
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001629#endif /* CONFIG_PREEMPT */
1630
1631/*
1632 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1633 */
1634static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1635{
1636 if (unlikely(!irqs_disabled())) {
1637 /* printk() doesn't work good under rq->lock */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001638 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001639 BUG_ON(1);
1640 }
1641
1642 return _double_lock_balance(this_rq, busiest);
1643}
1644
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001645static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1646 __releases(busiest->lock)
1647{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001648 raw_spin_unlock(&busiest->lock);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001649 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1650}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001651
1652/*
1653 * double_rq_lock - safely lock two runqueues
1654 *
1655 * Note this does not disable interrupts like task_rq_lock,
1656 * you need to do so manually before calling.
1657 */
1658static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1659 __acquires(rq1->lock)
1660 __acquires(rq2->lock)
1661{
1662 BUG_ON(!irqs_disabled());
1663 if (rq1 == rq2) {
1664 raw_spin_lock(&rq1->lock);
1665 __acquire(rq2->lock); /* Fake it out ;) */
1666 } else {
1667 if (rq1 < rq2) {
1668 raw_spin_lock(&rq1->lock);
1669 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1670 } else {
1671 raw_spin_lock(&rq2->lock);
1672 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1673 }
1674 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001675}
1676
1677/*
1678 * double_rq_unlock - safely unlock two runqueues
1679 *
1680 * Note this does not restore interrupts like task_rq_unlock,
1681 * you need to do so manually after calling.
1682 */
1683static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1684 __releases(rq1->lock)
1685 __releases(rq2->lock)
1686{
1687 raw_spin_unlock(&rq1->lock);
1688 if (rq1 != rq2)
1689 raw_spin_unlock(&rq2->lock);
1690 else
1691 __release(rq2->lock);
1692}
1693
Mike Galbraithd95f4122011-02-01 09:50:51 -05001694#else /* CONFIG_SMP */
1695
1696/*
1697 * double_rq_lock - safely lock two runqueues
1698 *
1699 * Note this does not disable interrupts like task_rq_lock,
1700 * you need to do so manually before calling.
1701 */
1702static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1703 __acquires(rq1->lock)
1704 __acquires(rq2->lock)
1705{
1706 BUG_ON(!irqs_disabled());
1707 BUG_ON(rq1 != rq2);
1708 raw_spin_lock(&rq1->lock);
1709 __acquire(rq2->lock); /* Fake it out ;) */
1710}
1711
1712/*
1713 * double_rq_unlock - safely unlock two runqueues
1714 *
1715 * Note this does not restore interrupts like task_rq_unlock,
1716 * you need to do so manually after calling.
1717 */
1718static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1719 __releases(rq1->lock)
1720 __releases(rq2->lock)
1721{
1722 BUG_ON(rq1 != rq2);
1723 raw_spin_unlock(&rq1->lock);
1724 __release(rq2->lock);
1725}
1726
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001727#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001728
Peter Zijlstra74f51872010-04-22 21:50:19 +02001729static void calc_load_account_idle(struct rq *this_rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01001730static void update_sysctl(void);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01001731static int get_update_sysctl_factor(void);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07001732static void update_cpu_load(struct rq *this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001733
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001734static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1735{
1736 set_task_rq(p, cpu);
1737#ifdef CONFIG_SMP
1738 /*
1739 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1740 * successfuly executed on another CPU. We must ensure that updates of
1741 * per-task data have been completed by this moment.
1742 */
1743 smp_wmb();
1744 task_thread_info(p)->cpu = cpu;
1745#endif
1746}
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001747
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001748static const struct sched_class rt_sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02001749
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001750#define sched_class_highest (&stop_sched_class)
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001751#define for_each_class(class) \
1752 for (class = sched_class_highest; class; class = class->next)
Ingo Molnardd41f592007-07-09 18:51:59 +02001753
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001754#include "sched_stats.h"
1755
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001756static void inc_nr_running(struct rq *rq)
Ingo Molnar6363ca52008-05-29 11:28:57 +02001757{
1758 rq->nr_running++;
Ingo Molnar6363ca52008-05-29 11:28:57 +02001759}
1760
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001761static void dec_nr_running(struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001762{
1763 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001764}
1765
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001766static void set_load_weight(struct task_struct *p)
1767{
Ingo Molnardd41f592007-07-09 18:51:59 +02001768 /*
1769 * SCHED_IDLE tasks get minimal weight:
1770 */
1771 if (p->policy == SCHED_IDLE) {
1772 p->se.load.weight = WEIGHT_IDLEPRIO;
1773 p->se.load.inv_weight = WMULT_IDLEPRIO;
1774 return;
1775 }
1776
1777 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1778 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001779}
1780
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001781static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001782{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001783 update_rq_clock(rq);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001784 sched_info_queued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001785 p->sched_class->enqueue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001786}
1787
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001788static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnardd41f592007-07-09 18:51:59 +02001789{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001790 update_rq_clock(rq);
Ankita Garg46ac22b2008-07-01 14:30:06 +05301791 sched_info_dequeued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001792 p->sched_class->dequeue_task(rq, p, flags);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001793}
1794
1795/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001796 * activate_task - move a task to the runqueue.
1797 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001798static void activate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001799{
1800 if (task_contributes_to_load(p))
1801 rq->nr_uninterruptible--;
1802
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001803 enqueue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001804 inc_nr_running(rq);
1805}
1806
1807/*
1808 * deactivate_task - remove a task from the runqueue.
1809 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001810static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001811{
1812 if (task_contributes_to_load(p))
1813 rq->nr_uninterruptible++;
1814
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001815 dequeue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001816 dec_nr_running(rq);
1817}
1818
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001819#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1820
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001821/*
1822 * There are no locks covering percpu hardirq/softirq time.
1823 * They are only modified in account_system_vtime, on corresponding CPU
1824 * with interrupts disabled. So, writes are safe.
1825 * They are read and saved off onto struct rq in update_rq_clock().
1826 * This may result in other CPU reading this CPU's irq time and can
1827 * race with irq/account_system_vtime on this CPU. We would either get old
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001828 * or new value with a side effect of accounting a slice of irq time to wrong
1829 * task when irq is in progress while we read rq->clock. That is a worthy
1830 * compromise in place of having locks on each irq in account_system_time.
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001831 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001832static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1833static DEFINE_PER_CPU(u64, cpu_softirq_time);
1834
1835static DEFINE_PER_CPU(u64, irq_start_time);
1836static int sched_clock_irqtime;
1837
1838void enable_sched_clock_irqtime(void)
1839{
1840 sched_clock_irqtime = 1;
1841}
1842
1843void disable_sched_clock_irqtime(void)
1844{
1845 sched_clock_irqtime = 0;
1846}
1847
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001848#ifndef CONFIG_64BIT
1849static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1850
1851static inline void irq_time_write_begin(void)
1852{
1853 __this_cpu_inc(irq_time_seq.sequence);
1854 smp_wmb();
1855}
1856
1857static inline void irq_time_write_end(void)
1858{
1859 smp_wmb();
1860 __this_cpu_inc(irq_time_seq.sequence);
1861}
1862
1863static inline u64 irq_time_read(int cpu)
1864{
1865 u64 irq_time;
1866 unsigned seq;
1867
1868 do {
1869 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1870 irq_time = per_cpu(cpu_softirq_time, cpu) +
1871 per_cpu(cpu_hardirq_time, cpu);
1872 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1873
1874 return irq_time;
1875}
1876#else /* CONFIG_64BIT */
1877static inline void irq_time_write_begin(void)
1878{
1879}
1880
1881static inline void irq_time_write_end(void)
1882{
1883}
1884
1885static inline u64 irq_time_read(int cpu)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001886{
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001887 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1888}
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001889#endif /* CONFIG_64BIT */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001890
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001891/*
1892 * Called before incrementing preempt_count on {soft,}irq_enter
1893 * and before decrementing preempt_count on {soft,}irq_exit.
1894 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001895void account_system_vtime(struct task_struct *curr)
1896{
1897 unsigned long flags;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001898 s64 delta;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001899 int cpu;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001900
1901 if (!sched_clock_irqtime)
1902 return;
1903
1904 local_irq_save(flags);
1905
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001906 cpu = smp_processor_id();
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001907 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1908 __this_cpu_add(irq_start_time, delta);
1909
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001910 irq_time_write_begin();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001911 /*
1912 * We do not account for softirq time from ksoftirqd here.
1913 * We want to continue accounting softirq time to ksoftirqd thread
1914 * in that case, so as not to confuse scheduler with a special task
1915 * that do not consume any time, but still wants to run.
1916 */
1917 if (hardirq_count())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001918 __this_cpu_add(cpu_hardirq_time, delta);
Venkatesh Pallipadi4dd53d82010-12-21 17:09:00 -08001919 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001920 __this_cpu_add(cpu_softirq_time, delta);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001921
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001922 irq_time_write_end();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001923 local_irq_restore(flags);
1924}
Ingo Molnarb7dadc32010-10-18 20:00:37 +02001925EXPORT_SYMBOL_GPL(account_system_vtime);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001926
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001927static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001928{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001929 s64 irq_delta;
1930
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001931 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001932
1933 /*
1934 * Since irq_time is only updated on {soft,}irq_exit, we might run into
1935 * this case when a previous update_rq_clock() happened inside a
1936 * {soft,}irq region.
1937 *
1938 * When this happens, we stop ->clock_task and only update the
1939 * prev_irq_time stamp to account for the part that fit, so that a next
1940 * update will consume the rest. This ensures ->clock_task is
1941 * monotonic.
1942 *
1943 * It does however cause some slight miss-attribution of {soft,}irq
1944 * time, a more accurate solution would be to update the irq_time using
1945 * the current rq->clock timestamp, except that would require using
1946 * atomic ops.
1947 */
1948 if (irq_delta > delta)
1949 irq_delta = delta;
1950
1951 rq->prev_irq_time += irq_delta;
1952 delta -= irq_delta;
1953 rq->clock_task += delta;
1954
1955 if (irq_delta && sched_feat(NONIRQ_POWER))
1956 sched_rt_avg_update(rq, irq_delta);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001957}
1958
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08001959static int irqtime_account_hi_update(void)
1960{
1961 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1962 unsigned long flags;
1963 u64 latest_ns;
1964 int ret = 0;
1965
1966 local_irq_save(flags);
1967 latest_ns = this_cpu_read(cpu_hardirq_time);
1968 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
1969 ret = 1;
1970 local_irq_restore(flags);
1971 return ret;
1972}
1973
1974static int irqtime_account_si_update(void)
1975{
1976 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1977 unsigned long flags;
1978 u64 latest_ns;
1979 int ret = 0;
1980
1981 local_irq_save(flags);
1982 latest_ns = this_cpu_read(cpu_softirq_time);
1983 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
1984 ret = 1;
1985 local_irq_restore(flags);
1986 return ret;
1987}
1988
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001989#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001990
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08001991#define sched_clock_irqtime (0)
1992
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001993static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001994{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001995 rq->clock_task += delta;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001996}
1997
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001998#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001999
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002000#include "sched_idletask.c"
2001#include "sched_fair.c"
2002#include "sched_rt.c"
Mike Galbraith5091faa2010-11-30 14:18:03 +01002003#include "sched_autogroup.c"
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002004#include "sched_stoptask.c"
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002005#ifdef CONFIG_SCHED_DEBUG
2006# include "sched_debug.c"
2007#endif
2008
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002009void sched_set_stop_task(int cpu, struct task_struct *stop)
2010{
2011 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2012 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2013
2014 if (stop) {
2015 /*
2016 * Make it appear like a SCHED_FIFO task, its something
2017 * userspace knows about and won't get confused about.
2018 *
2019 * Also, it will make PI more or less work without too
2020 * much confusion -- but then, stop work should not
2021 * rely on PI working anyway.
2022 */
2023 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2024
2025 stop->sched_class = &stop_sched_class;
2026 }
2027
2028 cpu_rq(cpu)->stop = stop;
2029
2030 if (old_stop) {
2031 /*
2032 * Reset it back to a normal scheduling class so that
2033 * it can die in pieces.
2034 */
2035 old_stop->sched_class = &rt_sched_class;
2036 }
2037}
2038
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002039/*
Ingo Molnardd41f592007-07-09 18:51:59 +02002040 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02002041 */
Ingo Molnar14531182007-07-09 18:51:59 +02002042static inline int __normal_prio(struct task_struct *p)
2043{
Ingo Molnardd41f592007-07-09 18:51:59 +02002044 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02002045}
2046
2047/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07002048 * Calculate the expected normal priority: i.e. priority
2049 * without taking RT-inheritance into account. Might be
2050 * boosted by interactivity modifiers. Changes upon fork,
2051 * setprio syscalls, and whenever the interactivity
2052 * estimator recalculates.
2053 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002054static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002055{
2056 int prio;
2057
Ingo Molnare05606d2007-07-09 18:51:59 +02002058 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07002059 prio = MAX_RT_PRIO-1 - p->rt_priority;
2060 else
2061 prio = __normal_prio(p);
2062 return prio;
2063}
2064
2065/*
2066 * Calculate the current priority, i.e. the priority
2067 * taken into account by the scheduler. This value might
2068 * be boosted by RT tasks, or might be boosted by
2069 * interactivity modifiers. Will be RT if the task got
2070 * RT-boosted. If not then it returns p->normal_prio.
2071 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002072static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002073{
2074 p->normal_prio = normal_prio(p);
2075 /*
2076 * If we are RT tasks or we were boosted to RT priority,
2077 * keep the priority unchanged. Otherwise, update priority
2078 * to the normal priority:
2079 */
2080 if (!rt_prio(p->prio))
2081 return p->normal_prio;
2082 return p->prio;
2083}
2084
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085/**
2086 * task_curr - is this task currently executing on a CPU?
2087 * @p: the task in question.
2088 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002089inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090{
2091 return cpu_curr(task_cpu(p)) == p;
2092}
2093
Steven Rostedtcb469842008-01-25 21:08:22 +01002094static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2095 const struct sched_class *prev_class,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002096 int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01002097{
2098 if (prev_class != p->sched_class) {
2099 if (prev_class->switched_from)
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002100 prev_class->switched_from(rq, p);
2101 p->sched_class->switched_to(rq, p);
2102 } else if (oldprio != p->prio)
2103 p->sched_class->prio_changed(rq, p, oldprio);
Steven Rostedtcb469842008-01-25 21:08:22 +01002104}
2105
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002106static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2107{
2108 const struct sched_class *class;
2109
2110 if (p->sched_class == rq->curr->sched_class) {
2111 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2112 } else {
2113 for_each_class(class) {
2114 if (class == rq->curr->sched_class)
2115 break;
2116 if (class == p->sched_class) {
2117 resched_task(rq->curr);
2118 break;
2119 }
2120 }
2121 }
2122
2123 /*
2124 * A queue event has occurred, and we're going to schedule. In
2125 * this case, we can save a useless back to back clock update.
2126 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002127 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002128 rq->skip_clock_update = 1;
2129}
2130
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131#ifdef CONFIG_SMP
Ingo Molnarcc367732007-10-15 17:00:18 +02002132/*
2133 * Is this task likely cache-hot:
2134 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002135static int
Ingo Molnarcc367732007-10-15 17:00:18 +02002136task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2137{
2138 s64 delta;
2139
Peter Zijlstrae6c8fba2009-12-16 18:04:33 +01002140 if (p->sched_class != &fair_sched_class)
2141 return 0;
2142
Nikhil Raoef8002f2010-10-13 12:09:35 -07002143 if (unlikely(p->policy == SCHED_IDLE))
2144 return 0;
2145
Ingo Molnarf540a602008-03-15 17:10:34 +01002146 /*
2147 * Buddy candidates are cache hot:
2148 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002149 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
Peter Zijlstra47932412008-11-04 21:25:09 +01002150 (&p->se == cfs_rq_of(&p->se)->next ||
2151 &p->se == cfs_rq_of(&p->se)->last))
Ingo Molnarf540a602008-03-15 17:10:34 +01002152 return 1;
2153
Ingo Molnar6bc16652007-10-15 17:00:18 +02002154 if (sysctl_sched_migration_cost == -1)
2155 return 1;
2156 if (sysctl_sched_migration_cost == 0)
2157 return 0;
2158
Ingo Molnarcc367732007-10-15 17:00:18 +02002159 delta = now - p->se.exec_start;
2160
2161 return delta < (s64)sysctl_sched_migration_cost;
2162}
2163
Ingo Molnardd41f592007-07-09 18:51:59 +02002164void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02002165{
Peter Zijlstrae2912002009-12-16 18:04:36 +01002166#ifdef CONFIG_SCHED_DEBUG
2167 /*
2168 * We should never call set_task_cpu() on a blocked task,
2169 * ttwu() will sort out the placement.
2170 */
Peter Zijlstra077614e2009-12-17 13:16:31 +01002171 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2172 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002173
2174#ifdef CONFIG_LOCKDEP
2175 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2176 lockdep_is_held(&task_rq(p)->lock)));
2177#endif
Peter Zijlstrae2912002009-12-16 18:04:36 +01002178#endif
2179
Mathieu Desnoyersde1d7282009-05-05 16:49:59 +08002180 trace_sched_migrate_task(p, new_cpu);
Peter Zijlstracbc34ed2008-12-10 08:08:22 +01002181
Peter Zijlstra0c697742009-12-22 15:43:19 +01002182 if (task_cpu(p) != new_cpu) {
2183 p->se.nr_migrations++;
2184 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2185 }
Ingo Molnardd41f592007-07-09 18:51:59 +02002186
2187 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02002188}
2189
Tejun Heo969c7922010-05-06 18:49:21 +02002190struct migration_arg {
Ingo Molnar36c8b582006-07-03 00:25:41 -07002191 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 int dest_cpu;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002193};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
Tejun Heo969c7922010-05-06 18:49:21 +02002195static int migration_cpu_stop(void *data);
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197/*
2198 * The task's runqueue lock must be held.
2199 * Returns true if you have to wait for migration thread.
2200 */
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002201static bool need_migrate_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 /*
2204 * If the task is not on a runqueue (and not running), then
Peter Zijlstrae2912002009-12-16 18:04:36 +01002205 * the next wake-up will properly place the task.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 */
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002207 bool running = p->on_rq || p->on_cpu;
2208 smp_rmb(); /* finish_lock_switch() */
2209 return running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210}
2211
2212/*
2213 * wait_task_inactive - wait for a thread to unschedule.
2214 *
Roland McGrath85ba2d82008-07-25 19:45:58 -07002215 * If @match_state is nonzero, it's the @p->state value just checked and
2216 * not expected to change. If it changes, i.e. @p might have woken up,
2217 * then return zero. When we succeed in waiting for @p to be off its CPU,
2218 * we return a positive number (its total switch count). If a second call
2219 * a short while later returns the same number, the caller can be sure that
2220 * @p has remained unscheduled the whole time.
2221 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 * The caller must ensure that the task *will* unschedule sometime soon,
2223 * else this function might spin for a *long* time. This function can't
2224 * be called with interrupts off, or it may introduce deadlock with
2225 * smp_call_function() if an IPI is sent by the same process we are
2226 * waiting to become inactive.
2227 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002228unsigned long wait_task_inactive(struct task_struct *p, long match_state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229{
2230 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002231 int running, on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002232 unsigned long ncsw;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002233 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234
Andi Kleen3a5c3592007-10-15 17:00:14 +02002235 for (;;) {
2236 /*
2237 * We do the initial early heuristics without holding
2238 * any task-queue locks at all. We'll only try to get
2239 * the runqueue lock when things look like they will
2240 * work out!
2241 */
2242 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002243
Andi Kleen3a5c3592007-10-15 17:00:14 +02002244 /*
2245 * If the task is actively running on another CPU
2246 * still, just relax and busy-wait without holding
2247 * any locks.
2248 *
2249 * NOTE! Since we don't hold any locks, it's not
2250 * even sure that "rq" stays as the right runqueue!
2251 * But we don't care, since "task_running()" will
2252 * return false if the runqueue has changed and p
2253 * is actually now running somewhere else!
2254 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002255 while (task_running(rq, p)) {
2256 if (match_state && unlikely(p->state != match_state))
2257 return 0;
Andi Kleen3a5c3592007-10-15 17:00:14 +02002258 cpu_relax();
Roland McGrath85ba2d82008-07-25 19:45:58 -07002259 }
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002260
Andi Kleen3a5c3592007-10-15 17:00:14 +02002261 /*
2262 * Ok, time to look more closely! We need the rq
2263 * lock now, to be *sure*. If we're wrong, we'll
2264 * just go back and repeat.
2265 */
2266 rq = task_rq_lock(p, &flags);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002267 trace_sched_wait_task(p);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002268 running = task_running(rq, p);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002269 on_rq = p->on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002270 ncsw = 0;
Oleg Nesterovf31e11d2008-08-20 16:54:44 -07002271 if (!match_state || p->state == match_state)
Oleg Nesterov93dcf552008-08-20 16:54:44 -07002272 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002273 task_rq_unlock(rq, p, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002274
Andi Kleen3a5c3592007-10-15 17:00:14 +02002275 /*
Roland McGrath85ba2d82008-07-25 19:45:58 -07002276 * If it changed from the expected state, bail out now.
2277 */
2278 if (unlikely(!ncsw))
2279 break;
2280
2281 /*
Andi Kleen3a5c3592007-10-15 17:00:14 +02002282 * Was it really running after all now that we
2283 * checked with the proper locks actually held?
2284 *
2285 * Oops. Go back and try again..
2286 */
2287 if (unlikely(running)) {
2288 cpu_relax();
2289 continue;
2290 }
2291
2292 /*
2293 * It's not enough that it's not actively running,
2294 * it must be off the runqueue _entirely_, and not
2295 * preempted!
2296 *
Luis Henriques80dd99b2009-03-16 19:58:09 +00002297 * So if it was still runnable (but just not actively
Andi Kleen3a5c3592007-10-15 17:00:14 +02002298 * running right now), it's preempted, and we should
2299 * yield - it could be a while.
2300 */
2301 if (unlikely(on_rq)) {
Thomas Gleixner8eb90c32011-02-23 23:52:21 +00002302 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
2303
2304 set_current_state(TASK_UNINTERRUPTIBLE);
2305 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002306 continue;
2307 }
2308
2309 /*
2310 * Ahh, all good. It wasn't running, and it wasn't
2311 * runnable, which means that it will never become
2312 * running in the future either. We're all done!
2313 */
2314 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 }
Roland McGrath85ba2d82008-07-25 19:45:58 -07002316
2317 return ncsw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318}
2319
2320/***
2321 * kick_process - kick a running thread to enter/exit the kernel
2322 * @p: the to-be-kicked thread
2323 *
2324 * Cause a process which is running on another CPU to enter
2325 * kernel-mode, without any delay. (to get signals handled.)
2326 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002327 * NOTE: this function doesn't have to take the runqueue lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 * because all it wants to ensure is that the remote task enters
2329 * the kernel. If the IPI races and the task has been migrated
2330 * to another CPU then no harm is done and the purpose has been
2331 * achieved as well.
2332 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002333void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334{
2335 int cpu;
2336
2337 preempt_disable();
2338 cpu = task_cpu(p);
2339 if ((cpu != smp_processor_id()) && task_curr(p))
2340 smp_send_reschedule(cpu);
2341 preempt_enable();
2342}
Rusty Russellb43e3522009-06-12 22:27:00 -06002343EXPORT_SYMBOL_GPL(kick_process);
Nick Piggin476d1392005-06-25 14:57:29 -07002344#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002346#ifdef CONFIG_SMP
Oleg Nesterov30da6882010-03-15 10:10:19 +01002347/*
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002348 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
Oleg Nesterov30da6882010-03-15 10:10:19 +01002349 */
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002350static int select_fallback_rq(int cpu, struct task_struct *p)
2351{
2352 int dest_cpu;
2353 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2354
2355 /* Look for allowed, online CPU in same node. */
2356 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2357 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2358 return dest_cpu;
2359
2360 /* Any allowed, online CPU? */
2361 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2362 if (dest_cpu < nr_cpu_ids)
2363 return dest_cpu;
2364
2365 /* No more Mr. Nice Guy. */
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01002366 dest_cpu = cpuset_cpus_allowed_fallback(p);
2367 /*
2368 * Don't tell them about moving exiting tasks or
2369 * kernel threads (both mm NULL), since they never
2370 * leave kernel.
2371 */
2372 if (p->mm && printk_ratelimit()) {
2373 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2374 task_pid_nr(p), p->comm, cpu);
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002375 }
2376
2377 return dest_cpu;
2378}
2379
Peter Zijlstrae2912002009-12-16 18:04:36 +01002380/*
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002381 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
Peter Zijlstrae2912002009-12-16 18:04:36 +01002382 */
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002383static inline
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002384int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002385{
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002386 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002387
2388 /*
2389 * In order not to call set_task_cpu() on a blocking task we need
2390 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2391 * cpu.
2392 *
2393 * Since this is common to all placement strategies, this lives here.
2394 *
2395 * [ this allows ->select_task() to simply return task_cpu(p) and
2396 * not worry about this generic constraint ]
2397 */
2398 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
Peter Zijlstra70f11202009-12-20 17:36:27 +01002399 !cpu_online(cpu)))
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002400 cpu = select_fallback_rq(task_cpu(p), p);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002401
2402 return cpu;
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002403}
Mike Galbraith09a40af2010-04-15 07:29:59 +02002404
2405static void update_avg(u64 *avg, u64 sample)
2406{
2407 s64 diff = sample - *avg;
2408 *avg += diff >> 3;
2409}
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002410#endif
2411
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002412static void
2413ttwu_stat(struct rq *rq, struct task_struct *p, int cpu, int wake_flags)
Tejun Heo9ed38112009-12-03 15:08:03 +09002414{
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002415#ifdef CONFIG_SCHEDSTATS
2416#ifdef CONFIG_SMP
2417 int this_cpu = smp_processor_id();
Tejun Heo9ed38112009-12-03 15:08:03 +09002418
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002419 if (cpu == this_cpu) {
2420 schedstat_inc(rq, ttwu_local);
2421 schedstat_inc(p, se.statistics.nr_wakeups_local);
2422 } else {
2423 struct sched_domain *sd;
2424
2425 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2426 for_each_domain(this_cpu, sd) {
2427 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2428 schedstat_inc(sd, ttwu_wake_remote);
2429 break;
2430 }
2431 }
2432 }
2433#endif /* CONFIG_SMP */
2434
2435 schedstat_inc(rq, ttwu_count);
2436 schedstat_inc(p, se.statistics.nr_wakeups);
2437
2438 if (wake_flags & WF_SYNC)
2439 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2440
2441 if (cpu != task_cpu(p))
2442 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2443
2444#endif /* CONFIG_SCHEDSTATS */
2445}
2446
2447static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
2448{
Tejun Heo9ed38112009-12-03 15:08:03 +09002449 activate_task(rq, p, en_flags);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002450 p->on_rq = 1;
Peter Zijlstrac2f71152011-04-13 13:28:56 +02002451
2452 /* if a worker is waking up, notify workqueue */
2453 if (p->flags & PF_WQ_WORKER)
2454 wq_worker_waking_up(p, cpu_of(rq));
Tejun Heo9ed38112009-12-03 15:08:03 +09002455}
2456
Peter Zijlstra89363382011-04-05 17:23:42 +02002457static void
2458ttwu_post_activation(struct task_struct *p, struct rq *rq, int wake_flags)
Tejun Heo9ed38112009-12-03 15:08:03 +09002459{
Peter Zijlstra89363382011-04-05 17:23:42 +02002460 trace_sched_wakeup(p, true);
Tejun Heo9ed38112009-12-03 15:08:03 +09002461 check_preempt_curr(rq, p, wake_flags);
2462
2463 p->state = TASK_RUNNING;
2464#ifdef CONFIG_SMP
2465 if (p->sched_class->task_woken)
2466 p->sched_class->task_woken(rq, p);
2467
2468 if (unlikely(rq->idle_stamp)) {
2469 u64 delta = rq->clock - rq->idle_stamp;
2470 u64 max = 2*sysctl_sched_migration_cost;
2471
2472 if (delta > max)
2473 rq->avg_idle = max;
2474 else
2475 update_avg(&rq->avg_idle, delta);
2476 rq->idle_stamp = 0;
2477 }
2478#endif
2479}
2480
2481/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 * try_to_wake_up - wake up a thread
Tejun Heo9ed38112009-12-03 15:08:03 +09002483 * @p: the thread to be awakened
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 * @state: the mask of task states that can be woken
Tejun Heo9ed38112009-12-03 15:08:03 +09002485 * @wake_flags: wake modifier flags (WF_*)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 *
2487 * Put it on the run-queue if it's not already there. The "current"
2488 * thread is always on the run-queue (except when the actual
2489 * re-schedule is in progress), and as such you're allowed to do
2490 * the simpler "current->state = TASK_RUNNING" to mark yourself
2491 * runnable without the overhead of this.
2492 *
Tejun Heo9ed38112009-12-03 15:08:03 +09002493 * Returns %true if @p was woken up, %false if it was already running
2494 * or @state didn't match @p's state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 */
Peter Zijlstra7d478722009-09-14 19:55:44 +02002496static int try_to_wake_up(struct task_struct *p, unsigned int state,
2497 int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498{
Ingo Molnarcc367732007-10-15 17:00:18 +02002499 int cpu, orig_cpu, this_cpu, success = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 unsigned long flags;
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002501 unsigned long en_flags = ENQUEUE_WAKEUP;
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002502 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002504 this_cpu = get_cpu();
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02002505
Linus Torvalds04e2f172008-02-23 18:05:03 -08002506 smp_wmb();
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002507 raw_spin_lock_irqsave(&p->pi_lock, flags);
2508 rq = __task_rq_lock(p);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002509 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 goto out;
2511
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002512 cpu = task_cpu(p);
2513
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002514 if (p->on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 goto out_running;
2516
Ingo Molnarcc367732007-10-15 17:00:18 +02002517 orig_cpu = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518#ifdef CONFIG_SMP
2519 if (unlikely(task_running(rq, p)))
2520 goto out_activate;
2521
Peter Zijlstraa8e4f2e2011-04-05 17:23:49 +02002522 p->sched_contributes_to_load = !!task_contributes_to_load(p);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002523 p->state = TASK_WAKING;
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002524
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002525 if (p->sched_class->task_waking) {
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02002526 p->sched_class->task_waking(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002527 en_flags |= ENQUEUE_WAKING;
Peter Zijlstra0970d292010-02-15 14:45:54 +01002528 }
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002529
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002530 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002531 if (cpu != orig_cpu)
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002532 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002533 __task_rq_unlock(rq);
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002534
Peter Zijlstra0970d292010-02-15 14:45:54 +01002535 rq = cpu_rq(cpu);
2536 raw_spin_lock(&rq->lock);
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002537
Peter Zijlstra0970d292010-02-15 14:45:54 +01002538 /*
2539 * We migrated the task without holding either rq->lock, however
2540 * since the task is not on the task list itself, nobody else
2541 * will try and migrate the task, hence the rq should match the
2542 * cpu we just moved it to.
2543 */
2544 WARN_ON(task_cpu(p) != cpu);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002545 WARN_ON(p->state != TASK_WAKING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
Peter Zijlstraa8e4f2e2011-04-05 17:23:49 +02002547 if (p->sched_contributes_to_load)
2548 rq->nr_uninterruptible--;
2549
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550out_activate:
2551#endif /* CONFIG_SMP */
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002552 ttwu_activate(rq, p, en_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553out_running:
Peter Zijlstra89363382011-04-05 17:23:42 +02002554 ttwu_post_activation(p, rq, wake_flags);
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002555 ttwu_stat(rq, p, cpu, wake_flags);
Peter Zijlstra89363382011-04-05 17:23:42 +02002556 success = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557out:
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002558 __task_rq_unlock(rq);
2559 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002560 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561
2562 return success;
2563}
2564
David Howells50fa6102009-04-28 15:01:38 +01002565/**
Tejun Heo21aa9af2010-06-08 21:40:37 +02002566 * try_to_wake_up_local - try to wake up a local task with rq lock held
2567 * @p: the thread to be awakened
2568 *
Peter Zijlstra2acca552011-04-05 17:23:50 +02002569 * Put @p on the run-queue if it's not already there. The caller must
Tejun Heo21aa9af2010-06-08 21:40:37 +02002570 * ensure that this_rq() is locked, @p is bound to this_rq() and not
Peter Zijlstra2acca552011-04-05 17:23:50 +02002571 * the current task.
Tejun Heo21aa9af2010-06-08 21:40:37 +02002572 */
2573static void try_to_wake_up_local(struct task_struct *p)
2574{
2575 struct rq *rq = task_rq(p);
Tejun Heo21aa9af2010-06-08 21:40:37 +02002576
2577 BUG_ON(rq != this_rq());
2578 BUG_ON(p == current);
2579 lockdep_assert_held(&rq->lock);
2580
Peter Zijlstra2acca552011-04-05 17:23:50 +02002581 if (!raw_spin_trylock(&p->pi_lock)) {
2582 raw_spin_unlock(&rq->lock);
2583 raw_spin_lock(&p->pi_lock);
2584 raw_spin_lock(&rq->lock);
2585 }
2586
Tejun Heo21aa9af2010-06-08 21:40:37 +02002587 if (!(p->state & TASK_NORMAL))
Peter Zijlstra2acca552011-04-05 17:23:50 +02002588 goto out;
Tejun Heo21aa9af2010-06-08 21:40:37 +02002589
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002590 if (!p->on_rq)
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002591 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2592
Peter Zijlstra89363382011-04-05 17:23:42 +02002593 ttwu_post_activation(p, rq, 0);
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002594 ttwu_stat(rq, p, smp_processor_id(), 0);
Peter Zijlstra2acca552011-04-05 17:23:50 +02002595out:
2596 raw_spin_unlock(&p->pi_lock);
Tejun Heo21aa9af2010-06-08 21:40:37 +02002597}
2598
2599/**
David Howells50fa6102009-04-28 15:01:38 +01002600 * wake_up_process - Wake up a specific process
2601 * @p: The process to be woken up.
2602 *
2603 * Attempt to wake up the nominated process and move it to the set of runnable
2604 * processes. Returns 1 if the process was woken up, 0 if it was already
2605 * running.
2606 *
2607 * It may be assumed that this function implies a write memory barrier before
2608 * changing the task state if and only if any tasks are woken up.
2609 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002610int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05002612 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614EXPORT_SYMBOL(wake_up_process);
2615
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002616int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617{
2618 return try_to_wake_up(p, state, 0);
2619}
2620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621/*
2622 * Perform scheduler related setup for a newly forked process p.
2623 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02002624 *
2625 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002627static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628{
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002629 p->on_rq = 0;
2630
2631 p->se.on_rq = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02002632 p->se.exec_start = 0;
2633 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02002634 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002635 p->se.nr_migrations = 0;
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002636 p->se.vruntime = 0;
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002637 INIT_LIST_HEAD(&p->se.group_node);
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002638
2639#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03002640 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002641#endif
Nick Piggin476d1392005-06-25 14:57:29 -07002642
Peter Zijlstrafa717062008-01-25 21:08:27 +01002643 INIT_LIST_HEAD(&p->rt.run_list);
Nick Piggin476d1392005-06-25 14:57:29 -07002644
Avi Kivitye107be32007-07-26 13:40:43 +02002645#ifdef CONFIG_PREEMPT_NOTIFIERS
2646 INIT_HLIST_HEAD(&p->preempt_notifiers);
2647#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002648}
2649
2650/*
2651 * fork()/clone()-time setup:
2652 */
2653void sched_fork(struct task_struct *p, int clone_flags)
2654{
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002655 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002656 int cpu = get_cpu();
2657
2658 __sched_fork(p);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002659 /*
Peter Zijlstra0017d732010-03-24 18:34:10 +01002660 * We mark the process as running here. This guarantees that
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002661 * nobody will actually run it, and a signal or other external
2662 * event cannot wake it up and insert it on the runqueue either.
2663 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002664 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02002665
Ingo Molnarb29739f2006-06-27 02:54:51 -07002666 /*
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002667 * Revert to default priority/policy on fork if requested.
2668 */
2669 if (unlikely(p->sched_reset_on_fork)) {
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002670 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002671 p->policy = SCHED_NORMAL;
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002672 p->normal_prio = p->static_prio;
2673 }
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002674
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002675 if (PRIO_TO_NICE(p->static_prio) < 0) {
2676 p->static_prio = NICE_TO_PRIO(0);
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002677 p->normal_prio = p->static_prio;
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002678 set_load_weight(p);
2679 }
2680
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002681 /*
2682 * We don't need the reset flag anymore after the fork. It has
2683 * fulfilled its duty:
2684 */
2685 p->sched_reset_on_fork = 0;
2686 }
Lennart Poetteringca94c442009-06-15 17:17:47 +02002687
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002688 /*
2689 * Make sure we do not leak PI boosting priority to the child.
2690 */
2691 p->prio = current->normal_prio;
2692
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02002693 if (!rt_prio(p->prio))
2694 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07002695
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002696 if (p->sched_class->task_fork)
2697 p->sched_class->task_fork(p);
2698
Peter Zijlstra86951592010-06-22 11:44:53 +02002699 /*
2700 * The child is not yet in the pid-hash so no cgroup attach races,
2701 * and the cgroup is pinned to this child due to cgroup_fork()
2702 * is ran before sched_fork().
2703 *
2704 * Silence PROVE_RCU.
2705 */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002706 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002707 set_task_cpu(p, cpu);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002708 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002709
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002710#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02002711 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002712 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713#endif
Peter Zijlstra3ca7a442011-04-05 17:23:40 +02002714#if defined(CONFIG_SMP)
2715 p->on_cpu = 0;
Nick Piggin4866cde2005-06-25 14:57:23 -07002716#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717#ifdef CONFIG_PREEMPT
Nick Piggin4866cde2005-06-25 14:57:23 -07002718 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08002719 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720#endif
Dario Faggioli806c09a2010-11-30 19:51:33 +01002721#ifdef CONFIG_SMP
Gregory Haskins917b6272008-12-29 09:39:53 -05002722 plist_node_init(&p->pushable_tasks, MAX_PRIO);
Dario Faggioli806c09a2010-11-30 19:51:33 +01002723#endif
Gregory Haskins917b6272008-12-29 09:39:53 -05002724
Nick Piggin476d1392005-06-25 14:57:29 -07002725 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726}
2727
2728/*
2729 * wake_up_new_task - wake up a newly created task for the first time.
2730 *
2731 * This function will do some initial scheduler statistics housekeeping
2732 * that must be done for every newly created context, then puts the task
2733 * on the runqueue and wakes it.
2734 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002735void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736{
2737 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002738 struct rq *rq;
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002739
Peter Zijlstraab2515c2011-04-05 17:23:52 +02002740 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002741#ifdef CONFIG_SMP
2742 /*
2743 * Fork balancing, do it here and not earlier because:
2744 * - cpus_allowed can change in the fork path
2745 * - any previously selected cpu might disappear through hotplug
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002746 */
Peter Zijlstraab2515c2011-04-05 17:23:52 +02002747 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002748#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Peter Zijlstraab2515c2011-04-05 17:23:52 +02002750 rq = __task_rq_lock(p);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002751 activate_task(rq, p, 0);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002752 p->on_rq = 1;
Peter Zijlstra89363382011-04-05 17:23:42 +02002753 trace_sched_wakeup_new(p, true);
Peter Zijlstraa7558e02009-09-14 20:02:34 +02002754 check_preempt_curr(rq, p, WF_FORK);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002755#ifdef CONFIG_SMP
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002756 if (p->sched_class->task_woken)
2757 p->sched_class->task_woken(rq, p);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002758#endif
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002759 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760}
2761
Avi Kivitye107be32007-07-26 13:40:43 +02002762#ifdef CONFIG_PREEMPT_NOTIFIERS
2763
2764/**
Luis Henriques80dd99b2009-03-16 19:58:09 +00002765 * preempt_notifier_register - tell me when current is being preempted & rescheduled
Randy Dunlap421cee22007-07-31 00:37:50 -07002766 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02002767 */
2768void preempt_notifier_register(struct preempt_notifier *notifier)
2769{
2770 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2771}
2772EXPORT_SYMBOL_GPL(preempt_notifier_register);
2773
2774/**
2775 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07002776 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02002777 *
2778 * This is safe to call from within a preemption notifier.
2779 */
2780void preempt_notifier_unregister(struct preempt_notifier *notifier)
2781{
2782 hlist_del(&notifier->link);
2783}
2784EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2785
2786static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2787{
2788 struct preempt_notifier *notifier;
2789 struct hlist_node *node;
2790
2791 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2792 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2793}
2794
2795static void
2796fire_sched_out_preempt_notifiers(struct task_struct *curr,
2797 struct task_struct *next)
2798{
2799 struct preempt_notifier *notifier;
2800 struct hlist_node *node;
2801
2802 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2803 notifier->ops->sched_out(notifier, next);
2804}
2805
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002806#else /* !CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002807
2808static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2809{
2810}
2811
2812static void
2813fire_sched_out_preempt_notifiers(struct task_struct *curr,
2814 struct task_struct *next)
2815{
2816}
2817
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002818#endif /* CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002819
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820/**
Nick Piggin4866cde2005-06-25 14:57:23 -07002821 * prepare_task_switch - prepare to switch tasks
2822 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07002823 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07002824 * @next: the task we are going to switch to.
2825 *
2826 * This is called with the rq lock held and interrupts off. It must
2827 * be paired with a subsequent finish_task_switch after the context
2828 * switch.
2829 *
2830 * prepare_task_switch sets up locking and calls architecture specific
2831 * hooks.
2832 */
Avi Kivitye107be32007-07-26 13:40:43 +02002833static inline void
2834prepare_task_switch(struct rq *rq, struct task_struct *prev,
2835 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07002836{
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002837 sched_info_switch(prev, next);
2838 perf_event_task_sched_out(prev, next);
Avi Kivitye107be32007-07-26 13:40:43 +02002839 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002840 prepare_lock_switch(rq, next);
2841 prepare_arch_switch(next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002842 trace_sched_switch(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002843}
2844
2845/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04002847 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 * @prev: the thread we just switched away from.
2849 *
Nick Piggin4866cde2005-06-25 14:57:23 -07002850 * finish_task_switch must be called after the context switch, paired
2851 * with a prepare_task_switch call before the context switch.
2852 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2853 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 *
2855 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002856 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 * with the lock held can cause deadlocks; see schedule() for
2858 * details.)
2859 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02002860static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 __releases(rq->lock)
2862{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002864 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865
2866 rq->prev_mm = NULL;
2867
2868 /*
2869 * A task struct has one reference for the use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002870 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002871 * schedule one last time. The schedule call will never return, and
2872 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002873 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 * still held, otherwise prev could be scheduled on another cpu, die
2875 * there before we look at prev->state, and then the reference would
2876 * be dropped twice.
2877 * Manfred Spraul <manfred@colorfullife.com>
2878 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002879 prev_state = prev->state;
Nick Piggin4866cde2005-06-25 14:57:23 -07002880 finish_arch_switch(prev);
Jamie Iles8381f652010-01-08 15:27:33 +00002881#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2882 local_irq_disable();
2883#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Peter Zijlstra49f47432009-12-27 11:51:52 +01002884 perf_event_task_sched_in(current);
Jamie Iles8381f652010-01-08 15:27:33 +00002885#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2886 local_irq_enable();
2887#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Nick Piggin4866cde2005-06-25 14:57:23 -07002888 finish_lock_switch(rq, prev);
Steven Rostedte8fa1362008-01-25 21:08:05 +01002889
Avi Kivitye107be32007-07-26 13:40:43 +02002890 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891 if (mm)
2892 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002893 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08002894 /*
2895 * Remove function-return probe instances associated with this
2896 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02002897 */
bibo maoc6fd91f2006-03-26 01:38:20 -08002898 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08002900 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901}
2902
Gregory Haskins3f029d32009-07-29 11:08:47 -04002903#ifdef CONFIG_SMP
2904
2905/* assumes rq->lock is held */
2906static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2907{
2908 if (prev->sched_class->pre_schedule)
2909 prev->sched_class->pre_schedule(rq, prev);
2910}
2911
2912/* rq->lock is NOT held, but preemption is disabled */
2913static inline void post_schedule(struct rq *rq)
2914{
2915 if (rq->post_schedule) {
2916 unsigned long flags;
2917
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002918 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002919 if (rq->curr->sched_class->post_schedule)
2920 rq->curr->sched_class->post_schedule(rq);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002921 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002922
2923 rq->post_schedule = 0;
2924 }
2925}
2926
2927#else
2928
2929static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2930{
2931}
2932
2933static inline void post_schedule(struct rq *rq)
2934{
2935}
2936
2937#endif
2938
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939/**
2940 * schedule_tail - first thing a freshly forked thread must call.
2941 * @prev: the thread we just switched away from.
2942 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002943asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 __releases(rq->lock)
2945{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002946 struct rq *rq = this_rq();
2947
Nick Piggin4866cde2005-06-25 14:57:23 -07002948 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002949
Gregory Haskins3f029d32009-07-29 11:08:47 -04002950 /*
2951 * FIXME: do we need to worry about rq being invalidated by the
2952 * task_switch?
2953 */
2954 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002955
Nick Piggin4866cde2005-06-25 14:57:23 -07002956#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2957 /* In this case, finish_task_switch does not reenable preemption */
2958 preempt_enable();
2959#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002961 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962}
2963
2964/*
2965 * context_switch - switch to the new MM and the new
2966 * thread's register state.
2967 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002968static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07002969context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07002970 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971{
Ingo Molnardd41f592007-07-09 18:51:59 +02002972 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973
Avi Kivitye107be32007-07-26 13:40:43 +02002974 prepare_task_switch(rq, prev, next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002975
Ingo Molnardd41f592007-07-09 18:51:59 +02002976 mm = next->mm;
2977 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01002978 /*
2979 * For paravirt, this is coupled with an exit in switch_to to
2980 * combine the page table reload and the switch backend into
2981 * one hypercall.
2982 */
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -08002983 arch_start_context_switch(prev);
Zachary Amsden9226d122007-02-13 13:26:21 +01002984
Heiko Carstens31915ab2010-09-16 14:42:25 +02002985 if (!mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 next->active_mm = oldmm;
2987 atomic_inc(&oldmm->mm_count);
2988 enter_lazy_tlb(oldmm, next);
2989 } else
2990 switch_mm(oldmm, mm, next);
2991
Heiko Carstens31915ab2010-09-16 14:42:25 +02002992 if (!prev->mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 rq->prev_mm = oldmm;
2995 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002996 /*
2997 * Since the runqueue lock will be released by the next
2998 * task (which is an invalid locking op but in the case
2999 * of the scheduler it's an obvious special-case), so we
3000 * do an early lockdep release here:
3001 */
3002#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07003003 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07003004#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005
3006 /* Here we just switch the register state and the stack. */
3007 switch_to(prev, next, prev);
3008
Ingo Molnardd41f592007-07-09 18:51:59 +02003009 barrier();
3010 /*
3011 * this_rq must be evaluated again because prev may have moved
3012 * CPUs since it called schedule(), thus the 'rq' on its stack
3013 * frame will be invalid.
3014 */
3015 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016}
3017
3018/*
3019 * nr_running, nr_uninterruptible and nr_context_switches:
3020 *
3021 * externally visible scheduler statistics: current number of runnable
3022 * threads, current number of uninterruptible-sleeping threads, total
3023 * number of context switches performed since bootup.
3024 */
3025unsigned long nr_running(void)
3026{
3027 unsigned long i, sum = 0;
3028
3029 for_each_online_cpu(i)
3030 sum += cpu_rq(i)->nr_running;
3031
3032 return sum;
3033}
3034
3035unsigned long nr_uninterruptible(void)
3036{
3037 unsigned long i, sum = 0;
3038
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003039 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 sum += cpu_rq(i)->nr_uninterruptible;
3041
3042 /*
3043 * Since we read the counters lockless, it might be slightly
3044 * inaccurate. Do not allow it to go below zero though:
3045 */
3046 if (unlikely((long)sum < 0))
3047 sum = 0;
3048
3049 return sum;
3050}
3051
3052unsigned long long nr_context_switches(void)
3053{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07003054 int i;
3055 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003057 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 sum += cpu_rq(i)->nr_switches;
3059
3060 return sum;
3061}
3062
3063unsigned long nr_iowait(void)
3064{
3065 unsigned long i, sum = 0;
3066
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003067 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 sum += atomic_read(&cpu_rq(i)->nr_iowait);
3069
3070 return sum;
3071}
3072
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003073unsigned long nr_iowait_cpu(int cpu)
Arjan van de Ven69d25872009-09-21 17:04:08 -07003074{
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003075 struct rq *this = cpu_rq(cpu);
Arjan van de Ven69d25872009-09-21 17:04:08 -07003076 return atomic_read(&this->nr_iowait);
3077}
3078
3079unsigned long this_cpu_load(void)
3080{
3081 struct rq *this = this_rq();
3082 return this->cpu_load[0];
3083}
3084
3085
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003086/* Variables and functions for calc_load */
3087static atomic_long_t calc_load_tasks;
3088static unsigned long calc_load_update;
3089unsigned long avenrun[3];
3090EXPORT_SYMBOL(avenrun);
3091
Peter Zijlstra74f51872010-04-22 21:50:19 +02003092static long calc_load_fold_active(struct rq *this_rq)
3093{
3094 long nr_active, delta = 0;
3095
3096 nr_active = this_rq->nr_running;
3097 nr_active += (long) this_rq->nr_uninterruptible;
3098
3099 if (nr_active != this_rq->calc_load_active) {
3100 delta = nr_active - this_rq->calc_load_active;
3101 this_rq->calc_load_active = nr_active;
3102 }
3103
3104 return delta;
3105}
3106
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003107static unsigned long
3108calc_load(unsigned long load, unsigned long exp, unsigned long active)
3109{
3110 load *= exp;
3111 load += active * (FIXED_1 - exp);
3112 load += 1UL << (FSHIFT - 1);
3113 return load >> FSHIFT;
3114}
3115
Peter Zijlstra74f51872010-04-22 21:50:19 +02003116#ifdef CONFIG_NO_HZ
3117/*
3118 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3119 *
3120 * When making the ILB scale, we should try to pull this in as well.
3121 */
3122static atomic_long_t calc_load_tasks_idle;
3123
3124static void calc_load_account_idle(struct rq *this_rq)
3125{
3126 long delta;
3127
3128 delta = calc_load_fold_active(this_rq);
3129 if (delta)
3130 atomic_long_add(delta, &calc_load_tasks_idle);
3131}
3132
3133static long calc_load_fold_idle(void)
3134{
3135 long delta = 0;
3136
3137 /*
3138 * Its got a race, we don't care...
3139 */
3140 if (atomic_long_read(&calc_load_tasks_idle))
3141 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3142
3143 return delta;
3144}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003145
3146/**
3147 * fixed_power_int - compute: x^n, in O(log n) time
3148 *
3149 * @x: base of the power
3150 * @frac_bits: fractional bits of @x
3151 * @n: power to raise @x to.
3152 *
3153 * By exploiting the relation between the definition of the natural power
3154 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
3155 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3156 * (where: n_i \elem {0, 1}, the binary vector representing n),
3157 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3158 * of course trivially computable in O(log_2 n), the length of our binary
3159 * vector.
3160 */
3161static unsigned long
3162fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3163{
3164 unsigned long result = 1UL << frac_bits;
3165
3166 if (n) for (;;) {
3167 if (n & 1) {
3168 result *= x;
3169 result += 1UL << (frac_bits - 1);
3170 result >>= frac_bits;
3171 }
3172 n >>= 1;
3173 if (!n)
3174 break;
3175 x *= x;
3176 x += 1UL << (frac_bits - 1);
3177 x >>= frac_bits;
3178 }
3179
3180 return result;
3181}
3182
3183/*
3184 * a1 = a0 * e + a * (1 - e)
3185 *
3186 * a2 = a1 * e + a * (1 - e)
3187 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3188 * = a0 * e^2 + a * (1 - e) * (1 + e)
3189 *
3190 * a3 = a2 * e + a * (1 - e)
3191 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3192 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3193 *
3194 * ...
3195 *
3196 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3197 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3198 * = a0 * e^n + a * (1 - e^n)
3199 *
3200 * [1] application of the geometric series:
3201 *
3202 * n 1 - x^(n+1)
3203 * S_n := \Sum x^i = -------------
3204 * i=0 1 - x
3205 */
3206static unsigned long
3207calc_load_n(unsigned long load, unsigned long exp,
3208 unsigned long active, unsigned int n)
3209{
3210
3211 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3212}
3213
3214/*
3215 * NO_HZ can leave us missing all per-cpu ticks calling
3216 * calc_load_account_active(), but since an idle CPU folds its delta into
3217 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3218 * in the pending idle delta if our idle period crossed a load cycle boundary.
3219 *
3220 * Once we've updated the global active value, we need to apply the exponential
3221 * weights adjusted to the number of cycles missed.
3222 */
3223static void calc_global_nohz(unsigned long ticks)
3224{
3225 long delta, active, n;
3226
3227 if (time_before(jiffies, calc_load_update))
3228 return;
3229
3230 /*
3231 * If we crossed a calc_load_update boundary, make sure to fold
3232 * any pending idle changes, the respective CPUs might have
3233 * missed the tick driven calc_load_account_active() update
3234 * due to NO_HZ.
3235 */
3236 delta = calc_load_fold_idle();
3237 if (delta)
3238 atomic_long_add(delta, &calc_load_tasks);
3239
3240 /*
3241 * If we were idle for multiple load cycles, apply them.
3242 */
3243 if (ticks >= LOAD_FREQ) {
3244 n = ticks / LOAD_FREQ;
3245
3246 active = atomic_long_read(&calc_load_tasks);
3247 active = active > 0 ? active * FIXED_1 : 0;
3248
3249 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3250 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3251 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3252
3253 calc_load_update += n * LOAD_FREQ;
3254 }
3255
3256 /*
3257 * Its possible the remainder of the above division also crosses
3258 * a LOAD_FREQ period, the regular check in calc_global_load()
3259 * which comes after this will take care of that.
3260 *
3261 * Consider us being 11 ticks before a cycle completion, and us
3262 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3263 * age us 4 cycles, and the test in calc_global_load() will
3264 * pick up the final one.
3265 */
3266}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003267#else
3268static void calc_load_account_idle(struct rq *this_rq)
3269{
3270}
3271
3272static inline long calc_load_fold_idle(void)
3273{
3274 return 0;
3275}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003276
3277static void calc_global_nohz(unsigned long ticks)
3278{
3279}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003280#endif
3281
Thomas Gleixner2d024942009-05-02 20:08:52 +02003282/**
3283 * get_avenrun - get the load average array
3284 * @loads: pointer to dest load array
3285 * @offset: offset to add
3286 * @shift: shift count to shift the result left
3287 *
3288 * These values are estimates at best, so no need for locking.
3289 */
3290void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3291{
3292 loads[0] = (avenrun[0] + offset) << shift;
3293 loads[1] = (avenrun[1] + offset) << shift;
3294 loads[2] = (avenrun[2] + offset) << shift;
3295}
3296
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003297/*
3298 * calc_load - update the avenrun load estimates 10 ticks after the
3299 * CPUs have updated calc_load_tasks.
3300 */
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003301void calc_global_load(unsigned long ticks)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003302{
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003303 long active;
3304
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003305 calc_global_nohz(ticks);
3306
3307 if (time_before(jiffies, calc_load_update + 10))
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003308 return;
3309
3310 active = atomic_long_read(&calc_load_tasks);
3311 active = active > 0 ? active * FIXED_1 : 0;
3312
3313 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3314 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3315 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3316
3317 calc_load_update += LOAD_FREQ;
3318}
3319
3320/*
Peter Zijlstra74f51872010-04-22 21:50:19 +02003321 * Called from update_cpu_load() to periodically update this CPU's
3322 * active count.
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003323 */
3324static void calc_load_account_active(struct rq *this_rq)
3325{
Peter Zijlstra74f51872010-04-22 21:50:19 +02003326 long delta;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003327
Peter Zijlstra74f51872010-04-22 21:50:19 +02003328 if (time_before(jiffies, this_rq->calc_load_update))
3329 return;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003330
Peter Zijlstra74f51872010-04-22 21:50:19 +02003331 delta = calc_load_fold_active(this_rq);
3332 delta += calc_load_fold_idle();
3333 if (delta)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003334 atomic_long_add(delta, &calc_load_tasks);
Peter Zijlstra74f51872010-04-22 21:50:19 +02003335
3336 this_rq->calc_load_update += LOAD_FREQ;
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08003337}
3338
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339/*
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003340 * The exact cpuload at various idx values, calculated at every tick would be
3341 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3342 *
3343 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3344 * on nth tick when cpu may be busy, then we have:
3345 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3346 * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3347 *
3348 * decay_load_missed() below does efficient calculation of
3349 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3350 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3351 *
3352 * The calculation is approximated on a 128 point scale.
3353 * degrade_zero_ticks is the number of ticks after which load at any
3354 * particular idx is approximated to be zero.
3355 * degrade_factor is a precomputed table, a row for each load idx.
3356 * Each column corresponds to degradation factor for a power of two ticks,
3357 * based on 128 point scale.
3358 * Example:
3359 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3360 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3361 *
3362 * With this power of 2 load factors, we can degrade the load n times
3363 * by looking at 1 bits in n and doing as many mult/shift instead of
3364 * n mult/shifts needed by the exact degradation.
3365 */
3366#define DEGRADE_SHIFT 7
3367static const unsigned char
3368 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3369static const unsigned char
3370 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3371 {0, 0, 0, 0, 0, 0, 0, 0},
3372 {64, 32, 8, 0, 0, 0, 0, 0},
3373 {96, 72, 40, 12, 1, 0, 0},
3374 {112, 98, 75, 43, 15, 1, 0},
3375 {120, 112, 98, 76, 45, 16, 2} };
3376
3377/*
3378 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
3379 * would be when CPU is idle and so we just decay the old load without
3380 * adding any new load.
3381 */
3382static unsigned long
3383decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3384{
3385 int j = 0;
3386
3387 if (!missed_updates)
3388 return load;
3389
3390 if (missed_updates >= degrade_zero_ticks[idx])
3391 return 0;
3392
3393 if (idx == 1)
3394 return load >> missed_updates;
3395
3396 while (missed_updates) {
3397 if (missed_updates % 2)
3398 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3399
3400 missed_updates >>= 1;
3401 j++;
3402 }
3403 return load;
3404}
3405
3406/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003407 * Update rq->cpu_load[] statistics. This function is usually called every
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003408 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3409 * every tick. We fix it up based on jiffies.
Ingo Molnar48f24c42006-07-03 00:25:40 -07003410 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003411static void update_cpu_load(struct rq *this_rq)
Ingo Molnar48f24c42006-07-03 00:25:40 -07003412{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02003413 unsigned long this_load = this_rq->load.weight;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003414 unsigned long curr_jiffies = jiffies;
3415 unsigned long pending_updates;
Ingo Molnardd41f592007-07-09 18:51:59 +02003416 int i, scale;
3417
3418 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02003419
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003420 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3421 if (curr_jiffies == this_rq->last_load_update_tick)
3422 return;
3423
3424 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3425 this_rq->last_load_update_tick = curr_jiffies;
3426
Ingo Molnardd41f592007-07-09 18:51:59 +02003427 /* Update our load: */
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003428 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3429 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
Ingo Molnardd41f592007-07-09 18:51:59 +02003430 unsigned long old_load, new_load;
3431
3432 /* scale is effectively 1 << i now, and >> i divides by scale */
3433
3434 old_load = this_rq->cpu_load[i];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003435 old_load = decay_load_missed(old_load, pending_updates - 1, i);
Ingo Molnardd41f592007-07-09 18:51:59 +02003436 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02003437 /*
3438 * Round up the averaging division if load is increasing. This
3439 * prevents us from getting stuck on 9 if the load is 10, for
3440 * example.
3441 */
3442 if (new_load > old_load)
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003443 new_load += scale - 1;
3444
3445 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
Ingo Molnardd41f592007-07-09 18:51:59 +02003446 }
Suresh Siddhada2b71e2010-08-23 13:42:51 -07003447
3448 sched_avg_update(this_rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003449}
3450
3451static void update_cpu_load_active(struct rq *this_rq)
3452{
3453 update_cpu_load(this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003454
Peter Zijlstra74f51872010-04-22 21:50:19 +02003455 calc_load_account_active(this_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07003456}
3457
Ingo Molnardd41f592007-07-09 18:51:59 +02003458#ifdef CONFIG_SMP
3459
Ingo Molnar48f24c42006-07-03 00:25:40 -07003460/*
Peter Zijlstra38022902009-12-16 18:04:37 +01003461 * sched_exec - execve() is a valuable balancing opportunity, because at
3462 * this point the task has the smallest effective memory and cache footprint.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 */
Peter Zijlstra38022902009-12-16 18:04:37 +01003464void sched_exec(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465{
Peter Zijlstra38022902009-12-16 18:04:37 +01003466 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003468 struct rq *rq;
Peter Zijlstra0017d732010-03-24 18:34:10 +01003469 int dest_cpu;
Peter Zijlstra38022902009-12-16 18:04:37 +01003470
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 rq = task_rq_lock(p, &flags);
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003472 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
Peter Zijlstra0017d732010-03-24 18:34:10 +01003473 if (dest_cpu == smp_processor_id())
3474 goto unlock;
Peter Zijlstra38022902009-12-16 18:04:37 +01003475
3476 /*
3477 * select_task_rq() can race against ->cpus_allowed
3478 */
Oleg Nesterov30da6882010-03-15 10:10:19 +01003479 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003480 likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
Tejun Heo969c7922010-05-06 18:49:21 +02003481 struct migration_arg arg = { p, dest_cpu };
Ingo Molnar36c8b582006-07-03 00:25:41 -07003482
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003483 task_rq_unlock(rq, p, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02003484 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 return;
3486 }
Peter Zijlstra0017d732010-03-24 18:34:10 +01003487unlock:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003488 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489}
3490
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491#endif
3492
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493DEFINE_PER_CPU(struct kernel_stat, kstat);
3494
3495EXPORT_PER_CPU_SYMBOL(kstat);
3496
3497/*
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003498 * Return any ns on the sched_clock that have not yet been accounted in
Frank Mayharf06febc2008-09-12 09:54:39 -07003499 * @p in case that task is currently running.
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003500 *
3501 * Called with task_rq_lock() held on @rq.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 */
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003503static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3504{
3505 u64 ns = 0;
3506
3507 if (task_current(rq, p)) {
3508 update_rq_clock(rq);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07003509 ns = rq->clock_task - p->se.exec_start;
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003510 if ((s64)ns < 0)
3511 ns = 0;
3512 }
3513
3514 return ns;
3515}
3516
Frank Mayharbb34d922008-09-12 09:54:39 -07003517unsigned long long task_delta_exec(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 unsigned long flags;
Ingo Molnar41b86e92007-07-09 18:51:58 +02003520 struct rq *rq;
Frank Mayharbb34d922008-09-12 09:54:39 -07003521 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003522
Ingo Molnar41b86e92007-07-09 18:51:58 +02003523 rq = task_rq_lock(p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003524 ns = do_task_delta_exec(p, rq);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003525 task_rq_unlock(rq, p, &flags);
Ingo Molnar15084872008-09-30 08:28:17 +02003526
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003527 return ns;
3528}
Frank Mayharf06febc2008-09-12 09:54:39 -07003529
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003530/*
3531 * Return accounted runtime for the task.
3532 * In case the task is currently running, return the runtime plus current's
3533 * pending runtime that have not been accounted yet.
3534 */
3535unsigned long long task_sched_runtime(struct task_struct *p)
3536{
3537 unsigned long flags;
3538 struct rq *rq;
3539 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003540
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003541 rq = task_rq_lock(p, &flags);
3542 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003543 task_rq_unlock(rq, p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003544
3545 return ns;
3546}
3547
3548/*
3549 * Return sum_exec_runtime for the thread group.
3550 * In case the task is currently running, return the sum plus current's
3551 * pending runtime that have not been accounted yet.
3552 *
3553 * Note that the thread group might have other running tasks as well,
3554 * so the return value not includes other pending runtime that other
3555 * running tasks might have.
3556 */
3557unsigned long long thread_group_sched_runtime(struct task_struct *p)
3558{
3559 struct task_cputime totals;
3560 unsigned long flags;
3561 struct rq *rq;
3562 u64 ns;
3563
3564 rq = task_rq_lock(p, &flags);
3565 thread_group_cputime(p, &totals);
3566 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003567 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568
3569 return ns;
3570}
3571
3572/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573 * Account user cpu time to a process.
3574 * @p: the process that the cpu time gets accounted to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 * @cputime: the cpu time spent in user space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003576 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003578void account_user_time(struct task_struct *p, cputime_t cputime,
3579 cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580{
3581 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3582 cputime64_t tmp;
3583
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003584 /* Add user time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003586 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003587 account_group_user_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588
3589 /* Add user time to cpustat. */
3590 tmp = cputime_to_cputime64(cputime);
3591 if (TASK_NICE(p) > 0)
3592 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3593 else
3594 cpustat->user = cputime64_add(cpustat->user, tmp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05303595
3596 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
Jonathan Lim49b5cf32008-07-25 01:48:40 -07003597 /* Account for user time used */
3598 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599}
3600
3601/*
Laurent Vivier94886b82007-10-15 17:00:19 +02003602 * Account guest cpu time to a process.
3603 * @p: the process that the cpu time gets accounted to
3604 * @cputime: the cpu time spent in virtual machine since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003605 * @cputime_scaled: cputime scaled by cpu frequency
Laurent Vivier94886b82007-10-15 17:00:19 +02003606 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003607static void account_guest_time(struct task_struct *p, cputime_t cputime,
3608 cputime_t cputime_scaled)
Laurent Vivier94886b82007-10-15 17:00:19 +02003609{
3610 cputime64_t tmp;
3611 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3612
3613 tmp = cputime_to_cputime64(cputime);
3614
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003615 /* Add guest time to process. */
Laurent Vivier94886b82007-10-15 17:00:19 +02003616 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003617 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003618 account_group_user_time(p, cputime);
Laurent Vivier94886b82007-10-15 17:00:19 +02003619 p->gtime = cputime_add(p->gtime, cputime);
3620
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003621 /* Add guest time to cpustat. */
Ryota Ozakice0e7b22009-10-24 01:20:10 +09003622 if (TASK_NICE(p) > 0) {
3623 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3624 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3625 } else {
3626 cpustat->user = cputime64_add(cpustat->user, tmp);
3627 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3628 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003629}
3630
3631/*
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003632 * Account system cpu time to a process and desired cpustat field
3633 * @p: the process that the cpu time gets accounted to
3634 * @cputime: the cpu time spent in kernel space since the last update
3635 * @cputime_scaled: cputime scaled by cpu frequency
3636 * @target_cputime64: pointer to cpustat field that has to be updated
3637 */
3638static inline
3639void __account_system_time(struct task_struct *p, cputime_t cputime,
3640 cputime_t cputime_scaled, cputime64_t *target_cputime64)
3641{
3642 cputime64_t tmp = cputime_to_cputime64(cputime);
3643
3644 /* Add system time to process. */
3645 p->stime = cputime_add(p->stime, cputime);
3646 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3647 account_group_system_time(p, cputime);
3648
3649 /* Add system time to cpustat. */
3650 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
3651 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3652
3653 /* Account for system time used */
3654 acct_update_integrals(p);
3655}
3656
3657/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658 * Account system cpu time to a process.
3659 * @p: the process that the cpu time gets accounted to
3660 * @hardirq_offset: the offset to subtract from hardirq_count()
3661 * @cputime: the cpu time spent in kernel space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003662 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663 */
3664void account_system_time(struct task_struct *p, int hardirq_offset,
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003665 cputime_t cputime, cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666{
3667 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003668 cputime64_t *target_cputime64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003670 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003671 account_guest_time(p, cputime, cputime_scaled);
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003672 return;
3673 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003674
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 if (hardirq_count() - hardirq_offset)
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003676 target_cputime64 = &cpustat->irq;
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07003677 else if (in_serving_softirq())
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003678 target_cputime64 = &cpustat->softirq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 else
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003680 target_cputime64 = &cpustat->system;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003681
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003682 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683}
3684
3685/*
3686 * Account for involuntary wait time.
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003687 * @cputime: the cpu time spent in involuntary wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003689void account_steal_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003692 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3693
3694 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695}
3696
Christoph Lameter7835b982006-12-10 02:20:22 -08003697/*
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003698 * Account for idle time.
3699 * @cputime: the cpu time spent in idle wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003701void account_idle_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702{
3703 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003704 cputime64_t cputime64 = cputime_to_cputime64(cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 struct rq *rq = this_rq();
3706
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003707 if (atomic_read(&rq->nr_iowait) > 0)
3708 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3709 else
3710 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
Christoph Lameter7835b982006-12-10 02:20:22 -08003711}
3712
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003713#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3714
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003715#ifdef CONFIG_IRQ_TIME_ACCOUNTING
3716/*
3717 * Account a tick to a process and cpustat
3718 * @p: the process that the cpu time gets accounted to
3719 * @user_tick: is the tick from userspace
3720 * @rq: the pointer to rq
3721 *
3722 * Tick demultiplexing follows the order
3723 * - pending hardirq update
3724 * - pending softirq update
3725 * - user_time
3726 * - idle_time
3727 * - system time
3728 * - check for guest_time
3729 * - else account as system_time
3730 *
3731 * Check for hardirq is done both for system and user time as there is
3732 * no timer going off while we are on hardirq and hence we may never get an
3733 * opportunity to update it solely in system time.
3734 * p->stime and friends are only updated on system time and not on irq
3735 * softirq as those do not count in task exec_runtime any more.
3736 */
3737static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3738 struct rq *rq)
3739{
3740 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3741 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
3742 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3743
3744 if (irqtime_account_hi_update()) {
3745 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3746 } else if (irqtime_account_si_update()) {
3747 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
Venkatesh Pallipadi414bee92010-12-21 17:09:04 -08003748 } else if (this_cpu_ksoftirqd() == p) {
3749 /*
3750 * ksoftirqd time do not get accounted in cpu_softirq_time.
3751 * So, we have to handle it separately here.
3752 * Also, p->stime needs to be updated for ksoftirqd.
3753 */
3754 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3755 &cpustat->softirq);
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003756 } else if (user_tick) {
3757 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3758 } else if (p == rq->idle) {
3759 account_idle_time(cputime_one_jiffy);
3760 } else if (p->flags & PF_VCPU) { /* System time or guest time */
3761 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
3762 } else {
3763 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3764 &cpustat->system);
3765 }
3766}
3767
3768static void irqtime_account_idle_ticks(int ticks)
3769{
3770 int i;
3771 struct rq *rq = this_rq();
3772
3773 for (i = 0; i < ticks; i++)
3774 irqtime_account_process_tick(current, 0, rq);
3775}
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003776#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003777static void irqtime_account_idle_ticks(int ticks) {}
3778static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3779 struct rq *rq) {}
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003780#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003781
3782/*
3783 * Account a single tick of cpu time.
3784 * @p: the process that the cpu time gets accounted to
3785 * @user_tick: indicates if the tick is a user or a system tick
3786 */
3787void account_process_tick(struct task_struct *p, int user_tick)
3788{
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003789 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003790 struct rq *rq = this_rq();
3791
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003792 if (sched_clock_irqtime) {
3793 irqtime_account_process_tick(p, user_tick, rq);
3794 return;
3795 }
3796
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003797 if (user_tick)
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003798 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
Eric Dumazetf5f293a2009-04-29 14:44:49 +02003799 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003800 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003801 one_jiffy_scaled);
3802 else
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003803 account_idle_time(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003804}
3805
3806/*
3807 * Account multiple ticks of steal time.
3808 * @p: the process from which the cpu time has been stolen
3809 * @ticks: number of stolen ticks
3810 */
3811void account_steal_ticks(unsigned long ticks)
3812{
3813 account_steal_time(jiffies_to_cputime(ticks));
3814}
3815
3816/*
3817 * Account multiple ticks of idle time.
3818 * @ticks: number of stolen ticks
3819 */
3820void account_idle_ticks(unsigned long ticks)
3821{
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003822
3823 if (sched_clock_irqtime) {
3824 irqtime_account_idle_ticks(ticks);
3825 return;
3826 }
3827
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003828 account_idle_time(jiffies_to_cputime(ticks));
3829}
3830
3831#endif
3832
Christoph Lameter7835b982006-12-10 02:20:22 -08003833/*
Balbir Singh49048622008-09-05 18:12:23 +02003834 * Use precise platform statistics if available:
3835 */
3836#ifdef CONFIG_VIRT_CPU_ACCOUNTING
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003837void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003838{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003839 *ut = p->utime;
3840 *st = p->stime;
Balbir Singh49048622008-09-05 18:12:23 +02003841}
3842
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003843void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003844{
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003845 struct task_cputime cputime;
3846
3847 thread_group_cputime(p, &cputime);
3848
3849 *ut = cputime.utime;
3850 *st = cputime.stime;
Balbir Singh49048622008-09-05 18:12:23 +02003851}
3852#else
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003853
3854#ifndef nsecs_to_cputime
Hidetoshi Setob7b20df2009-11-26 14:49:27 +09003855# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003856#endif
3857
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003858void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003859{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003860 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
Balbir Singh49048622008-09-05 18:12:23 +02003861
3862 /*
3863 * Use CFS's precise accounting:
3864 */
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003865 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
Balbir Singh49048622008-09-05 18:12:23 +02003866
3867 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003868 u64 temp = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003869
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003870 temp *= utime;
Balbir Singh49048622008-09-05 18:12:23 +02003871 do_div(temp, total);
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003872 utime = (cputime_t)temp;
3873 } else
3874 utime = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003875
3876 /*
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003877 * Compare with previous values, to keep monotonicity:
Balbir Singh49048622008-09-05 18:12:23 +02003878 */
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003879 p->prev_utime = max(p->prev_utime, utime);
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003880 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
Balbir Singh49048622008-09-05 18:12:23 +02003881
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003882 *ut = p->prev_utime;
3883 *st = p->prev_stime;
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003884}
Balbir Singh49048622008-09-05 18:12:23 +02003885
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003886/*
3887 * Must be called with siglock held.
3888 */
3889void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3890{
3891 struct signal_struct *sig = p->signal;
3892 struct task_cputime cputime;
3893 cputime_t rtime, utime, total;
3894
3895 thread_group_cputime(p, &cputime);
3896
3897 total = cputime_add(cputime.utime, cputime.stime);
3898 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3899
3900 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003901 u64 temp = rtime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003902
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003903 temp *= cputime.utime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003904 do_div(temp, total);
3905 utime = (cputime_t)temp;
3906 } else
3907 utime = rtime;
3908
3909 sig->prev_utime = max(sig->prev_utime, utime);
3910 sig->prev_stime = max(sig->prev_stime,
3911 cputime_sub(rtime, sig->prev_utime));
3912
3913 *ut = sig->prev_utime;
3914 *st = sig->prev_stime;
Balbir Singh49048622008-09-05 18:12:23 +02003915}
3916#endif
3917
Balbir Singh49048622008-09-05 18:12:23 +02003918/*
Christoph Lameter7835b982006-12-10 02:20:22 -08003919 * This function gets called by the timer code, with HZ frequency.
3920 * We call it with interrupts disabled.
3921 *
3922 * It also gets called by the fork code, when changing the parent's
3923 * timeslices.
3924 */
3925void scheduler_tick(void)
3926{
Christoph Lameter7835b982006-12-10 02:20:22 -08003927 int cpu = smp_processor_id();
3928 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003929 struct task_struct *curr = rq->curr;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003930
3931 sched_clock_tick();
Christoph Lameter7835b982006-12-10 02:20:22 -08003932
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003933 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003934 update_rq_clock(rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003935 update_cpu_load_active(rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01003936 curr->sched_class->task_tick(rq, curr, 0);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003937 raw_spin_unlock(&rq->lock);
Ingo Molnardd41f592007-07-09 18:51:59 +02003938
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003939 perf_event_task_tick();
Peter Zijlstrae220d2d2009-05-23 18:28:55 +02003940
Christoph Lametere418e1c2006-12-10 02:20:23 -08003941#ifdef CONFIG_SMP
Ingo Molnardd41f592007-07-09 18:51:59 +02003942 rq->idle_at_tick = idle_cpu(cpu);
3943 trigger_load_balance(rq, cpu);
Christoph Lametere418e1c2006-12-10 02:20:23 -08003944#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945}
3946
Lai Jiangshan132380a2009-04-02 14:18:25 +08003947notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003948{
3949 if (in_lock_functions(addr)) {
3950 addr = CALLER_ADDR2;
3951 if (in_lock_functions(addr))
3952 addr = CALLER_ADDR3;
3953 }
3954 return addr;
3955}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956
Steven Rostedt7e49fcc2009-01-22 19:01:40 -05003957#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3958 defined(CONFIG_PREEMPT_TRACER))
3959
Srinivasa Ds43627582008-02-23 15:24:04 -08003960void __kprobes add_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003962#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963 /*
3964 * Underflow?
3965 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003966 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3967 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003968#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969 preempt_count() += val;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003970#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971 /*
3972 * Spinlock count overflowing soon?
3973 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08003974 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3975 PREEMPT_MASK - 10);
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003976#endif
3977 if (preempt_count() == val)
3978 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979}
3980EXPORT_SYMBOL(add_preempt_count);
3981
Srinivasa Ds43627582008-02-23 15:24:04 -08003982void __kprobes sub_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003983{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003984#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 /*
3986 * Underflow?
3987 */
Ingo Molnar01e3eb82009-01-12 13:00:50 +01003988 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003989 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990 /*
3991 * Is the spinlock portion underflowing?
3992 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003993 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3994 !(preempt_count() & PREEMPT_MASK)))
3995 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003996#endif
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003997
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003998 if (preempt_count() == val)
3999 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 preempt_count() -= val;
4001}
4002EXPORT_SYMBOL(sub_preempt_count);
4003
4004#endif
4005
4006/*
Ingo Molnardd41f592007-07-09 18:51:59 +02004007 * Print scheduling while atomic bug:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 */
Ingo Molnardd41f592007-07-09 18:51:59 +02004009static noinline void __schedule_bug(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010{
Satyam Sharma838225b2007-10-24 18:23:50 +02004011 struct pt_regs *regs = get_irq_regs();
4012
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004013 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4014 prev->comm, prev->pid, preempt_count());
Satyam Sharma838225b2007-10-24 18:23:50 +02004015
Ingo Molnardd41f592007-07-09 18:51:59 +02004016 debug_show_held_locks(prev);
Arjan van de Vene21f5b12008-05-23 09:05:58 -07004017 print_modules();
Ingo Molnardd41f592007-07-09 18:51:59 +02004018 if (irqs_disabled())
4019 print_irqtrace_events(prev);
Satyam Sharma838225b2007-10-24 18:23:50 +02004020
4021 if (regs)
4022 show_regs(regs);
4023 else
4024 dump_stack();
Ingo Molnardd41f592007-07-09 18:51:59 +02004025}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026
Ingo Molnardd41f592007-07-09 18:51:59 +02004027/*
4028 * Various schedule()-time debugging checks and statistics:
4029 */
4030static inline void schedule_debug(struct task_struct *prev)
4031{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032 /*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004033 * Test if we are atomic. Since do_exit() needs to call into
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034 * schedule() atomically, we ignore that path for now.
4035 * Otherwise, whine if we are scheduling when we should not be.
4036 */
Roel Kluin3f33a7c2008-05-13 23:44:11 +02004037 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
Ingo Molnardd41f592007-07-09 18:51:59 +02004038 __schedule_bug(prev);
4039
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4041
Ingo Molnar2d723762007-10-15 17:00:12 +02004042 schedstat_inc(this_rq(), sched_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02004043#ifdef CONFIG_SCHEDSTATS
4044 if (unlikely(prev->lock_depth >= 0)) {
Yong Zhangfce20972011-01-14 15:57:39 +08004045 schedstat_inc(this_rq(), rq_sched_info.bkl_count);
Ingo Molnar2d723762007-10-15 17:00:12 +02004046 schedstat_inc(prev, sched_info.bkl_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02004047 }
4048#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02004049}
4050
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004051static void put_prev_task(struct rq *rq, struct task_struct *prev)
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004052{
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004053 if (prev->on_rq)
Mike Galbraitha64692a2010-03-11 17:16:20 +01004054 update_rq_clock(rq);
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004055 prev->sched_class->put_prev_task(rq, prev);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004056}
4057
Ingo Molnardd41f592007-07-09 18:51:59 +02004058/*
4059 * Pick up the highest-prio task:
4060 */
4061static inline struct task_struct *
Wang Chenb67802e2009-03-02 13:55:26 +08004062pick_next_task(struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02004063{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02004064 const struct sched_class *class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004065 struct task_struct *p;
4066
4067 /*
4068 * Optimization: we know that if all tasks are in
4069 * the fair class we can call that function directly:
4070 */
4071 if (likely(rq->nr_running == rq->cfs.nr_running)) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004072 p = fair_sched_class.pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004073 if (likely(p))
4074 return p;
4075 }
4076
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004077 for_each_class(class) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004078 p = class->pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004079 if (p)
4080 return p;
Ingo Molnardd41f592007-07-09 18:51:59 +02004081 }
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004082
4083 BUG(); /* the idle class will always have a runnable task */
Ingo Molnardd41f592007-07-09 18:51:59 +02004084}
4085
4086/*
4087 * schedule() is the main scheduler function.
4088 */
Peter Zijlstraff743342009-03-13 12:21:26 +01004089asmlinkage void __sched schedule(void)
Ingo Molnardd41f592007-07-09 18:51:59 +02004090{
4091 struct task_struct *prev, *next;
Harvey Harrison67ca7bd2008-02-15 09:56:36 -08004092 unsigned long *switch_count;
Ingo Molnardd41f592007-07-09 18:51:59 +02004093 struct rq *rq;
Peter Zijlstra31656512008-07-18 18:01:23 +02004094 int cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02004095
Peter Zijlstraff743342009-03-13 12:21:26 +01004096need_resched:
4097 preempt_disable();
Ingo Molnardd41f592007-07-09 18:51:59 +02004098 cpu = smp_processor_id();
4099 rq = cpu_rq(cpu);
Paul E. McKenney25502a62010-04-01 17:37:01 -07004100 rcu_note_context_switch(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02004101 prev = rq->curr;
Ingo Molnardd41f592007-07-09 18:51:59 +02004102
Ingo Molnardd41f592007-07-09 18:51:59 +02004103 schedule_debug(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104
Peter Zijlstra31656512008-07-18 18:01:23 +02004105 if (sched_feat(HRTICK))
Mike Galbraithf333fdc2008-05-12 21:20:55 +02004106 hrtick_clear(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004107
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004108 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004110 switch_count = &prev->nivcsw;
Ingo Molnardd41f592007-07-09 18:51:59 +02004111 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
Tejun Heo21aa9af2010-06-08 21:40:37 +02004112 if (unlikely(signal_pending_state(prev->state, prev))) {
Ingo Molnardd41f592007-07-09 18:51:59 +02004113 prev->state = TASK_RUNNING;
Tejun Heo21aa9af2010-06-08 21:40:37 +02004114 } else {
Peter Zijlstra2acca552011-04-05 17:23:50 +02004115 deactivate_task(rq, prev, DEQUEUE_SLEEP);
4116 prev->on_rq = 0;
4117
Tejun Heo21aa9af2010-06-08 21:40:37 +02004118 /*
Peter Zijlstra2acca552011-04-05 17:23:50 +02004119 * If a worker went to sleep, notify and ask workqueue
4120 * whether it wants to wake up a task to maintain
4121 * concurrency.
Tejun Heo21aa9af2010-06-08 21:40:37 +02004122 */
4123 if (prev->flags & PF_WQ_WORKER) {
4124 struct task_struct *to_wakeup;
4125
4126 to_wakeup = wq_worker_sleeping(prev, cpu);
4127 if (to_wakeup)
4128 try_to_wake_up_local(to_wakeup);
4129 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004130
Linus Torvalds6631e632011-04-13 08:08:20 -07004131 /*
Peter Zijlstra2acca552011-04-05 17:23:50 +02004132 * If we are going to sleep and we have plugged IO
4133 * queued, make sure to submit it to avoid deadlocks.
Linus Torvalds6631e632011-04-13 08:08:20 -07004134 */
4135 if (blk_needs_flush_plug(prev)) {
4136 raw_spin_unlock(&rq->lock);
4137 blk_flush_plug(prev);
4138 raw_spin_lock(&rq->lock);
4139 }
Tejun Heo21aa9af2010-06-08 21:40:37 +02004140 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004141 switch_count = &prev->nvcsw;
4142 }
4143
Gregory Haskins3f029d32009-07-29 11:08:47 -04004144 pre_schedule(rq, prev);
Steven Rostedtf65eda42008-01-25 21:08:07 +01004145
Ingo Molnardd41f592007-07-09 18:51:59 +02004146 if (unlikely(!rq->nr_running))
4147 idle_balance(cpu, rq);
4148
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004149 put_prev_task(rq, prev);
Wang Chenb67802e2009-03-02 13:55:26 +08004150 next = pick_next_task(rq);
Mike Galbraithf26f9af2010-12-08 11:05:42 +01004151 clear_tsk_need_resched(prev);
4152 rq->skip_clock_update = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154 if (likely(prev != next)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155 rq->nr_switches++;
4156 rq->curr = next;
4157 ++*switch_count;
4158
Ingo Molnardd41f592007-07-09 18:51:59 +02004159 context_switch(rq, prev, next); /* unlocks the rq */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004160 /*
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004161 * The context switch have flipped the stack from under us
4162 * and restored the local variables which were saved when
4163 * this task called schedule() in the past. prev == current
4164 * is still correct, but it can be moved to another cpu/rq.
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004165 */
4166 cpu = smp_processor_id();
4167 rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004169 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170
Gregory Haskins3f029d32009-07-29 11:08:47 -04004171 post_schedule(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173 preempt_enable_no_resched();
Peter Zijlstraff743342009-03-13 12:21:26 +01004174 if (need_resched())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175 goto need_resched;
4176}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177EXPORT_SYMBOL(schedule);
4178
Frederic Weisbeckerc08f7822009-12-02 20:49:17 +01004179#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004180
4181static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
4182{
4183 bool ret = false;
4184
4185 rcu_read_lock();
4186 if (lock->owner != owner)
4187 goto fail;
4188
4189 /*
4190 * Ensure we emit the owner->on_cpu, dereference _after_ checking
4191 * lock->owner still matches owner, if that fails, owner might
4192 * point to free()d memory, if it still matches, the rcu_read_lock()
4193 * ensures the memory stays valid.
4194 */
4195 barrier();
4196
4197 ret = owner->on_cpu;
4198fail:
4199 rcu_read_unlock();
4200
4201 return ret;
4202}
4203
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004204/*
4205 * Look out! "owner" is an entirely speculative pointer
4206 * access and not reliable.
4207 */
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004208int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004209{
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004210 if (!sched_feat(OWNER_SPIN))
4211 return 0;
4212
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004213 while (owner_running(lock, owner)) {
4214 if (need_resched())
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004215 return 0;
4216
Gerald Schaefer335d7af2010-11-22 15:47:36 +01004217 arch_mutex_cpu_relax();
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004218 }
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004219
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004220 /*
4221 * If the owner changed to another task there is likely
4222 * heavy contention, stop spinning.
4223 */
4224 if (lock->owner)
4225 return 0;
4226
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004227 return 1;
4228}
4229#endif
4230
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231#ifdef CONFIG_PREEMPT
4232/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004233 * this is the entry point to schedule() from in-kernel preemption
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004234 * off of preempt_enable. Kernel preemptions off return from interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235 * occur there and call schedule directly.
4236 */
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004237asmlinkage void __sched notrace preempt_schedule(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238{
4239 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004240
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241 /*
4242 * If there is a non-zero preempt_count or interrupts are disabled,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004243 * we do not want to preempt the current task. Just return..
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 */
Nick Pigginbeed33a2006-10-11 01:21:52 -07004245 if (likely(ti->preempt_count || irqs_disabled()))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246 return;
4247
Andi Kleen3a5c3592007-10-15 17:00:14 +02004248 do {
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004249 add_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004250 schedule();
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004251 sub_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004252
4253 /*
4254 * Check again in case we missed a preemption opportunity
4255 * between schedule and now.
4256 */
4257 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004258 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260EXPORT_SYMBOL(preempt_schedule);
4261
4262/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004263 * this is the entry point to schedule() from kernel preemption
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264 * off of irq context.
4265 * Note, that this is called and return with irqs disabled. This will
4266 * protect us against recursive calling from irq.
4267 */
4268asmlinkage void __sched preempt_schedule_irq(void)
4269{
4270 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004271
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004272 /* Catch callers which need to be fixed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273 BUG_ON(ti->preempt_count || !irqs_disabled());
4274
Andi Kleen3a5c3592007-10-15 17:00:14 +02004275 do {
4276 add_preempt_count(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004277 local_irq_enable();
4278 schedule();
4279 local_irq_disable();
Andi Kleen3a5c3592007-10-15 17:00:14 +02004280 sub_preempt_count(PREEMPT_ACTIVE);
4281
4282 /*
4283 * Check again in case we missed a preemption opportunity
4284 * between schedule and now.
4285 */
4286 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004287 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288}
4289
4290#endif /* CONFIG_PREEMPT */
4291
Peter Zijlstra63859d42009-09-15 19:14:42 +02004292int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004293 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294{
Peter Zijlstra63859d42009-09-15 19:14:42 +02004295 return try_to_wake_up(curr->private, mode, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297EXPORT_SYMBOL(default_wake_function);
4298
4299/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004300 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4301 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 * number) then we wake all the non-exclusive tasks and one exclusive task.
4303 *
4304 * There are circumstances in which we can try to wake a task which has already
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004305 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4307 */
Johannes Weiner78ddb082009-04-14 16:53:05 +02004308static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
Peter Zijlstra63859d42009-09-15 19:14:42 +02004309 int nr_exclusive, int wake_flags, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310{
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004311 wait_queue_t *curr, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004313 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07004314 unsigned flags = curr->flags;
4315
Peter Zijlstra63859d42009-09-15 19:14:42 +02004316 if (curr->func(curr, mode, wake_flags, key) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07004317 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318 break;
4319 }
4320}
4321
4322/**
4323 * __wake_up - wake up threads blocked on a waitqueue.
4324 * @q: the waitqueue
4325 * @mode: which threads
4326 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Martin Waitz67be2dd2005-05-01 08:59:26 -07004327 * @key: is directly passed to the wakeup function
David Howells50fa6102009-04-28 15:01:38 +01004328 *
4329 * It may be assumed that this function implies a write memory barrier before
4330 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004332void __wake_up(wait_queue_head_t *q, unsigned int mode,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004333 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334{
4335 unsigned long flags;
4336
4337 spin_lock_irqsave(&q->lock, flags);
4338 __wake_up_common(q, mode, nr_exclusive, 0, key);
4339 spin_unlock_irqrestore(&q->lock, flags);
4340}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341EXPORT_SYMBOL(__wake_up);
4342
4343/*
4344 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4345 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004346void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347{
4348 __wake_up_common(q, mode, 1, 0, NULL);
4349}
Michal Nazarewicz22c43c82010-05-05 12:53:11 +02004350EXPORT_SYMBOL_GPL(__wake_up_locked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351
Davide Libenzi4ede8162009-03-31 15:24:20 -07004352void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4353{
4354 __wake_up_common(q, mode, 1, 0, key);
4355}
Trond Myklebustbf294b42011-02-21 11:05:41 -08004356EXPORT_SYMBOL_GPL(__wake_up_locked_key);
Davide Libenzi4ede8162009-03-31 15:24:20 -07004357
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358/**
Davide Libenzi4ede8162009-03-31 15:24:20 -07004359 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360 * @q: the waitqueue
4361 * @mode: which threads
4362 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Davide Libenzi4ede8162009-03-31 15:24:20 -07004363 * @key: opaque value to be passed to wakeup targets
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 *
4365 * The sync wakeup differs that the waker knows that it will schedule
4366 * away soon, so while the target thread will be woken up, it will not
4367 * be migrated to another CPU - ie. the two threads are 'synchronized'
4368 * with each other. This can prevent needless bouncing between CPUs.
4369 *
4370 * On UP it can prevent extra preemption.
David Howells50fa6102009-04-28 15:01:38 +01004371 *
4372 * It may be assumed that this function implies a write memory barrier before
4373 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374 */
Davide Libenzi4ede8162009-03-31 15:24:20 -07004375void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4376 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377{
4378 unsigned long flags;
Peter Zijlstra7d478722009-09-14 19:55:44 +02004379 int wake_flags = WF_SYNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380
4381 if (unlikely(!q))
4382 return;
4383
4384 if (unlikely(!nr_exclusive))
Peter Zijlstra7d478722009-09-14 19:55:44 +02004385 wake_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004386
4387 spin_lock_irqsave(&q->lock, flags);
Peter Zijlstra7d478722009-09-14 19:55:44 +02004388 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004389 spin_unlock_irqrestore(&q->lock, flags);
4390}
Davide Libenzi4ede8162009-03-31 15:24:20 -07004391EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4392
4393/*
4394 * __wake_up_sync - see __wake_up_sync_key()
4395 */
4396void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4397{
4398 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4399}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004400EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4401
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004402/**
4403 * complete: - signals a single thread waiting on this completion
4404 * @x: holds the state of this particular completion
4405 *
4406 * This will wake up a single thread waiting on this completion. Threads will be
4407 * awakened in the same order in which they were queued.
4408 *
4409 * See also complete_all(), wait_for_completion() and related routines.
David Howells50fa6102009-04-28 15:01:38 +01004410 *
4411 * It may be assumed that this function implies a write memory barrier before
4412 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004413 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004414void complete(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415{
4416 unsigned long flags;
4417
4418 spin_lock_irqsave(&x->wait.lock, flags);
4419 x->done++;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004420 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421 spin_unlock_irqrestore(&x->wait.lock, flags);
4422}
4423EXPORT_SYMBOL(complete);
4424
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004425/**
4426 * complete_all: - signals all threads waiting on this completion
4427 * @x: holds the state of this particular completion
4428 *
4429 * This will wake up all threads waiting on this particular completion event.
David Howells50fa6102009-04-28 15:01:38 +01004430 *
4431 * It may be assumed that this function implies a write memory barrier before
4432 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004433 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004434void complete_all(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435{
4436 unsigned long flags;
4437
4438 spin_lock_irqsave(&x->wait.lock, flags);
4439 x->done += UINT_MAX/2;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004440 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441 spin_unlock_irqrestore(&x->wait.lock, flags);
4442}
4443EXPORT_SYMBOL(complete_all);
4444
Andi Kleen8cbbe862007-10-15 17:00:14 +02004445static inline long __sched
4446do_wait_for_common(struct completion *x, long timeout, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448 if (!x->done) {
4449 DECLARE_WAITQUEUE(wait, current);
4450
Changli Gaoa93d2f12010-05-07 14:33:26 +08004451 __add_wait_queue_tail_exclusive(&x->wait, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452 do {
Oleg Nesterov94d3d822008-08-20 16:54:41 -07004453 if (signal_pending_state(state, current)) {
Oleg Nesterovea71a542008-06-20 18:32:20 +04004454 timeout = -ERESTARTSYS;
4455 break;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004456 }
4457 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004459 timeout = schedule_timeout(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460 spin_lock_irq(&x->wait.lock);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004461 } while (!x->done && timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 __remove_wait_queue(&x->wait, &wait);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004463 if (!x->done)
4464 return timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 }
4466 x->done--;
Oleg Nesterovea71a542008-06-20 18:32:20 +04004467 return timeout ?: 1;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004468}
4469
4470static long __sched
4471wait_for_common(struct completion *x, long timeout, int state)
4472{
4473 might_sleep();
4474
4475 spin_lock_irq(&x->wait.lock);
4476 timeout = do_wait_for_common(x, timeout, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004478 return timeout;
4479}
4480
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004481/**
4482 * wait_for_completion: - waits for completion of a task
4483 * @x: holds the state of this particular completion
4484 *
4485 * This waits to be signaled for completion of a specific task. It is NOT
4486 * interruptible and there is no timeout.
4487 *
4488 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4489 * and interrupt capability. Also see complete().
4490 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004491void __sched wait_for_completion(struct completion *x)
Andi Kleen8cbbe862007-10-15 17:00:14 +02004492{
4493 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494}
4495EXPORT_SYMBOL(wait_for_completion);
4496
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004497/**
4498 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4499 * @x: holds the state of this particular completion
4500 * @timeout: timeout value in jiffies
4501 *
4502 * This waits for either a completion of a specific task to be signaled or for a
4503 * specified timeout to expire. The timeout is in jiffies. It is not
4504 * interruptible.
4505 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004506unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4508{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004509 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510}
4511EXPORT_SYMBOL(wait_for_completion_timeout);
4512
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004513/**
4514 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4515 * @x: holds the state of this particular completion
4516 *
4517 * This waits for completion of a specific task to be signaled. It is
4518 * interruptible.
4519 */
Andi Kleen8cbbe862007-10-15 17:00:14 +02004520int __sched wait_for_completion_interruptible(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004521{
Andi Kleen51e97992007-10-18 21:32:55 +02004522 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4523 if (t == -ERESTARTSYS)
4524 return t;
4525 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526}
4527EXPORT_SYMBOL(wait_for_completion_interruptible);
4528
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004529/**
4530 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4531 * @x: holds the state of this particular completion
4532 * @timeout: timeout value in jiffies
4533 *
4534 * This waits for either a completion of a specific task to be signaled or for a
4535 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4536 */
NeilBrown6bf41232011-01-05 12:50:16 +11004537long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004538wait_for_completion_interruptible_timeout(struct completion *x,
4539 unsigned long timeout)
4540{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004541 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542}
4543EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4544
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004545/**
4546 * wait_for_completion_killable: - waits for completion of a task (killable)
4547 * @x: holds the state of this particular completion
4548 *
4549 * This waits to be signaled for completion of a specific task. It can be
4550 * interrupted by a kill signal.
4551 */
Matthew Wilcox009e5772007-12-06 12:29:54 -05004552int __sched wait_for_completion_killable(struct completion *x)
4553{
4554 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4555 if (t == -ERESTARTSYS)
4556 return t;
4557 return 0;
4558}
4559EXPORT_SYMBOL(wait_for_completion_killable);
4560
Dave Chinnerbe4de352008-08-15 00:40:44 -07004561/**
Sage Weil0aa12fb2010-05-29 09:12:30 -07004562 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4563 * @x: holds the state of this particular completion
4564 * @timeout: timeout value in jiffies
4565 *
4566 * This waits for either a completion of a specific task to be
4567 * signaled or for a specified timeout to expire. It can be
4568 * interrupted by a kill signal. The timeout is in jiffies.
4569 */
NeilBrown6bf41232011-01-05 12:50:16 +11004570long __sched
Sage Weil0aa12fb2010-05-29 09:12:30 -07004571wait_for_completion_killable_timeout(struct completion *x,
4572 unsigned long timeout)
4573{
4574 return wait_for_common(x, timeout, TASK_KILLABLE);
4575}
4576EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4577
4578/**
Dave Chinnerbe4de352008-08-15 00:40:44 -07004579 * try_wait_for_completion - try to decrement a completion without blocking
4580 * @x: completion structure
4581 *
4582 * Returns: 0 if a decrement cannot be done without blocking
4583 * 1 if a decrement succeeded.
4584 *
4585 * If a completion is being used as a counting completion,
4586 * attempt to decrement the counter without blocking. This
4587 * enables us to avoid waiting if the resource the completion
4588 * is protecting is not available.
4589 */
4590bool try_wait_for_completion(struct completion *x)
4591{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004592 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004593 int ret = 1;
4594
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004595 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004596 if (!x->done)
4597 ret = 0;
4598 else
4599 x->done--;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004600 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004601 return ret;
4602}
4603EXPORT_SYMBOL(try_wait_for_completion);
4604
4605/**
4606 * completion_done - Test to see if a completion has any waiters
4607 * @x: completion structure
4608 *
4609 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4610 * 1 if there are no waiters.
4611 *
4612 */
4613bool completion_done(struct completion *x)
4614{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004615 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004616 int ret = 1;
4617
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004618 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004619 if (!x->done)
4620 ret = 0;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004621 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004622 return ret;
4623}
4624EXPORT_SYMBOL(completion_done);
4625
Andi Kleen8cbbe862007-10-15 17:00:14 +02004626static long __sched
4627sleep_on_common(wait_queue_head_t *q, int state, long timeout)
Ingo Molnar0fec1712007-07-09 18:52:01 +02004628{
4629 unsigned long flags;
4630 wait_queue_t wait;
4631
4632 init_waitqueue_entry(&wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004633
Andi Kleen8cbbe862007-10-15 17:00:14 +02004634 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635
Andi Kleen8cbbe862007-10-15 17:00:14 +02004636 spin_lock_irqsave(&q->lock, flags);
4637 __add_wait_queue(q, &wait);
4638 spin_unlock(&q->lock);
4639 timeout = schedule_timeout(timeout);
4640 spin_lock_irq(&q->lock);
4641 __remove_wait_queue(q, &wait);
4642 spin_unlock_irqrestore(&q->lock, flags);
4643
4644 return timeout;
4645}
4646
4647void __sched interruptible_sleep_on(wait_queue_head_t *q)
4648{
4649 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651EXPORT_SYMBOL(interruptible_sleep_on);
4652
Ingo Molnar0fec1712007-07-09 18:52:01 +02004653long __sched
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004654interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004656 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4659
Ingo Molnar0fec1712007-07-09 18:52:01 +02004660void __sched sleep_on(wait_queue_head_t *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004662 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664EXPORT_SYMBOL(sleep_on);
4665
Ingo Molnar0fec1712007-07-09 18:52:01 +02004666long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004668 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670EXPORT_SYMBOL(sleep_on_timeout);
4671
Ingo Molnarb29739f2006-06-27 02:54:51 -07004672#ifdef CONFIG_RT_MUTEXES
4673
4674/*
4675 * rt_mutex_setprio - set the current priority of a task
4676 * @p: task
4677 * @prio: prio value (kernel-internal form)
4678 *
4679 * This function changes the 'effective' priority of a task. It does
4680 * not touch ->normal_prio like __setscheduler().
4681 *
4682 * Used by the rt_mutex code to implement priority inheritance logic.
4683 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004684void rt_mutex_setprio(struct task_struct *p, int prio)
Ingo Molnarb29739f2006-06-27 02:54:51 -07004685{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004686 int oldprio, on_rq, running;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004687 struct rq *rq;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004688 const struct sched_class *prev_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004689
4690 BUG_ON(prio < 0 || prio > MAX_PRIO);
4691
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004692 rq = __task_rq_lock(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004693
Steven Rostedta8027072010-09-20 15:13:34 -04004694 trace_sched_pi_setprio(p, prio);
Andrew Mortond5f9f942007-05-08 20:27:06 -07004695 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004696 prev_class = p->sched_class;
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004697 on_rq = p->on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004698 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004699 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004700 dequeue_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004701 if (running)
4702 p->sched_class->put_prev_task(rq, p);
Ingo Molnardd41f592007-07-09 18:51:59 +02004703
4704 if (rt_prio(prio))
4705 p->sched_class = &rt_sched_class;
4706 else
4707 p->sched_class = &fair_sched_class;
4708
Ingo Molnarb29739f2006-06-27 02:54:51 -07004709 p->prio = prio;
4710
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004711 if (running)
4712 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004713 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004714 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004715
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004716 check_class_changed(rq, p, prev_class, oldprio);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004717 __task_rq_unlock(rq);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004718}
4719
4720#endif
4721
Ingo Molnar36c8b582006-07-03 00:25:41 -07004722void set_user_nice(struct task_struct *p, long nice)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723{
Ingo Molnardd41f592007-07-09 18:51:59 +02004724 int old_prio, delta, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004726 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004727
4728 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4729 return;
4730 /*
4731 * We have to be careful, if called from sys_setpriority(),
4732 * the task might be in the middle of scheduling on another CPU.
4733 */
4734 rq = task_rq_lock(p, &flags);
4735 /*
4736 * The RT priorities are set via sched_setscheduler(), but we still
4737 * allow the 'normal' nice value to be set - but as expected
4738 * it wont have any effect on scheduling until the task is
Ingo Molnardd41f592007-07-09 18:51:59 +02004739 * SCHED_FIFO/SCHED_RR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740 */
Ingo Molnare05606d2007-07-09 18:51:59 +02004741 if (task_has_rt_policy(p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742 p->static_prio = NICE_TO_PRIO(nice);
4743 goto out_unlock;
4744 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004745 on_rq = p->on_rq;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004746 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004747 dequeue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749 p->static_prio = NICE_TO_PRIO(nice);
Peter Williams2dd73a42006-06-27 02:54:34 -07004750 set_load_weight(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004751 old_prio = p->prio;
4752 p->prio = effective_prio(p);
4753 delta = p->prio - old_prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754
Ingo Molnardd41f592007-07-09 18:51:59 +02004755 if (on_rq) {
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004756 enqueue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757 /*
Andrew Mortond5f9f942007-05-08 20:27:06 -07004758 * If the task increased its priority or is running and
4759 * lowered its priority, then reschedule its CPU:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004760 */
Andrew Mortond5f9f942007-05-08 20:27:06 -07004761 if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762 resched_task(rq->curr);
4763 }
4764out_unlock:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004765 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004767EXPORT_SYMBOL(set_user_nice);
4768
Matt Mackalle43379f2005-05-01 08:59:00 -07004769/*
4770 * can_nice - check if a task can reduce its nice value
4771 * @p: task
4772 * @nice: nice value
4773 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004774int can_nice(const struct task_struct *p, const int nice)
Matt Mackalle43379f2005-05-01 08:59:00 -07004775{
Matt Mackall024f4742005-08-18 11:24:19 -07004776 /* convert nice value [19,-20] to rlimit style value [1,40] */
4777 int nice_rlim = 20 - nice;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004778
Jiri Slaby78d7d402010-03-05 13:42:54 -08004779 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
Matt Mackalle43379f2005-05-01 08:59:00 -07004780 capable(CAP_SYS_NICE));
4781}
4782
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783#ifdef __ARCH_WANT_SYS_NICE
4784
4785/*
4786 * sys_nice - change the priority of the current process.
4787 * @increment: priority increment
4788 *
4789 * sys_setpriority is a more generic, but much slower function that
4790 * does similar things.
4791 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004792SYSCALL_DEFINE1(nice, int, increment)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793{
Ingo Molnar48f24c42006-07-03 00:25:40 -07004794 long nice, retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795
4796 /*
4797 * Setpriority might change our priority at the same moment.
4798 * We don't have to worry. Conceptually one call occurs first
4799 * and we have a single winner.
4800 */
Matt Mackalle43379f2005-05-01 08:59:00 -07004801 if (increment < -40)
4802 increment = -40;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803 if (increment > 40)
4804 increment = 40;
4805
Américo Wang2b8f8362009-02-16 18:54:21 +08004806 nice = TASK_NICE(current) + increment;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807 if (nice < -20)
4808 nice = -20;
4809 if (nice > 19)
4810 nice = 19;
4811
Matt Mackalle43379f2005-05-01 08:59:00 -07004812 if (increment < 0 && !can_nice(current, nice))
4813 return -EPERM;
4814
Linus Torvalds1da177e2005-04-16 15:20:36 -07004815 retval = security_task_setnice(current, nice);
4816 if (retval)
4817 return retval;
4818
4819 set_user_nice(current, nice);
4820 return 0;
4821}
4822
4823#endif
4824
4825/**
4826 * task_prio - return the priority value of a given task.
4827 * @p: the task in question.
4828 *
4829 * This is the priority value as seen by users in /proc.
4830 * RT tasks are offset by -200. Normal tasks are centered
4831 * around 0, value goes from -16 to +15.
4832 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004833int task_prio(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834{
4835 return p->prio - MAX_RT_PRIO;
4836}
4837
4838/**
4839 * task_nice - return the nice value of a given task.
4840 * @p: the task in question.
4841 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004842int task_nice(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004843{
4844 return TASK_NICE(p);
4845}
Pavel Roskin150d8be2008-03-05 16:56:37 -05004846EXPORT_SYMBOL(task_nice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004847
4848/**
4849 * idle_cpu - is a given cpu idle currently?
4850 * @cpu: the processor in question.
4851 */
4852int idle_cpu(int cpu)
4853{
4854 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4855}
4856
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857/**
4858 * idle_task - return the idle task for a given cpu.
4859 * @cpu: the processor in question.
4860 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004861struct task_struct *idle_task(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862{
4863 return cpu_rq(cpu)->idle;
4864}
4865
4866/**
4867 * find_process_by_pid - find a process with a matching PID value.
4868 * @pid: the pid in question.
4869 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02004870static struct task_struct *find_process_by_pid(pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871{
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07004872 return pid ? find_task_by_vpid(pid) : current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873}
4874
4875/* Actually do priority change: must hold rq lock. */
Ingo Molnardd41f592007-07-09 18:51:59 +02004876static void
4877__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879 p->policy = policy;
4880 p->rt_priority = prio;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004881 p->normal_prio = normal_prio(p);
4882 /* we are holding p->pi_lock already */
4883 p->prio = rt_mutex_getprio(p);
Peter Zijlstraffd44db2009-11-10 20:12:01 +01004884 if (rt_prio(p->prio))
4885 p->sched_class = &rt_sched_class;
4886 else
4887 p->sched_class = &fair_sched_class;
Peter Williams2dd73a42006-06-27 02:54:34 -07004888 set_load_weight(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889}
4890
David Howellsc69e8d92008-11-14 10:39:19 +11004891/*
4892 * check the target process has a UID that matches the current process's
4893 */
4894static bool check_same_owner(struct task_struct *p)
4895{
4896 const struct cred *cred = current_cred(), *pcred;
4897 bool match;
4898
4899 rcu_read_lock();
4900 pcred = __task_cred(p);
Serge E. Hallynb0e77592011-03-23 16:43:24 -07004901 if (cred->user->user_ns == pcred->user->user_ns)
4902 match = (cred->euid == pcred->euid ||
4903 cred->euid == pcred->uid);
4904 else
4905 match = false;
David Howellsc69e8d92008-11-14 10:39:19 +11004906 rcu_read_unlock();
4907 return match;
4908}
4909
Rusty Russell961ccdd2008-06-23 13:55:38 +10004910static int __sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07004911 const struct sched_param *param, bool user)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004912{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004913 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004914 unsigned long flags;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004915 const struct sched_class *prev_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004916 struct rq *rq;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004917 int reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918
Steven Rostedt66e53932006-06-27 02:54:44 -07004919 /* may grab non-irq protected spin_locks */
4920 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921recheck:
4922 /* double check policy once rq lock held */
Lennart Poetteringca94c442009-06-15 17:17:47 +02004923 if (policy < 0) {
4924 reset_on_fork = p->sched_reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925 policy = oldpolicy = p->policy;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004926 } else {
4927 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4928 policy &= ~SCHED_RESET_ON_FORK;
4929
4930 if (policy != SCHED_FIFO && policy != SCHED_RR &&
4931 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4932 policy != SCHED_IDLE)
4933 return -EINVAL;
4934 }
4935
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 /*
4937 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02004938 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4939 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940 */
4941 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004942 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04004943 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02004945 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004946 return -EINVAL;
4947
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004948 /*
4949 * Allow unprivileged RT tasks to decrease priority:
4950 */
Rusty Russell961ccdd2008-06-23 13:55:38 +10004951 if (user && !capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02004952 if (rt_policy(policy)) {
Oleg Nesterova44702e2010-06-11 01:09:44 +02004953 unsigned long rlim_rtprio =
4954 task_rlimit(p, RLIMIT_RTPRIO);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004955
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004956 /* can't set/change the rt policy */
4957 if (policy != p->policy && !rlim_rtprio)
4958 return -EPERM;
4959
4960 /* can't increase priority */
4961 if (param->sched_priority > p->rt_priority &&
4962 param->sched_priority > rlim_rtprio)
4963 return -EPERM;
4964 }
Darren Hartc02aa732011-02-17 15:37:07 -08004965
Ingo Molnardd41f592007-07-09 18:51:59 +02004966 /*
Darren Hartc02aa732011-02-17 15:37:07 -08004967 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4968 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
Ingo Molnardd41f592007-07-09 18:51:59 +02004969 */
Darren Hartc02aa732011-02-17 15:37:07 -08004970 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4971 if (!can_nice(p, TASK_NICE(p)))
4972 return -EPERM;
4973 }
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004974
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004975 /* can't change other user's priorities */
David Howellsc69e8d92008-11-14 10:39:19 +11004976 if (!check_same_owner(p))
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004977 return -EPERM;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004978
4979 /* Normal users shall not reset the sched_reset_on_fork flag */
4980 if (p->sched_reset_on_fork && !reset_on_fork)
4981 return -EPERM;
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004982 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07004984 if (user) {
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09004985 retval = security_task_setscheduler(p);
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07004986 if (retval)
4987 return retval;
4988 }
4989
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07004991 * make sure no PI-waiters arrive (or leave) while we are
4992 * changing the priority of the task:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004993 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004994 * To be able to change p->policy safely, the appropriate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004995 * runqueue lock must be held.
4996 */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004997 rq = task_rq_lock(p, &flags);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02004998
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004999 /*
5000 * Changing the policy of the stop threads its a very bad idea
5001 */
5002 if (p == rq->stop) {
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005003 task_rq_unlock(rq, p, &flags);
Peter Zijlstra34f971f2010-09-22 13:53:15 +02005004 return -EINVAL;
5005 }
5006
Dario Faggiolia51e9192011-03-24 14:00:18 +01005007 /*
5008 * If not changing anything there's no need to proceed further:
5009 */
5010 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
5011 param->sched_priority == p->rt_priority))) {
5012
5013 __task_rq_unlock(rq);
5014 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5015 return 0;
5016 }
5017
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005018#ifdef CONFIG_RT_GROUP_SCHED
5019 if (user) {
5020 /*
5021 * Do not allow realtime tasks into groups that have no runtime
5022 * assigned.
5023 */
5024 if (rt_bandwidth_enabled() && rt_policy(policy) &&
Mike Galbraithf4493772011-01-13 04:54:50 +01005025 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5026 !task_group_is_autogroup(task_group(p))) {
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005027 task_rq_unlock(rq, p, &flags);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005028 return -EPERM;
5029 }
5030 }
5031#endif
5032
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033 /* recheck policy now with rq lock held */
5034 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5035 policy = oldpolicy = -1;
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005036 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005037 goto recheck;
5038 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02005039 on_rq = p->on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01005040 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005041 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005042 deactivate_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005043 if (running)
5044 p->sched_class->put_prev_task(rq, p);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005045
Lennart Poetteringca94c442009-06-15 17:17:47 +02005046 p->sched_reset_on_fork = reset_on_fork;
5047
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01005049 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02005050 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005051
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005052 if (running)
5053 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005054 if (on_rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02005055 activate_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005056
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005057 check_class_changed(rq, p, prev_class, oldprio);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005058 task_rq_unlock(rq, p, &flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005059
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07005060 rt_mutex_adjust_pi(p);
5061
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062 return 0;
5063}
Rusty Russell961ccdd2008-06-23 13:55:38 +10005064
5065/**
5066 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5067 * @p: the task in question.
5068 * @policy: new policy.
5069 * @param: structure containing the new RT priority.
5070 *
5071 * NOTE that the task may be already dead.
5072 */
5073int sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005074 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005075{
5076 return __sched_setscheduler(p, policy, param, true);
5077}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078EXPORT_SYMBOL_GPL(sched_setscheduler);
5079
Rusty Russell961ccdd2008-06-23 13:55:38 +10005080/**
5081 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5082 * @p: the task in question.
5083 * @policy: new policy.
5084 * @param: structure containing the new RT priority.
5085 *
5086 * Just like sched_setscheduler, only don't bother checking if the
5087 * current context has permission. For example, this is needed in
5088 * stop_machine(): we create temporary high priority worker threads,
5089 * but our caller might not have that capability.
5090 */
5091int sched_setscheduler_nocheck(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005092 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005093{
5094 return __sched_setscheduler(p, policy, param, false);
5095}
5096
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005097static int
5098do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100 struct sched_param lparam;
5101 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005102 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103
5104 if (!param || pid < 0)
5105 return -EINVAL;
5106 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5107 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005108
5109 rcu_read_lock();
5110 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005111 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005112 if (p != NULL)
5113 retval = sched_setscheduler(p, policy, &lparam);
5114 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07005115
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116 return retval;
5117}
5118
5119/**
5120 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5121 * @pid: the pid in question.
5122 * @policy: new policy.
5123 * @param: structure containing the new RT priority.
5124 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005125SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5126 struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127{
Jason Baronc21761f2006-01-18 17:43:03 -08005128 /* negative values for policy are not valid */
5129 if (policy < 0)
5130 return -EINVAL;
5131
Linus Torvalds1da177e2005-04-16 15:20:36 -07005132 return do_sched_setscheduler(pid, policy, param);
5133}
5134
5135/**
5136 * sys_sched_setparam - set/change the RT priority of a thread
5137 * @pid: the pid in question.
5138 * @param: structure containing the new RT priority.
5139 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005140SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141{
5142 return do_sched_setscheduler(pid, -1, param);
5143}
5144
5145/**
5146 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5147 * @pid: the pid in question.
5148 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005149SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005151 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005152 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153
5154 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005155 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156
5157 retval = -ESRCH;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005158 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159 p = find_process_by_pid(pid);
5160 if (p) {
5161 retval = security_task_getscheduler(p);
5162 if (!retval)
Lennart Poetteringca94c442009-06-15 17:17:47 +02005163 retval = p->policy
5164 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165 }
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005166 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 return retval;
5168}
5169
5170/**
Lennart Poetteringca94c442009-06-15 17:17:47 +02005171 * sys_sched_getparam - get the RT priority of a thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172 * @pid: the pid in question.
5173 * @param: structure containing the RT priority.
5174 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005175SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176{
5177 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005178 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005179 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180
5181 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005182 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005184 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185 p = find_process_by_pid(pid);
5186 retval = -ESRCH;
5187 if (!p)
5188 goto out_unlock;
5189
5190 retval = security_task_getscheduler(p);
5191 if (retval)
5192 goto out_unlock;
5193
5194 lp.sched_priority = p->rt_priority;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005195 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196
5197 /*
5198 * This one might sleep, we cannot do it with a spinlock held ...
5199 */
5200 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5201
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202 return retval;
5203
5204out_unlock:
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005205 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206 return retval;
5207}
5208
Rusty Russell96f874e2008-11-25 02:35:14 +10305209long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305211 cpumask_var_t cpus_allowed, new_mask;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005212 struct task_struct *p;
5213 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005215 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005216 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217
5218 p = find_process_by_pid(pid);
5219 if (!p) {
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005220 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005221 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005222 return -ESRCH;
5223 }
5224
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005225 /* Prevent p going away */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 get_task_struct(p);
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005227 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305229 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5230 retval = -ENOMEM;
5231 goto out_put_task;
5232 }
5233 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5234 retval = -ENOMEM;
5235 goto out_free_cpus_allowed;
5236 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237 retval = -EPERM;
Serge E. Hallynb0e77592011-03-23 16:43:24 -07005238 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239 goto out_unlock;
5240
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09005241 retval = security_task_setscheduler(p);
David Quigleye7834f82006-06-23 02:03:59 -07005242 if (retval)
5243 goto out_unlock;
5244
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305245 cpuset_cpus_allowed(p, cpus_allowed);
5246 cpumask_and(new_mask, in_mask, cpus_allowed);
Peter Zijlstra49246272010-10-17 21:46:10 +02005247again:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305248 retval = set_cpus_allowed_ptr(p, new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249
Paul Menage8707d8b2007-10-18 23:40:22 -07005250 if (!retval) {
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305251 cpuset_cpus_allowed(p, cpus_allowed);
5252 if (!cpumask_subset(new_mask, cpus_allowed)) {
Paul Menage8707d8b2007-10-18 23:40:22 -07005253 /*
5254 * We must have raced with a concurrent cpuset
5255 * update. Just reset the cpus_allowed to the
5256 * cpuset's cpus_allowed
5257 */
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305258 cpumask_copy(new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07005259 goto again;
5260 }
5261 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262out_unlock:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305263 free_cpumask_var(new_mask);
5264out_free_cpus_allowed:
5265 free_cpumask_var(cpus_allowed);
5266out_put_task:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005268 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269 return retval;
5270}
5271
5272static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
Rusty Russell96f874e2008-11-25 02:35:14 +10305273 struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274{
Rusty Russell96f874e2008-11-25 02:35:14 +10305275 if (len < cpumask_size())
5276 cpumask_clear(new_mask);
5277 else if (len > cpumask_size())
5278 len = cpumask_size();
5279
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5281}
5282
5283/**
5284 * sys_sched_setaffinity - set the cpu affinity of a process
5285 * @pid: pid of the process
5286 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5287 * @user_mask_ptr: user-space pointer to the new cpu mask
5288 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005289SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5290 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005291{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305292 cpumask_var_t new_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005293 int retval;
5294
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305295 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5296 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005297
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305298 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5299 if (retval == 0)
5300 retval = sched_setaffinity(pid, new_mask);
5301 free_cpumask_var(new_mask);
5302 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005303}
5304
Rusty Russell96f874e2008-11-25 02:35:14 +10305305long sched_getaffinity(pid_t pid, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005307 struct task_struct *p;
Thomas Gleixner31605682009-12-08 20:24:16 +00005308 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005311 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005312 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313
5314 retval = -ESRCH;
5315 p = find_process_by_pid(pid);
5316 if (!p)
5317 goto out_unlock;
5318
David Quigleye7834f82006-06-23 02:03:59 -07005319 retval = security_task_getscheduler(p);
5320 if (retval)
5321 goto out_unlock;
5322
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005323 raw_spin_lock_irqsave(&p->pi_lock, flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10305324 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005325 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005326
5327out_unlock:
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005328 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005329 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330
Ulrich Drepper9531b622007-08-09 11:16:46 +02005331 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005332}
5333
5334/**
5335 * sys_sched_getaffinity - get the cpu affinity of a process
5336 * @pid: pid of the process
5337 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5338 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5339 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005340SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5341 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342{
5343 int ret;
Rusty Russellf17c8602008-11-25 02:35:11 +10305344 cpumask_var_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005345
Anton Blanchard84fba5e2010-04-06 17:02:19 +10005346 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005347 return -EINVAL;
5348 if (len & (sizeof(unsigned long)-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349 return -EINVAL;
5350
Rusty Russellf17c8602008-11-25 02:35:11 +10305351 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5352 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005353
Rusty Russellf17c8602008-11-25 02:35:11 +10305354 ret = sched_getaffinity(pid, mask);
5355 if (ret == 0) {
KOSAKI Motohiro8bc037f2010-03-17 09:36:58 +09005356 size_t retlen = min_t(size_t, len, cpumask_size());
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005357
5358 if (copy_to_user(user_mask_ptr, mask, retlen))
Rusty Russellf17c8602008-11-25 02:35:11 +10305359 ret = -EFAULT;
5360 else
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005361 ret = retlen;
Rusty Russellf17c8602008-11-25 02:35:11 +10305362 }
5363 free_cpumask_var(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005364
Rusty Russellf17c8602008-11-25 02:35:11 +10305365 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005366}
5367
5368/**
5369 * sys_sched_yield - yield the current processor to other threads.
5370 *
Ingo Molnardd41f592007-07-09 18:51:59 +02005371 * This function yields the current CPU to other tasks. If there are no
5372 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005374SYSCALL_DEFINE0(sched_yield)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005376 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377
Ingo Molnar2d723762007-10-15 17:00:12 +02005378 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02005379 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005380
5381 /*
5382 * Since we are going to call schedule() anyway, there's
5383 * no need to preempt or enable interrupts:
5384 */
5385 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07005386 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Thomas Gleixner9828ea92009-12-03 20:55:53 +01005387 do_raw_spin_unlock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005388 preempt_enable_no_resched();
5389
5390 schedule();
5391
5392 return 0;
5393}
5394
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005395static inline int should_resched(void)
5396{
5397 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5398}
5399
Andrew Mortone7b38402006-06-30 01:56:00 -07005400static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005401{
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02005402 add_preempt_count(PREEMPT_ACTIVE);
5403 schedule();
5404 sub_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005405}
5406
Herbert Xu02b67cc2008-01-25 21:08:28 +01005407int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005409 if (should_resched()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410 __cond_resched();
5411 return 1;
5412 }
5413 return 0;
5414}
Herbert Xu02b67cc2008-01-25 21:08:28 +01005415EXPORT_SYMBOL(_cond_resched);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416
5417/*
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005418 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419 * call schedule, and on return reacquire the lock.
5420 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005421 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422 * operations here to prevent schedule() from being called twice (once via
5423 * spin_unlock(), once by hand).
5424 */
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005425int __cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005427 int resched = should_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07005428 int ret = 0;
5429
Peter Zijlstraf607c662009-07-20 19:16:29 +02005430 lockdep_assert_held(lock);
5431
Nick Piggin95c354f2008-01-30 13:31:20 +01005432 if (spin_needbreak(lock) || resched) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433 spin_unlock(lock);
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005434 if (resched)
Nick Piggin95c354f2008-01-30 13:31:20 +01005435 __cond_resched();
5436 else
5437 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07005438 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005439 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440 }
Jan Kara6df3cec2005-06-13 15:52:32 -07005441 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005443EXPORT_SYMBOL(__cond_resched_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005444
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005445int __sched __cond_resched_softirq(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446{
5447 BUG_ON(!in_softirq());
5448
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005449 if (should_resched()) {
Thomas Gleixner98d82562007-05-23 13:58:18 -07005450 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005451 __cond_resched();
5452 local_bh_disable();
5453 return 1;
5454 }
5455 return 0;
5456}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005457EXPORT_SYMBOL(__cond_resched_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458
Linus Torvalds1da177e2005-04-16 15:20:36 -07005459/**
5460 * yield - yield the current processor to other threads.
5461 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08005462 * This is a shortcut for kernel-space yielding - it marks the
Linus Torvalds1da177e2005-04-16 15:20:36 -07005463 * thread runnable and calls sys_sched_yield().
5464 */
5465void __sched yield(void)
5466{
5467 set_current_state(TASK_RUNNING);
5468 sys_sched_yield();
5469}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470EXPORT_SYMBOL(yield);
5471
Mike Galbraithd95f4122011-02-01 09:50:51 -05005472/**
5473 * yield_to - yield the current processor to another thread in
5474 * your thread group, or accelerate that thread toward the
5475 * processor it's on.
Randy Dunlap16addf92011-03-18 09:34:53 -07005476 * @p: target task
5477 * @preempt: whether task preemption is allowed or not
Mike Galbraithd95f4122011-02-01 09:50:51 -05005478 *
5479 * It's the caller's job to ensure that the target task struct
5480 * can't go away on us before we can do any checks.
5481 *
5482 * Returns true if we indeed boosted the target task.
5483 */
5484bool __sched yield_to(struct task_struct *p, bool preempt)
5485{
5486 struct task_struct *curr = current;
5487 struct rq *rq, *p_rq;
5488 unsigned long flags;
5489 bool yielded = 0;
5490
5491 local_irq_save(flags);
5492 rq = this_rq();
5493
5494again:
5495 p_rq = task_rq(p);
5496 double_rq_lock(rq, p_rq);
5497 while (task_rq(p) != p_rq) {
5498 double_rq_unlock(rq, p_rq);
5499 goto again;
5500 }
5501
5502 if (!curr->sched_class->yield_to_task)
5503 goto out;
5504
5505 if (curr->sched_class != p->sched_class)
5506 goto out;
5507
5508 if (task_running(p_rq, p) || p->state)
5509 goto out;
5510
5511 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08005512 if (yielded) {
Mike Galbraithd95f4122011-02-01 09:50:51 -05005513 schedstat_inc(rq, yld_count);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08005514 /*
5515 * Make p's CPU reschedule; pick_next_entity takes care of
5516 * fairness.
5517 */
5518 if (preempt && rq != p_rq)
5519 resched_task(p_rq->curr);
5520 }
Mike Galbraithd95f4122011-02-01 09:50:51 -05005521
5522out:
5523 double_rq_unlock(rq, p_rq);
5524 local_irq_restore(flags);
5525
5526 if (yielded)
5527 schedule();
5528
5529 return yielded;
5530}
5531EXPORT_SYMBOL_GPL(yield_to);
5532
Linus Torvalds1da177e2005-04-16 15:20:36 -07005533/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005534 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07005535 * that process accounting knows that this is a task in IO wait state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536 */
5537void __sched io_schedule(void)
5538{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005539 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005540
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005541 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005542 atomic_inc(&rq->nr_iowait);
Jens Axboe73c10102011-03-08 13:19:51 +01005543 blk_flush_plug(current);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005544 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005545 schedule();
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005546 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005547 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005548 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005550EXPORT_SYMBOL(io_schedule);
5551
5552long __sched io_schedule_timeout(long timeout)
5553{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005554 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005555 long ret;
5556
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005557 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005558 atomic_inc(&rq->nr_iowait);
Jens Axboe73c10102011-03-08 13:19:51 +01005559 blk_flush_plug(current);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005560 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005561 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005562 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005563 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005564 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565 return ret;
5566}
5567
5568/**
5569 * sys_sched_get_priority_max - return maximum RT priority.
5570 * @policy: scheduling class.
5571 *
5572 * this syscall returns the maximum rt_priority that can be used
5573 * by a given scheduling class.
5574 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005575SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576{
5577 int ret = -EINVAL;
5578
5579 switch (policy) {
5580 case SCHED_FIFO:
5581 case SCHED_RR:
5582 ret = MAX_USER_RT_PRIO-1;
5583 break;
5584 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005585 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005586 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005587 ret = 0;
5588 break;
5589 }
5590 return ret;
5591}
5592
5593/**
5594 * sys_sched_get_priority_min - return minimum RT priority.
5595 * @policy: scheduling class.
5596 *
5597 * this syscall returns the minimum rt_priority that can be used
5598 * by a given scheduling class.
5599 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005600SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005601{
5602 int ret = -EINVAL;
5603
5604 switch (policy) {
5605 case SCHED_FIFO:
5606 case SCHED_RR:
5607 ret = 1;
5608 break;
5609 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005610 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005611 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005612 ret = 0;
5613 }
5614 return ret;
5615}
5616
5617/**
5618 * sys_sched_rr_get_interval - return the default timeslice of a process.
5619 * @pid: pid of the process.
5620 * @interval: userspace pointer to the timeslice value.
5621 *
5622 * this syscall writes the default timeslice value of a given process
5623 * into the user-space timespec buffer. A value of '0' means infinity.
5624 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01005625SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01005626 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005627{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005628 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005629 unsigned int time_slice;
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005630 unsigned long flags;
5631 struct rq *rq;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005632 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005634
5635 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005636 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005637
5638 retval = -ESRCH;
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005639 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640 p = find_process_by_pid(pid);
5641 if (!p)
5642 goto out_unlock;
5643
5644 retval = security_task_getscheduler(p);
5645 if (retval)
5646 goto out_unlock;
5647
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005648 rq = task_rq_lock(p, &flags);
5649 time_slice = p->sched_class->get_rr_interval(rq, p);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005650 task_rq_unlock(rq, p, &flags);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005651
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005652 rcu_read_unlock();
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005653 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005654 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005656
Linus Torvalds1da177e2005-04-16 15:20:36 -07005657out_unlock:
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005658 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659 return retval;
5660}
5661
Steven Rostedt7c731e02008-05-12 21:20:41 +02005662static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005663
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005664void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005665{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005667 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668
Linus Torvalds1da177e2005-04-16 15:20:36 -07005669 state = p->state ? __ffs(p->state) + 1 : 0;
Erik Gilling28d06862010-11-19 18:08:51 -08005670 printk(KERN_INFO "%-15.15s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005671 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02005672#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005674 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005676 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005677#else
5678 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005679 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005681 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005682#endif
5683#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05005684 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685#endif
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005686 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
David Rientjesaa47b7e2009-05-04 01:38:05 -07005687 task_pid_nr(p), task_pid_nr(p->real_parent),
5688 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005689
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01005690 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005691}
5692
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005693void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005694{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005695 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005696
Ingo Molnar4bd77322007-07-11 21:21:47 +02005697#if BITS_PER_LONG == 32
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005698 printk(KERN_INFO
5699 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005700#else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005701 printk(KERN_INFO
5702 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703#endif
5704 read_lock(&tasklist_lock);
5705 do_each_thread(g, p) {
5706 /*
5707 * reset the NMI-timeout, listing all files on a slow
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005708 * console might take a lot of time:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005709 */
5710 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07005711 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005712 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005713 } while_each_thread(g, p);
5714
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07005715 touch_all_softlockup_watchdogs();
5716
Ingo Molnardd41f592007-07-09 18:51:59 +02005717#ifdef CONFIG_SCHED_DEBUG
5718 sysrq_sched_debug_show();
5719#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005720 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005721 /*
5722 * Only show locks if all tasks are dumped:
5723 */
Shmulik Ladkani93335a22009-11-25 15:23:41 +02005724 if (!state_filter)
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005725 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005726}
5727
Ingo Molnar1df21052007-07-09 18:51:58 +02005728void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5729{
Ingo Molnardd41f592007-07-09 18:51:59 +02005730 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02005731}
5732
Ingo Molnarf340c0d2005-06-28 16:40:42 +02005733/**
5734 * init_idle - set up an idle thread for a given CPU
5735 * @idle: task in question
5736 * @cpu: cpu the idle task belongs to
5737 *
5738 * NOTE: this function does not set the idle thread's NEED_RESCHED
5739 * flag, to make booting more robust.
5740 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07005741void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005742{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005743 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744 unsigned long flags;
5745
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005746 raw_spin_lock_irqsave(&rq->lock, flags);
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01005747
Ingo Molnardd41f592007-07-09 18:51:59 +02005748 __sched_fork(idle);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01005749 idle->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02005750 idle->se.exec_start = sched_clock();
5751
Rusty Russell96f874e2008-11-25 02:35:14 +10305752 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005753 /*
5754 * We're having a chicken and egg problem, even though we are
5755 * holding rq->lock, the cpu isn't yet set to this cpu so the
5756 * lockdep check in task_group() will fail.
5757 *
5758 * Similar case to sched_fork(). / Alternatively we could
5759 * use task_rq_lock() here and obtain the other rq->lock.
5760 *
5761 * Silence PROVE_RCU
5762 */
5763 rcu_read_lock();
Ingo Molnardd41f592007-07-09 18:51:59 +02005764 __set_task_cpu(idle, cpu);
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005765 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005766
Linus Torvalds1da177e2005-04-16 15:20:36 -07005767 rq->curr = rq->idle = idle;
Peter Zijlstra3ca7a442011-04-05 17:23:40 +02005768#if defined(CONFIG_SMP)
5769 idle->on_cpu = 1;
Nick Piggin4866cde2005-06-25 14:57:23 -07005770#endif
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005771 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005772
5773 /* Set the preempt count _outside_ the spinlocks! */
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005774#if defined(CONFIG_PREEMPT)
5775 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5776#else
Al Viroa1261f52005-11-13 16:06:55 -08005777 task_thread_info(idle)->preempt_count = 0;
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005778#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02005779 /*
5780 * The idle tasks have their own, simple scheduling class:
5781 */
5782 idle->sched_class = &idle_sched_class;
Steven Rostedt868baf02011-02-10 21:26:13 -05005783 ftrace_graph_init_idle_task(idle, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005784}
5785
5786/*
5787 * In a system that switches off the HZ timer nohz_cpu_mask
5788 * indicates which cpus entered this state. This is used
5789 * in the rcu update to wait only for active cpus. For system
5790 * which do not switch off the HZ timer nohz_cpu_mask should
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305791 * always be CPU_BITS_NONE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005792 */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305793cpumask_var_t nohz_cpu_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005794
Ingo Molnar19978ca2007-11-09 22:39:38 +01005795/*
5796 * Increase the granularity value when there are more CPUs,
5797 * because with more CPUs the 'effective latency' as visible
5798 * to users decreases. But the relationship is not linear,
5799 * so pick a second-best guess by going with the log2 of the
5800 * number of CPUs.
5801 *
5802 * This idea comes from the SD scheduler of Con Kolivas:
5803 */
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005804static int get_update_sysctl_factor(void)
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005805{
Mike Galbraith4ca3ef72009-12-10 09:25:53 +01005806 unsigned int cpus = min_t(int, num_online_cpus(), 8);
Christian Ehrhardt1983a922009-11-30 12:16:47 +01005807 unsigned int factor;
5808
5809 switch (sysctl_sched_tunable_scaling) {
5810 case SCHED_TUNABLESCALING_NONE:
5811 factor = 1;
5812 break;
5813 case SCHED_TUNABLESCALING_LINEAR:
5814 factor = cpus;
5815 break;
5816 case SCHED_TUNABLESCALING_LOG:
5817 default:
5818 factor = 1 + ilog2(cpus);
5819 break;
5820 }
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005821
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005822 return factor;
5823}
5824
5825static void update_sysctl(void)
5826{
5827 unsigned int factor = get_update_sysctl_factor();
5828
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005829#define SET_SYSCTL(name) \
5830 (sysctl_##name = (factor) * normalized_sysctl_##name)
5831 SET_SYSCTL(sched_min_granularity);
5832 SET_SYSCTL(sched_latency);
5833 SET_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005834#undef SET_SYSCTL
5835}
5836
Ingo Molnar19978ca2007-11-09 22:39:38 +01005837static inline void sched_init_granularity(void)
5838{
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005839 update_sysctl();
Ingo Molnar19978ca2007-11-09 22:39:38 +01005840}
5841
Linus Torvalds1da177e2005-04-16 15:20:36 -07005842#ifdef CONFIG_SMP
5843/*
5844 * This is how migration works:
5845 *
Tejun Heo969c7922010-05-06 18:49:21 +02005846 * 1) we invoke migration_cpu_stop() on the target CPU using
5847 * stop_one_cpu().
5848 * 2) stopper starts to run (implicitly forcing the migrated thread
5849 * off the CPU)
5850 * 3) it checks whether the migrated task is still in the wrong runqueue.
5851 * 4) if it's in the wrong runqueue then the migration thread removes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005852 * it and puts it into the right queue.
Tejun Heo969c7922010-05-06 18:49:21 +02005853 * 5) stopper completes and stop_one_cpu() returns and the migration
5854 * is done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 */
5856
5857/*
5858 * Change a given task's CPU affinity. Migrate the thread to a
5859 * proper CPU and schedule it away if the CPU it's executing on
5860 * is removed from the allowed bitmask.
5861 *
5862 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005863 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07005864 * call is not atomic; no spinlocks may be held.
5865 */
Rusty Russell96f874e2008-11-25 02:35:14 +10305866int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005867{
5868 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005869 struct rq *rq;
Tejun Heo969c7922010-05-06 18:49:21 +02005870 unsigned int dest_cpu;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005871 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005872
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005873 rq = task_rq_lock(p, &flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005874
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005875 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005876 ret = -EINVAL;
5877 goto out;
5878 }
5879
David Rientjes9985b0b2008-06-05 12:57:11 -07005880 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
Rusty Russell96f874e2008-11-25 02:35:14 +10305881 !cpumask_equal(&p->cpus_allowed, new_mask))) {
David Rientjes9985b0b2008-06-05 12:57:11 -07005882 ret = -EINVAL;
5883 goto out;
5884 }
5885
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005886 if (p->sched_class->set_cpus_allowed)
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005887 p->sched_class->set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005888 else {
Rusty Russell96f874e2008-11-25 02:35:14 +10305889 cpumask_copy(&p->cpus_allowed, new_mask);
5890 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005891 }
5892
Linus Torvalds1da177e2005-04-16 15:20:36 -07005893 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10305894 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005895 goto out;
5896
Tejun Heo969c7922010-05-06 18:49:21 +02005897 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
Peter Zijlstra7608dec2011-04-05 17:23:46 +02005898 if (need_migrate_task(p)) {
Tejun Heo969c7922010-05-06 18:49:21 +02005899 struct migration_arg arg = { p, dest_cpu };
Linus Torvalds1da177e2005-04-16 15:20:36 -07005900 /* Need help from migration thread: drop lock and wait. */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005901 task_rq_unlock(rq, p, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005902 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005903 tlb_migrate_finish(p->mm);
5904 return 0;
5905 }
5906out:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005907 task_rq_unlock(rq, p, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005908
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909 return ret;
5910}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005911EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005912
5913/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005914 * Move (not current) task off this cpu, onto dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07005915 * this because either it can't run here any more (set_cpus_allowed()
5916 * away from this CPU, or CPU going down), or because we're
5917 * attempting to rebalance this task on exec (sched_exec).
5918 *
5919 * So we race with normal scheduler movements, but that's OK, as long
5920 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07005921 *
5922 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005923 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07005924static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005925{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005926 struct rq *rq_dest, *rq_src;
Peter Zijlstrae2912002009-12-16 18:04:36 +01005927 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005928
Max Krasnyanskye761b772008-07-15 04:43:49 -07005929 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07005930 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005931
5932 rq_src = cpu_rq(src_cpu);
5933 rq_dest = cpu_rq(dest_cpu);
5934
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005935 raw_spin_lock(&p->pi_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005936 double_rq_lock(rq_src, rq_dest);
5937 /* Already moved. */
5938 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005939 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005940 /* Affinity changed (again). */
Rusty Russell96f874e2008-11-25 02:35:14 +10305941 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005942 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005943
Peter Zijlstrae2912002009-12-16 18:04:36 +01005944 /*
5945 * If we're not on a rq, the next wake-up will ensure we're
5946 * placed properly.
5947 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02005948 if (p->on_rq) {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005949 deactivate_task(rq_src, p, 0);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005950 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005951 activate_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02005952 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005953 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005954done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07005955 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005956fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005957 double_rq_unlock(rq_src, rq_dest);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005958 raw_spin_unlock(&p->pi_lock);
Kirill Korotaevefc30812006-06-27 02:54:32 -07005959 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005960}
5961
5962/*
Tejun Heo969c7922010-05-06 18:49:21 +02005963 * migration_cpu_stop - this will be executed by a highprio stopper thread
5964 * and performs thread migration by bumping thread off CPU then
5965 * 'pushing' onto another runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005966 */
Tejun Heo969c7922010-05-06 18:49:21 +02005967static int migration_cpu_stop(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005968{
Tejun Heo969c7922010-05-06 18:49:21 +02005969 struct migration_arg *arg = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005970
Tejun Heo969c7922010-05-06 18:49:21 +02005971 /*
5972 * The original target cpu might have gone down and we might
5973 * be on another cpu but it doesn't matter.
5974 */
5975 local_irq_disable();
5976 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5977 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005978 return 0;
5979}
5980
5981#ifdef CONFIG_HOTPLUG_CPU
Linus Torvalds1da177e2005-04-16 15:20:36 -07005982
Ingo Molnar48f24c42006-07-03 00:25:40 -07005983/*
5984 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005985 * offline.
5986 */
5987void idle_task_exit(void)
5988{
5989 struct mm_struct *mm = current->active_mm;
5990
5991 BUG_ON(cpu_online(smp_processor_id()));
5992
5993 if (mm != &init_mm)
5994 switch_mm(mm, &init_mm, current);
5995 mmdrop(mm);
5996}
5997
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005998/*
5999 * While a dead CPU has no uninterruptible tasks queued at this point,
6000 * it might still have a nonzero ->nr_uninterruptible counter, because
6001 * for performance reasons the counter is not stricly tracking tasks to
6002 * their home CPUs. So we just add the counter to another CPU's counter,
6003 * to keep the global sum constant after CPU-down:
6004 */
6005static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006006{
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006007 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006008
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006009 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6010 rq_src->nr_uninterruptible = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006011}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006012
6013/*
6014 * remove the tasks which were accounted by rq from calc_load_tasks.
6015 */
6016static void calc_global_load_remove(struct rq *rq)
6017{
6018 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
Thomas Gleixnera468d382009-07-17 14:15:46 +02006019 rq->calc_load_active = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006020}
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006021
6022/*
6023 * Migrate all tasks from the rq, sleeping tasks will be migrated by
6024 * try_to_wake_up()->select_task_rq().
6025 *
6026 * Called with rq->lock held even though we'er in stop_machine() and
6027 * there's no concurrency possible, we hold the required locks anyway
6028 * because of lock validation efforts.
6029 */
6030static void migrate_tasks(unsigned int dead_cpu)
6031{
6032 struct rq *rq = cpu_rq(dead_cpu);
6033 struct task_struct *next, *stop = rq->stop;
6034 int dest_cpu;
6035
6036 /*
6037 * Fudge the rq selection such that the below task selection loop
6038 * doesn't get stuck on the currently eligible stop task.
6039 *
6040 * We're currently inside stop_machine() and the rq is either stuck
6041 * in the stop_machine_cpu_stop() loop, or we're executing this code,
6042 * either way we should never end up calling schedule() until we're
6043 * done here.
6044 */
6045 rq->stop = NULL;
6046
6047 for ( ; ; ) {
6048 /*
6049 * There's this thread running, bail when that's the only
6050 * remaining thread.
6051 */
6052 if (rq->nr_running == 1)
6053 break;
6054
6055 next = pick_next_task(rq);
6056 BUG_ON(!next);
6057 next->sched_class->put_prev_task(rq, next);
6058
6059 /* Find suitable destination for @next, with force if needed. */
6060 dest_cpu = select_fallback_rq(dead_cpu, next);
6061 raw_spin_unlock(&rq->lock);
6062
6063 __migrate_task(next, dead_cpu, dest_cpu);
6064
6065 raw_spin_lock(&rq->lock);
6066 }
6067
6068 rq->stop = stop;
6069}
6070
Linus Torvalds1da177e2005-04-16 15:20:36 -07006071#endif /* CONFIG_HOTPLUG_CPU */
6072
Nick Piggine692ab52007-07-26 13:40:43 +02006073#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6074
6075static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006076 {
6077 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006078 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006079 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006080 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006081};
6082
6083static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006084 {
6085 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006086 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006087 .child = sd_ctl_dir,
6088 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006089 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006090};
6091
6092static struct ctl_table *sd_alloc_ctl_entry(int n)
6093{
6094 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02006095 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02006096
Nick Piggine692ab52007-07-26 13:40:43 +02006097 return entry;
6098}
6099
Milton Miller6382bc92007-10-15 17:00:19 +02006100static void sd_free_ctl_entry(struct ctl_table **tablep)
6101{
Milton Millercd790072007-10-17 16:55:11 +02006102 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02006103
Milton Millercd790072007-10-17 16:55:11 +02006104 /*
6105 * In the intermediate directories, both the child directory and
6106 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006107 * will always be set. In the lowest directory the names are
Milton Millercd790072007-10-17 16:55:11 +02006108 * static strings and all have proc handlers.
6109 */
6110 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02006111 if (entry->child)
6112 sd_free_ctl_entry(&entry->child);
Milton Millercd790072007-10-17 16:55:11 +02006113 if (entry->proc_handler == NULL)
6114 kfree(entry->procname);
6115 }
Milton Miller6382bc92007-10-15 17:00:19 +02006116
6117 kfree(*tablep);
6118 *tablep = NULL;
6119}
6120
Nick Piggine692ab52007-07-26 13:40:43 +02006121static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02006122set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02006123 const char *procname, void *data, int maxlen,
6124 mode_t mode, proc_handler *proc_handler)
6125{
Nick Piggine692ab52007-07-26 13:40:43 +02006126 entry->procname = procname;
6127 entry->data = data;
6128 entry->maxlen = maxlen;
6129 entry->mode = mode;
6130 entry->proc_handler = proc_handler;
6131}
6132
6133static struct ctl_table *
6134sd_alloc_ctl_domain_table(struct sched_domain *sd)
6135{
Ingo Molnara5d8c342008-10-09 11:35:51 +02006136 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02006137
Milton Millerad1cdc12007-10-15 17:00:19 +02006138 if (table == NULL)
6139 return NULL;
6140
Alexey Dobriyane0361852007-08-09 11:16:46 +02006141 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006142 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006143 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006144 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006145 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006146 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006147 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006148 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006149 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006150 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006151 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006152 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006153 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006154 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006155 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02006156 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006157 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02006158 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006159 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02006160 &sd->cache_nice_tries,
6161 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006162 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02006163 sizeof(int), 0644, proc_dointvec_minmax);
Ingo Molnara5d8c342008-10-09 11:35:51 +02006164 set_table_entry(&table[11], "name", sd->name,
6165 CORENAME_MAX_SIZE, 0444, proc_dostring);
6166 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02006167
6168 return table;
6169}
6170
Ingo Molnar9a4e7152007-11-28 15:52:56 +01006171static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02006172{
6173 struct ctl_table *entry, *table;
6174 struct sched_domain *sd;
6175 int domain_num = 0, i;
6176 char buf[32];
6177
6178 for_each_domain(cpu, sd)
6179 domain_num++;
6180 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02006181 if (table == NULL)
6182 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02006183
6184 i = 0;
6185 for_each_domain(cpu, sd) {
6186 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006187 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006188 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006189 entry->child = sd_alloc_ctl_domain_table(sd);
6190 entry++;
6191 i++;
6192 }
6193 return table;
6194}
6195
6196static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02006197static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006198{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006199 int i, cpu_num = num_possible_cpus();
Nick Piggine692ab52007-07-26 13:40:43 +02006200 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6201 char buf[32];
6202
Milton Miller73785472007-10-24 18:23:48 +02006203 WARN_ON(sd_ctl_dir[0].child);
6204 sd_ctl_dir[0].child = entry;
6205
Milton Millerad1cdc12007-10-15 17:00:19 +02006206 if (entry == NULL)
6207 return;
6208
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006209 for_each_possible_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02006210 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006211 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006212 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006213 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02006214 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02006215 }
Milton Miller73785472007-10-24 18:23:48 +02006216
6217 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02006218 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6219}
Milton Miller6382bc92007-10-15 17:00:19 +02006220
Milton Miller73785472007-10-24 18:23:48 +02006221/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02006222static void unregister_sched_domain_sysctl(void)
6223{
Milton Miller73785472007-10-24 18:23:48 +02006224 if (sd_sysctl_header)
6225 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02006226 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02006227 if (sd_ctl_dir[0].child)
6228 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02006229}
Nick Piggine692ab52007-07-26 13:40:43 +02006230#else
Milton Miller6382bc92007-10-15 17:00:19 +02006231static void register_sched_domain_sysctl(void)
6232{
6233}
6234static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006235{
6236}
6237#endif
6238
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006239static void set_rq_online(struct rq *rq)
6240{
6241 if (!rq->online) {
6242 const struct sched_class *class;
6243
Rusty Russellc6c49272008-11-25 02:35:05 +10306244 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006245 rq->online = 1;
6246
6247 for_each_class(class) {
6248 if (class->rq_online)
6249 class->rq_online(rq);
6250 }
6251 }
6252}
6253
6254static void set_rq_offline(struct rq *rq)
6255{
6256 if (rq->online) {
6257 const struct sched_class *class;
6258
6259 for_each_class(class) {
6260 if (class->rq_offline)
6261 class->rq_offline(rq);
6262 }
6263
Rusty Russellc6c49272008-11-25 02:35:05 +10306264 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006265 rq->online = 0;
6266 }
6267}
6268
Linus Torvalds1da177e2005-04-16 15:20:36 -07006269/*
6270 * migration_call - callback that gets triggered when a CPU is added.
6271 * Here we can start up the necessary migration thread for the new CPU.
6272 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006273static int __cpuinit
6274migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006275{
Ingo Molnar48f24c42006-07-03 00:25:40 -07006276 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006277 unsigned long flags;
Tejun Heo969c7922010-05-06 18:49:21 +02006278 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006279
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006280 switch (action & ~CPU_TASKS_FROZEN) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07006281
Linus Torvalds1da177e2005-04-16 15:20:36 -07006282 case CPU_UP_PREPARE:
Thomas Gleixnera468d382009-07-17 14:15:46 +02006283 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006284 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006285
Linus Torvalds1da177e2005-04-16 15:20:36 -07006286 case CPU_ONLINE:
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006287 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006288 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006289 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306290 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006291
6292 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006293 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006294 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006295 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006296
Linus Torvalds1da177e2005-04-16 15:20:36 -07006297#ifdef CONFIG_HOTPLUG_CPU
Gregory Haskins08f503b2008-03-10 17:59:11 -04006298 case CPU_DYING:
Gregory Haskins57d885f2008-01-25 21:08:18 +01006299 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006300 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006301 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306302 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006303 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006304 }
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006305 migrate_tasks(cpu);
6306 BUG_ON(rq->nr_running != 1); /* the migration thread */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006307 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006308
6309 migrate_nr_uninterruptible(rq);
6310 calc_global_load_remove(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006311 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006312#endif
6313 }
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006314
6315 update_max_interval();
6316
Linus Torvalds1da177e2005-04-16 15:20:36 -07006317 return NOTIFY_OK;
6318}
6319
Paul Mackerrasf38b0822009-06-02 21:05:16 +10006320/*
6321 * Register at high priority so that task migration (migrate_all_tasks)
6322 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006323 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006324 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07006325static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006326 .notifier_call = migration_call,
Tejun Heo50a323b2010-06-08 21:40:36 +02006327 .priority = CPU_PRI_MIGRATION,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006328};
6329
Tejun Heo3a101d02010-06-08 21:40:36 +02006330static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6331 unsigned long action, void *hcpu)
6332{
6333 switch (action & ~CPU_TASKS_FROZEN) {
6334 case CPU_ONLINE:
6335 case CPU_DOWN_FAILED:
6336 set_cpu_active((long)hcpu, true);
6337 return NOTIFY_OK;
6338 default:
6339 return NOTIFY_DONE;
6340 }
6341}
6342
6343static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6344 unsigned long action, void *hcpu)
6345{
6346 switch (action & ~CPU_TASKS_FROZEN) {
6347 case CPU_DOWN_PREPARE:
6348 set_cpu_active((long)hcpu, false);
6349 return NOTIFY_OK;
6350 default:
6351 return NOTIFY_DONE;
6352 }
6353}
6354
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006355static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006356{
6357 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07006358 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006359
Tejun Heo3a101d02010-06-08 21:40:36 +02006360 /* Initialize migration for the boot CPU */
Akinobu Mita07dccf32006-09-29 02:00:22 -07006361 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6362 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6364 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006365
Tejun Heo3a101d02010-06-08 21:40:36 +02006366 /* Register cpu active notifiers */
6367 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6368 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6369
Thomas Gleixnera004cd42009-07-21 09:54:05 +02006370 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006371}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006372early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006373#endif
6374
6375#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07006376
Ingo Molnar3e9830d2007-10-15 17:00:13 +02006377#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006378
Mike Travisf6630112009-11-17 18:22:15 -06006379static __read_mostly int sched_domain_debug_enabled;
6380
6381static int __init sched_domain_debug_setup(char *str)
6382{
6383 sched_domain_debug_enabled = 1;
6384
6385 return 0;
6386}
6387early_param("sched_debug", sched_domain_debug_setup);
6388
Mike Travis7c16ec52008-04-04 18:11:11 -07006389static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10306390 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006391{
6392 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07006393 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006394
Rusty Russell968ea6d2008-12-13 21:55:51 +10306395 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10306396 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006397
6398 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6399
6400 if (!(sd->flags & SD_LOAD_BALANCE)) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006401 printk("does not load-balance\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006402 if (sd->parent)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006403 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6404 " has parent");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006405 return -1;
6406 }
6407
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006408 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006409
Rusty Russell758b2cd2008-11-25 02:35:04 +10306410 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006411 printk(KERN_ERR "ERROR: domain->span does not contain "
6412 "CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006413 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10306414 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006415 printk(KERN_ERR "ERROR: domain->groups does not contain"
6416 " CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006417 }
6418
6419 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6420 do {
6421 if (!group) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006422 printk("\n");
6423 printk(KERN_ERR "ERROR: group is NULL\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006424 break;
6425 }
6426
Peter Zijlstra18a38852009-09-01 10:34:39 +02006427 if (!group->cpu_power) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006428 printk(KERN_CONT "\n");
6429 printk(KERN_ERR "ERROR: domain->cpu_power not "
6430 "set\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006431 break;
6432 }
6433
Rusty Russell758b2cd2008-11-25 02:35:04 +10306434 if (!cpumask_weight(sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006435 printk(KERN_CONT "\n");
6436 printk(KERN_ERR "ERROR: empty group\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006437 break;
6438 }
6439
Rusty Russell758b2cd2008-11-25 02:35:04 +10306440 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006441 printk(KERN_CONT "\n");
6442 printk(KERN_ERR "ERROR: repeated CPUs\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006443 break;
6444 }
6445
Rusty Russell758b2cd2008-11-25 02:35:04 +10306446 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006447
Rusty Russell968ea6d2008-12-13 21:55:51 +10306448 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306449
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006450 printk(KERN_CONT " %s", str);
Peter Zijlstra18a38852009-09-01 10:34:39 +02006451 if (group->cpu_power != SCHED_LOAD_SCALE) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006452 printk(KERN_CONT " (cpu_power = %d)",
6453 group->cpu_power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306454 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006455
6456 group = group->next;
6457 } while (group != sd->groups);
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006458 printk(KERN_CONT "\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006459
Rusty Russell758b2cd2008-11-25 02:35:04 +10306460 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006461 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006462
Rusty Russell758b2cd2008-11-25 02:35:04 +10306463 if (sd->parent &&
6464 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006465 printk(KERN_ERR "ERROR: parent span is not a superset "
6466 "of domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006467 return 0;
6468}
6469
Linus Torvalds1da177e2005-04-16 15:20:36 -07006470static void sched_domain_debug(struct sched_domain *sd, int cpu)
6471{
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306472 cpumask_var_t groupmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006473 int level = 0;
6474
Mike Travisf6630112009-11-17 18:22:15 -06006475 if (!sched_domain_debug_enabled)
6476 return;
6477
Nick Piggin41c7ce92005-06-25 14:57:24 -07006478 if (!sd) {
6479 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6480 return;
6481 }
6482
Linus Torvalds1da177e2005-04-16 15:20:36 -07006483 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6484
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306485 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006486 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6487 return;
6488 }
6489
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006490 for (;;) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006491 if (sched_domain_debug_one(sd, cpu, level, groupmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006492 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006493 level++;
6494 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08006495 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006496 break;
6497 }
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306498 free_cpumask_var(groupmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006499}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006500#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006501# define sched_domain_debug(sd, cpu) do { } while (0)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006502#endif /* CONFIG_SCHED_DEBUG */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006503
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006504static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006505{
Rusty Russell758b2cd2008-11-25 02:35:04 +10306506 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006507 return 1;
6508
6509 /* Following flags need at least 2 groups */
6510 if (sd->flags & (SD_LOAD_BALANCE |
6511 SD_BALANCE_NEWIDLE |
6512 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006513 SD_BALANCE_EXEC |
6514 SD_SHARE_CPUPOWER |
6515 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006516 if (sd->groups != sd->groups->next)
6517 return 0;
6518 }
6519
6520 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006521 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006522 return 0;
6523
6524 return 1;
6525}
6526
Ingo Molnar48f24c42006-07-03 00:25:40 -07006527static int
6528sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006529{
6530 unsigned long cflags = sd->flags, pflags = parent->flags;
6531
6532 if (sd_degenerate(parent))
6533 return 1;
6534
Rusty Russell758b2cd2008-11-25 02:35:04 +10306535 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006536 return 0;
6537
Suresh Siddha245af2c2005-06-25 14:57:25 -07006538 /* Flags needing groups don't count if only 1 group in parent */
6539 if (parent->groups == parent->groups->next) {
6540 pflags &= ~(SD_LOAD_BALANCE |
6541 SD_BALANCE_NEWIDLE |
6542 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006543 SD_BALANCE_EXEC |
6544 SD_SHARE_CPUPOWER |
6545 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08006546 if (nr_node_ids == 1)
6547 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006548 }
6549 if (~cflags & pflags)
6550 return 0;
6551
6552 return 1;
6553}
6554
Rusty Russellc6c49272008-11-25 02:35:05 +10306555static void free_rootdomain(struct root_domain *rd)
6556{
Peter Zijlstra047106a2009-11-16 10:28:09 +01006557 synchronize_sched();
6558
Rusty Russell68e74562008-11-25 02:35:13 +10306559 cpupri_cleanup(&rd->cpupri);
6560
Rusty Russellc6c49272008-11-25 02:35:05 +10306561 free_cpumask_var(rd->rto_mask);
6562 free_cpumask_var(rd->online);
6563 free_cpumask_var(rd->span);
6564 kfree(rd);
6565}
6566
Gregory Haskins57d885f2008-01-25 21:08:18 +01006567static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6568{
Ingo Molnara0490fa2009-02-12 11:35:40 +01006569 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006570 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006571
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006572 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006573
6574 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01006575 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006576
Rusty Russellc6c49272008-11-25 02:35:05 +10306577 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006578 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006579
Rusty Russellc6c49272008-11-25 02:35:05 +10306580 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01006581
Ingo Molnara0490fa2009-02-12 11:35:40 +01006582 /*
6583 * If we dont want to free the old_rt yet then
6584 * set old_rd to NULL to skip the freeing later
6585 * in this function:
6586 */
6587 if (!atomic_dec_and_test(&old_rd->refcount))
6588 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006589 }
6590
6591 atomic_inc(&rd->refcount);
6592 rq->rd = rd;
6593
Rusty Russellc6c49272008-11-25 02:35:05 +10306594 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04006595 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006596 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006597
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006598 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01006599
6600 if (old_rd)
6601 free_rootdomain(old_rd);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006602}
6603
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006604static int init_rootdomain(struct root_domain *rd)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006605{
6606 memset(rd, 0, sizeof(*rd));
6607
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006608 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
Li Zefan0c910d22009-01-06 17:39:06 +08006609 goto out;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006610 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306611 goto free_span;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006612 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306613 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02006614
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006615 if (cpupri_init(&rd->cpupri) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10306616 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10306617 return 0;
6618
Rusty Russell68e74562008-11-25 02:35:13 +10306619free_rto_mask:
6620 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10306621free_online:
6622 free_cpumask_var(rd->online);
6623free_span:
6624 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08006625out:
Rusty Russellc6c49272008-11-25 02:35:05 +10306626 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006627}
6628
6629static void init_defrootdomain(void)
6630{
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006631 init_rootdomain(&def_root_domain);
Rusty Russellc6c49272008-11-25 02:35:05 +10306632
Gregory Haskins57d885f2008-01-25 21:08:18 +01006633 atomic_set(&def_root_domain.refcount, 1);
6634}
6635
Gregory Haskinsdc938522008-01-25 21:08:26 +01006636static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006637{
6638 struct root_domain *rd;
6639
6640 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6641 if (!rd)
6642 return NULL;
6643
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006644 if (init_rootdomain(rd) != 0) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306645 kfree(rd);
6646 return NULL;
6647 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006648
6649 return rd;
6650}
6651
Linus Torvalds1da177e2005-04-16 15:20:36 -07006652/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01006653 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07006654 * hold the hotplug lock.
6655 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01006656static void
6657cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006658{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006659 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006660 struct sched_domain *tmp;
6661
Peter Zijlstra669c55e2010-04-16 14:59:29 +02006662 for (tmp = sd; tmp; tmp = tmp->parent)
6663 tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6664
Suresh Siddha245af2c2005-06-25 14:57:25 -07006665 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08006666 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006667 struct sched_domain *parent = tmp->parent;
6668 if (!parent)
6669 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08006670
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006671 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006672 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006673 if (parent->parent)
6674 parent->parent->child = tmp;
Li Zefanf29c9b12008-11-06 09:45:16 +08006675 } else
6676 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006677 }
6678
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006679 if (sd && sd_degenerate(sd)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006680 sd = sd->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006681 if (sd)
6682 sd->child = NULL;
6683 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006684
6685 sched_domain_debug(sd, cpu);
6686
Gregory Haskins57d885f2008-01-25 21:08:18 +01006687 rq_attach_root(rq, rd);
Nick Piggin674311d2005-06-25 14:57:27 -07006688 rcu_assign_pointer(rq->sd, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006689}
6690
6691/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10306692static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006693
6694/* Setup the mask of cpus configured for isolated domains */
6695static int __init isolated_cpu_setup(char *str)
6696{
Rusty Russellbdddd292009-12-02 14:09:16 +10306697 alloc_bootmem_cpumask_var(&cpu_isolated_map);
Rusty Russell968ea6d2008-12-13 21:55:51 +10306698 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006699 return 1;
6700}
6701
Ingo Molnar8927f492007-10-15 17:00:13 +02006702__setup("isolcpus=", isolated_cpu_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006703
6704/*
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006705 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6706 * to a function which identifies what group(along with sched group) a CPU
Rusty Russell96f874e2008-11-25 02:35:14 +10306707 * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
6708 * (due to the fact that we keep track of groups covered with a struct cpumask).
Linus Torvalds1da177e2005-04-16 15:20:36 -07006709 *
6710 * init_sched_build_groups will build a circular linked list of the groups
6711 * covered by the given span, and will set each group's ->cpumask correctly,
6712 * and ->cpu_power to 0.
6713 */
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006714static void
Rusty Russell96f874e2008-11-25 02:35:14 +10306715init_sched_build_groups(const struct cpumask *span,
6716 const struct cpumask *cpu_map,
6717 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
Mike Travis7c16ec52008-04-04 18:11:11 -07006718 struct sched_group **sg,
Rusty Russell96f874e2008-11-25 02:35:14 +10306719 struct cpumask *tmpmask),
6720 struct cpumask *covered, struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006721{
6722 struct sched_group *first = NULL, *last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723 int i;
6724
Rusty Russell96f874e2008-11-25 02:35:14 +10306725 cpumask_clear(covered);
Mike Travis7c16ec52008-04-04 18:11:11 -07006726
Rusty Russellabcd0832008-11-25 02:35:02 +10306727 for_each_cpu(i, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006728 struct sched_group *sg;
Mike Travis7c16ec52008-04-04 18:11:11 -07006729 int group = group_fn(i, cpu_map, &sg, tmpmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006730 int j;
6731
Rusty Russell758b2cd2008-11-25 02:35:04 +10306732 if (cpumask_test_cpu(i, covered))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006733 continue;
6734
Rusty Russell758b2cd2008-11-25 02:35:04 +10306735 cpumask_clear(sched_group_cpus(sg));
Peter Zijlstra18a38852009-09-01 10:34:39 +02006736 sg->cpu_power = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006737
Rusty Russellabcd0832008-11-25 02:35:02 +10306738 for_each_cpu(j, span) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006739 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006740 continue;
6741
Rusty Russell96f874e2008-11-25 02:35:14 +10306742 cpumask_set_cpu(j, covered);
Rusty Russell758b2cd2008-11-25 02:35:04 +10306743 cpumask_set_cpu(j, sched_group_cpus(sg));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006744 }
6745 if (!first)
6746 first = sg;
6747 if (last)
6748 last->next = sg;
6749 last = sg;
6750 }
6751 last->next = first;
6752}
6753
John Hawkes9c1cfda2005-09-06 15:18:14 -07006754#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07006755
John Hawkes9c1cfda2005-09-06 15:18:14 -07006756#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08006757
John Hawkes9c1cfda2005-09-06 15:18:14 -07006758/**
6759 * find_next_best_node - find the next node to include in a sched_domain
6760 * @node: node whose sched_domain we're building
6761 * @used_nodes: nodes already in the sched_domain
6762 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006763 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07006764 * finds the closest node not already in the @used_nodes map.
6765 *
6766 * Should use nodemask_t.
6767 */
Mike Travisc5f59f02008-04-04 18:11:10 -07006768static int find_next_best_node(int node, nodemask_t *used_nodes)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006769{
6770 int i, n, val, min_val, best_node = 0;
6771
6772 min_val = INT_MAX;
6773
Mike Travis076ac2a2008-05-12 21:21:12 +02006774 for (i = 0; i < nr_node_ids; i++) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07006775 /* Start at @node */
Mike Travis076ac2a2008-05-12 21:21:12 +02006776 n = (node + i) % nr_node_ids;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006777
6778 if (!nr_cpus_node(n))
6779 continue;
6780
6781 /* Skip already used nodes */
Mike Travisc5f59f02008-04-04 18:11:10 -07006782 if (node_isset(n, *used_nodes))
John Hawkes9c1cfda2005-09-06 15:18:14 -07006783 continue;
6784
6785 /* Simple min distance search */
6786 val = node_distance(node, n);
6787
6788 if (val < min_val) {
6789 min_val = val;
6790 best_node = n;
6791 }
6792 }
6793
Mike Travisc5f59f02008-04-04 18:11:10 -07006794 node_set(best_node, *used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006795 return best_node;
6796}
6797
6798/**
6799 * sched_domain_node_span - get a cpumask for a node's sched_domain
6800 * @node: node whose cpumask we're constructing
Randy Dunlap73486722008-04-22 10:07:22 -07006801 * @span: resulting cpumask
John Hawkes9c1cfda2005-09-06 15:18:14 -07006802 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006803 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07006804 * should be one that prevents unnecessary balancing, but also spreads tasks
6805 * out optimally.
6806 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306807static void sched_domain_node_span(int node, struct cpumask *span)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006808{
Mike Travisc5f59f02008-04-04 18:11:10 -07006809 nodemask_t used_nodes;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006810 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006811
Mike Travis6ca09df2008-12-31 18:08:45 -08006812 cpumask_clear(span);
Mike Travisc5f59f02008-04-04 18:11:10 -07006813 nodes_clear(used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006814
Mike Travis6ca09df2008-12-31 18:08:45 -08006815 cpumask_or(span, span, cpumask_of_node(node));
Mike Travisc5f59f02008-04-04 18:11:10 -07006816 node_set(node, used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006817
6818 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
Mike Travisc5f59f02008-04-04 18:11:10 -07006819 int next_node = find_next_best_node(node, &used_nodes);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006820
Mike Travis6ca09df2008-12-31 18:08:45 -08006821 cpumask_or(span, span, cpumask_of_node(next_node));
John Hawkes9c1cfda2005-09-06 15:18:14 -07006822 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006823}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006824#endif /* CONFIG_NUMA */
John Hawkes9c1cfda2005-09-06 15:18:14 -07006825
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006826int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006827
John Hawkes9c1cfda2005-09-06 15:18:14 -07006828/*
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306829 * The cpus mask in sched_group and sched_domain hangs off the end.
Ingo Molnar4200efd2009-05-19 09:22:19 +02006830 *
6831 * ( See the the comments in include/linux/sched.h:struct sched_group
6832 * and struct sched_domain. )
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306833 */
6834struct static_sched_group {
6835 struct sched_group sg;
6836 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
6837};
6838
6839struct static_sched_domain {
6840 struct sched_domain sd;
6841 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
6842};
6843
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006844struct s_data {
6845#ifdef CONFIG_NUMA
6846 int sd_allnodes;
6847 cpumask_var_t domainspan;
6848 cpumask_var_t covered;
6849 cpumask_var_t notcovered;
6850#endif
6851 cpumask_var_t nodemask;
6852 cpumask_var_t this_sibling_map;
6853 cpumask_var_t this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02006854 cpumask_var_t this_book_map;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006855 cpumask_var_t send_covered;
6856 cpumask_var_t tmpmask;
6857 struct sched_group **sched_group_nodes;
6858 struct root_domain *rd;
6859};
6860
Andreas Herrmann2109b992009-08-18 12:53:00 +02006861enum s_alloc {
6862 sa_sched_groups = 0,
6863 sa_rootdomain,
6864 sa_tmpmask,
6865 sa_send_covered,
Heiko Carstens01a08542010-08-31 10:28:16 +02006866 sa_this_book_map,
Andreas Herrmann2109b992009-08-18 12:53:00 +02006867 sa_this_core_map,
6868 sa_this_sibling_map,
6869 sa_nodemask,
6870 sa_sched_group_nodes,
6871#ifdef CONFIG_NUMA
6872 sa_notcovered,
6873 sa_covered,
6874 sa_domainspan,
6875#endif
6876 sa_none,
6877};
6878
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306879/*
Ingo Molnar48f24c42006-07-03 00:25:40 -07006880 * SMT sched-domains:
John Hawkes9c1cfda2005-09-06 15:18:14 -07006881 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006882#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306883static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
Tejun Heo1871e522009-10-29 22:34:13 +09006884static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006885
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006886static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306887cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
6888 struct sched_group **sg, struct cpumask *unused)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006889{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006890 if (sg)
Tejun Heo1871e522009-10-29 22:34:13 +09006891 *sg = &per_cpu(sched_groups, cpu).sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006892 return cpu;
6893}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006894#endif /* CONFIG_SCHED_SMT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006895
Ingo Molnar48f24c42006-07-03 00:25:40 -07006896/*
6897 * multi-core sched-domains:
6898 */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006899#ifdef CONFIG_SCHED_MC
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306900static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
6901static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006902
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006903static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306904cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6905 struct sched_group **sg, struct cpumask *mask)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006906{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006907 int group;
Heiko Carstensf2698932010-08-31 10:28:15 +02006908#ifdef CONFIG_SCHED_SMT
Rusty Russellc69fc562009-03-13 14:49:46 +10306909 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306910 group = cpumask_first(mask);
Heiko Carstensf2698932010-08-31 10:28:15 +02006911#else
6912 group = cpu;
6913#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006914 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306915 *sg = &per_cpu(sched_group_core, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006916 return group;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006917}
Heiko Carstensf2698932010-08-31 10:28:15 +02006918#endif /* CONFIG_SCHED_MC */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006919
Heiko Carstens01a08542010-08-31 10:28:16 +02006920/*
6921 * book sched-domains:
6922 */
6923#ifdef CONFIG_SCHED_BOOK
6924static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
6925static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
6926
Linus Torvalds1da177e2005-04-16 15:20:36 -07006927static int
Heiko Carstens01a08542010-08-31 10:28:16 +02006928cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
6929 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006930{
Heiko Carstens01a08542010-08-31 10:28:16 +02006931 int group = cpu;
6932#ifdef CONFIG_SCHED_MC
6933 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
6934 group = cpumask_first(mask);
6935#elif defined(CONFIG_SCHED_SMT)
6936 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
6937 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006938#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02006939 if (sg)
6940 *sg = &per_cpu(sched_group_book, group).sg;
6941 return group;
6942}
6943#endif /* CONFIG_SCHED_BOOK */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006944
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306945static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
6946static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006947
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006948static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306949cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
6950 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006951{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006952 int group;
Heiko Carstens01a08542010-08-31 10:28:16 +02006953#ifdef CONFIG_SCHED_BOOK
6954 cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
6955 group = cpumask_first(mask);
6956#elif defined(CONFIG_SCHED_MC)
Mike Travis6ca09df2008-12-31 18:08:45 -08006957 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306958 group = cpumask_first(mask);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006959#elif defined(CONFIG_SCHED_SMT)
Rusty Russellc69fc562009-03-13 14:49:46 +10306960 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306961 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006962#else
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006963 group = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006964#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006965 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306966 *sg = &per_cpu(sched_group_phys, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006967 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006968}
6969
6970#ifdef CONFIG_NUMA
John Hawkes9c1cfda2005-09-06 15:18:14 -07006971/*
6972 * The init_sched_build_groups can't handle what we want to do with node
6973 * groups, so roll our own. Now each node has its own list of groups which
6974 * gets dynamically allocated.
6975 */
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006976static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
Mike Travis434d53b2008-04-04 18:11:04 -07006977static struct sched_group ***sched_group_nodes_bycpu;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006978
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006979static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306980static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006981
Rusty Russell96f874e2008-11-25 02:35:14 +10306982static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
6983 struct sched_group **sg,
6984 struct cpumask *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006985{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006986 int group;
6987
Mike Travis6ca09df2008-12-31 18:08:45 -08006988 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306989 group = cpumask_first(nodemask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006990
6991 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306992 *sg = &per_cpu(sched_group_allnodes, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006993 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994}
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006995
Siddha, Suresh B08069032006-03-27 01:15:23 -08006996static void init_numa_sched_groups_power(struct sched_group *group_head)
6997{
6998 struct sched_group *sg = group_head;
6999 int j;
7000
7001 if (!sg)
7002 return;
Andi Kleen3a5c3592007-10-15 17:00:14 +02007003 do {
Rusty Russell758b2cd2008-11-25 02:35:04 +10307004 for_each_cpu(j, sched_group_cpus(sg)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02007005 struct sched_domain *sd;
Siddha, Suresh B08069032006-03-27 01:15:23 -08007006
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307007 sd = &per_cpu(phys_domains, j).sd;
Miao Xie13318a72009-04-15 09:59:10 +08007008 if (j != group_first_cpu(sd->groups)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02007009 /*
7010 * Only add "power" once for each
7011 * physical package.
7012 */
7013 continue;
7014 }
7015
Peter Zijlstra18a38852009-09-01 10:34:39 +02007016 sg->cpu_power += sd->groups->cpu_power;
Siddha, Suresh B08069032006-03-27 01:15:23 -08007017 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02007018 sg = sg->next;
7019 } while (sg != group_head);
Siddha, Suresh B08069032006-03-27 01:15:23 -08007020}
Andreas Herrmann0601a882009-08-18 13:01:11 +02007021
7022static int build_numa_sched_groups(struct s_data *d,
7023 const struct cpumask *cpu_map, int num)
7024{
7025 struct sched_domain *sd;
7026 struct sched_group *sg, *prev;
7027 int n, j;
7028
7029 cpumask_clear(d->covered);
7030 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
7031 if (cpumask_empty(d->nodemask)) {
7032 d->sched_group_nodes[num] = NULL;
7033 goto out;
7034 }
7035
7036 sched_domain_node_span(num, d->domainspan);
7037 cpumask_and(d->domainspan, d->domainspan, cpu_map);
7038
7039 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7040 GFP_KERNEL, num);
7041 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007042 printk(KERN_WARNING "Can not alloc domain group for node %d\n",
7043 num);
Andreas Herrmann0601a882009-08-18 13:01:11 +02007044 return -ENOMEM;
7045 }
7046 d->sched_group_nodes[num] = sg;
7047
7048 for_each_cpu(j, d->nodemask) {
7049 sd = &per_cpu(node_domains, j).sd;
7050 sd->groups = sg;
7051 }
7052
Peter Zijlstra18a38852009-09-01 10:34:39 +02007053 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02007054 cpumask_copy(sched_group_cpus(sg), d->nodemask);
7055 sg->next = sg;
7056 cpumask_or(d->covered, d->covered, d->nodemask);
7057
7058 prev = sg;
7059 for (j = 0; j < nr_node_ids; j++) {
7060 n = (num + j) % nr_node_ids;
7061 cpumask_complement(d->notcovered, d->covered);
7062 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
7063 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
7064 if (cpumask_empty(d->tmpmask))
7065 break;
7066 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
7067 if (cpumask_empty(d->tmpmask))
7068 continue;
7069 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7070 GFP_KERNEL, num);
7071 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007072 printk(KERN_WARNING
7073 "Can not alloc domain group for node %d\n", j);
Andreas Herrmann0601a882009-08-18 13:01:11 +02007074 return -ENOMEM;
7075 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02007076 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02007077 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
7078 sg->next = prev->next;
7079 cpumask_or(d->covered, d->covered, d->tmpmask);
7080 prev->next = sg;
7081 prev = sg;
7082 }
7083out:
7084 return 0;
7085}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007086#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007087
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007088#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007089/* Free memory allocated for various sched_group structures */
Rusty Russell96f874e2008-11-25 02:35:14 +10307090static void free_sched_groups(const struct cpumask *cpu_map,
7091 struct cpumask *nodemask)
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007092{
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007093 int cpu, i;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007094
Rusty Russellabcd0832008-11-25 02:35:02 +10307095 for_each_cpu(cpu, cpu_map) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007096 struct sched_group **sched_group_nodes
7097 = sched_group_nodes_bycpu[cpu];
7098
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007099 if (!sched_group_nodes)
7100 continue;
7101
Mike Travis076ac2a2008-05-12 21:21:12 +02007102 for (i = 0; i < nr_node_ids; i++) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007103 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7104
Mike Travis6ca09df2008-12-31 18:08:45 -08007105 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10307106 if (cpumask_empty(nodemask))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007107 continue;
7108
7109 if (sg == NULL)
7110 continue;
7111 sg = sg->next;
7112next_sg:
7113 oldsg = sg;
7114 sg = sg->next;
7115 kfree(oldsg);
7116 if (oldsg != sched_group_nodes[i])
7117 goto next_sg;
7118 }
7119 kfree(sched_group_nodes);
7120 sched_group_nodes_bycpu[cpu] = NULL;
7121 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007122}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007123#else /* !CONFIG_NUMA */
Rusty Russell96f874e2008-11-25 02:35:14 +10307124static void free_sched_groups(const struct cpumask *cpu_map,
7125 struct cpumask *nodemask)
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007126{
7127}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007128#endif /* CONFIG_NUMA */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007129
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007131 * Initialize sched groups cpu_power.
7132 *
7133 * cpu_power indicates the capacity of sched group, which is used while
7134 * distributing the load between different sched groups in a sched domain.
7135 * Typically cpu_power for all the groups in a sched domain will be same unless
7136 * there are asymmetries in the topology. If there are asymmetries, group
7137 * having more cpu_power will pickup more load compared to the group having
7138 * less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007139 */
7140static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7141{
7142 struct sched_domain *child;
7143 struct sched_group *group;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007144 long power;
7145 int weight;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007146
7147 WARN_ON(!sd || !sd->groups);
7148
Miao Xie13318a72009-04-15 09:59:10 +08007149 if (cpu != group_first_cpu(sd->groups))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007150 return;
7151
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07007152 sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
7153
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007154 child = sd->child;
7155
Peter Zijlstra18a38852009-09-01 10:34:39 +02007156 sd->groups->cpu_power = 0;
Eric Dumazet5517d862007-05-08 00:32:57 -07007157
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007158 if (!child) {
7159 power = SCHED_LOAD_SCALE;
7160 weight = cpumask_weight(sched_domain_span(sd));
7161 /*
7162 * SMT siblings share the power of a single core.
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02007163 * Usually multiple threads get a better yield out of
7164 * that one core than a single thread would have,
7165 * reflect that in sd->smt_gain.
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007166 */
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02007167 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
7168 power *= sd->smt_gain;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007169 power /= weight;
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02007170 power >>= SCHED_LOAD_SHIFT;
7171 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02007172 sd->groups->cpu_power += power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007173 return;
7174 }
7175
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007176 /*
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007177 * Add cpu_power of each child group to this groups cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007178 */
7179 group = child->groups;
7180 do {
Peter Zijlstra18a38852009-09-01 10:34:39 +02007181 sd->groups->cpu_power += group->cpu_power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007182 group = group->next;
7183 } while (group != child->groups);
7184}
7185
7186/*
Mike Travis7c16ec52008-04-04 18:11:11 -07007187 * Initializers for schedule domains
7188 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7189 */
7190
Ingo Molnara5d8c342008-10-09 11:35:51 +02007191#ifdef CONFIG_SCHED_DEBUG
7192# define SD_INIT_NAME(sd, type) sd->name = #type
7193#else
7194# define SD_INIT_NAME(sd, type) do { } while (0)
7195#endif
7196
Mike Travis7c16ec52008-04-04 18:11:11 -07007197#define SD_INIT(sd, type) sd_init_##type(sd)
Ingo Molnara5d8c342008-10-09 11:35:51 +02007198
Mike Travis7c16ec52008-04-04 18:11:11 -07007199#define SD_INIT_FUNC(type) \
7200static noinline void sd_init_##type(struct sched_domain *sd) \
7201{ \
7202 memset(sd, 0, sizeof(*sd)); \
7203 *sd = SD_##type##_INIT; \
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007204 sd->level = SD_LV_##type; \
Ingo Molnara5d8c342008-10-09 11:35:51 +02007205 SD_INIT_NAME(sd, type); \
Mike Travis7c16ec52008-04-04 18:11:11 -07007206}
7207
7208SD_INIT_FUNC(CPU)
7209#ifdef CONFIG_NUMA
7210 SD_INIT_FUNC(ALLNODES)
7211 SD_INIT_FUNC(NODE)
7212#endif
7213#ifdef CONFIG_SCHED_SMT
7214 SD_INIT_FUNC(SIBLING)
7215#endif
7216#ifdef CONFIG_SCHED_MC
7217 SD_INIT_FUNC(MC)
7218#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007219#ifdef CONFIG_SCHED_BOOK
7220 SD_INIT_FUNC(BOOK)
7221#endif
Mike Travis7c16ec52008-04-04 18:11:11 -07007222
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007223static int default_relax_domain_level = -1;
7224
7225static int __init setup_relax_domain_level(char *str)
7226{
Li Zefan30e0e172008-05-13 10:27:17 +08007227 unsigned long val;
7228
7229 val = simple_strtoul(str, NULL, 0);
7230 if (val < SD_LV_MAX)
7231 default_relax_domain_level = val;
7232
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007233 return 1;
7234}
7235__setup("relax_domain_level=", setup_relax_domain_level);
7236
7237static void set_domain_attribute(struct sched_domain *sd,
7238 struct sched_domain_attr *attr)
7239{
7240 int request;
7241
7242 if (!attr || attr->relax_domain_level < 0) {
7243 if (default_relax_domain_level < 0)
7244 return;
7245 else
7246 request = default_relax_domain_level;
7247 } else
7248 request = attr->relax_domain_level;
7249 if (request < sd->level) {
7250 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007251 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007252 } else {
7253 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007254 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007255 }
7256}
7257
Andreas Herrmann2109b992009-08-18 12:53:00 +02007258static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7259 const struct cpumask *cpu_map)
7260{
7261 switch (what) {
7262 case sa_sched_groups:
7263 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
7264 d->sched_group_nodes = NULL;
7265 case sa_rootdomain:
7266 free_rootdomain(d->rd); /* fall through */
7267 case sa_tmpmask:
7268 free_cpumask_var(d->tmpmask); /* fall through */
7269 case sa_send_covered:
7270 free_cpumask_var(d->send_covered); /* fall through */
Heiko Carstens01a08542010-08-31 10:28:16 +02007271 case sa_this_book_map:
7272 free_cpumask_var(d->this_book_map); /* fall through */
Andreas Herrmann2109b992009-08-18 12:53:00 +02007273 case sa_this_core_map:
7274 free_cpumask_var(d->this_core_map); /* fall through */
7275 case sa_this_sibling_map:
7276 free_cpumask_var(d->this_sibling_map); /* fall through */
7277 case sa_nodemask:
7278 free_cpumask_var(d->nodemask); /* fall through */
7279 case sa_sched_group_nodes:
7280#ifdef CONFIG_NUMA
7281 kfree(d->sched_group_nodes); /* fall through */
7282 case sa_notcovered:
7283 free_cpumask_var(d->notcovered); /* fall through */
7284 case sa_covered:
7285 free_cpumask_var(d->covered); /* fall through */
7286 case sa_domainspan:
7287 free_cpumask_var(d->domainspan); /* fall through */
7288#endif
7289 case sa_none:
7290 break;
7291 }
7292}
7293
7294static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7295 const struct cpumask *cpu_map)
7296{
7297#ifdef CONFIG_NUMA
7298 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
7299 return sa_none;
7300 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
7301 return sa_domainspan;
7302 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
7303 return sa_covered;
7304 /* Allocate the per-node list of sched groups */
7305 d->sched_group_nodes = kcalloc(nr_node_ids,
7306 sizeof(struct sched_group *), GFP_KERNEL);
7307 if (!d->sched_group_nodes) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007308 printk(KERN_WARNING "Can not alloc sched group node list\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007309 return sa_notcovered;
7310 }
7311 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
7312#endif
7313 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
7314 return sa_sched_group_nodes;
7315 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
7316 return sa_nodemask;
7317 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
7318 return sa_this_sibling_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007319 if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
Andreas Herrmann2109b992009-08-18 12:53:00 +02007320 return sa_this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007321 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
7322 return sa_this_book_map;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007323 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
7324 return sa_send_covered;
7325 d->rd = alloc_rootdomain();
7326 if (!d->rd) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007327 printk(KERN_WARNING "Cannot alloc root domain\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007328 return sa_tmpmask;
7329 }
7330 return sa_rootdomain;
7331}
7332
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007333static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
7334 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
7335{
7336 struct sched_domain *sd = NULL;
7337#ifdef CONFIG_NUMA
7338 struct sched_domain *parent;
7339
7340 d->sd_allnodes = 0;
7341 if (cpumask_weight(cpu_map) >
7342 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
7343 sd = &per_cpu(allnodes_domains, i).sd;
7344 SD_INIT(sd, ALLNODES);
7345 set_domain_attribute(sd, attr);
7346 cpumask_copy(sched_domain_span(sd), cpu_map);
7347 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
7348 d->sd_allnodes = 1;
7349 }
7350 parent = sd;
7351
7352 sd = &per_cpu(node_domains, i).sd;
7353 SD_INIT(sd, NODE);
7354 set_domain_attribute(sd, attr);
7355 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
7356 sd->parent = parent;
7357 if (parent)
7358 parent->child = sd;
7359 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
7360#endif
7361 return sd;
7362}
7363
Andreas Herrmann87cce662009-08-18 12:54:55 +02007364static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
7365 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7366 struct sched_domain *parent, int i)
7367{
7368 struct sched_domain *sd;
7369 sd = &per_cpu(phys_domains, i).sd;
7370 SD_INIT(sd, CPU);
7371 set_domain_attribute(sd, attr);
7372 cpumask_copy(sched_domain_span(sd), d->nodemask);
7373 sd->parent = parent;
7374 if (parent)
7375 parent->child = sd;
7376 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
7377 return sd;
7378}
7379
Heiko Carstens01a08542010-08-31 10:28:16 +02007380static struct sched_domain *__build_book_sched_domain(struct s_data *d,
7381 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7382 struct sched_domain *parent, int i)
7383{
7384 struct sched_domain *sd = parent;
7385#ifdef CONFIG_SCHED_BOOK
7386 sd = &per_cpu(book_domains, i).sd;
7387 SD_INIT(sd, BOOK);
7388 set_domain_attribute(sd, attr);
7389 cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
7390 sd->parent = parent;
7391 parent->child = sd;
7392 cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
7393#endif
7394 return sd;
7395}
7396
Andreas Herrmann410c4082009-08-18 12:56:14 +02007397static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
7398 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7399 struct sched_domain *parent, int i)
7400{
7401 struct sched_domain *sd = parent;
7402#ifdef CONFIG_SCHED_MC
7403 sd = &per_cpu(core_domains, i).sd;
7404 SD_INIT(sd, MC);
7405 set_domain_attribute(sd, attr);
7406 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
7407 sd->parent = parent;
7408 parent->child = sd;
7409 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
7410#endif
7411 return sd;
7412}
7413
Andreas Herrmannd8173532009-08-18 12:57:03 +02007414static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
7415 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7416 struct sched_domain *parent, int i)
7417{
7418 struct sched_domain *sd = parent;
7419#ifdef CONFIG_SCHED_SMT
7420 sd = &per_cpu(cpu_domains, i).sd;
7421 SD_INIT(sd, SIBLING);
7422 set_domain_attribute(sd, attr);
7423 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
7424 sd->parent = parent;
7425 parent->child = sd;
7426 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
7427#endif
7428 return sd;
7429}
7430
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007431static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
7432 const struct cpumask *cpu_map, int cpu)
7433{
7434 switch (l) {
7435#ifdef CONFIG_SCHED_SMT
7436 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
7437 cpumask_and(d->this_sibling_map, cpu_map,
7438 topology_thread_cpumask(cpu));
7439 if (cpu == cpumask_first(d->this_sibling_map))
7440 init_sched_build_groups(d->this_sibling_map, cpu_map,
7441 &cpu_to_cpu_group,
7442 d->send_covered, d->tmpmask);
7443 break;
7444#endif
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007445#ifdef CONFIG_SCHED_MC
7446 case SD_LV_MC: /* set up multi-core groups */
7447 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
7448 if (cpu == cpumask_first(d->this_core_map))
7449 init_sched_build_groups(d->this_core_map, cpu_map,
7450 &cpu_to_core_group,
7451 d->send_covered, d->tmpmask);
7452 break;
7453#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007454#ifdef CONFIG_SCHED_BOOK
7455 case SD_LV_BOOK: /* set up book groups */
7456 cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
7457 if (cpu == cpumask_first(d->this_book_map))
7458 init_sched_build_groups(d->this_book_map, cpu_map,
7459 &cpu_to_book_group,
7460 d->send_covered, d->tmpmask);
7461 break;
7462#endif
Andreas Herrmann86548092009-08-18 12:59:28 +02007463 case SD_LV_CPU: /* set up physical groups */
7464 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
7465 if (!cpumask_empty(d->nodemask))
7466 init_sched_build_groups(d->nodemask, cpu_map,
7467 &cpu_to_phys_group,
7468 d->send_covered, d->tmpmask);
7469 break;
Andreas Herrmannde616e32009-08-18 13:00:13 +02007470#ifdef CONFIG_NUMA
7471 case SD_LV_ALLNODES:
7472 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
7473 d->send_covered, d->tmpmask);
7474 break;
7475#endif
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007476 default:
7477 break;
7478 }
7479}
7480
Mike Travis7c16ec52008-04-04 18:11:11 -07007481/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007482 * Build sched domains for a given set of cpus and attach the sched domains
7483 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07007484 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307485static int __build_sched_domains(const struct cpumask *cpu_map,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007486 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007487{
Andreas Herrmann2109b992009-08-18 12:53:00 +02007488 enum s_alloc alloc_state = sa_none;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007489 struct s_data d;
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007490 struct sched_domain *sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007491 int i;
John Hawkesd1b55132005-09-06 15:18:14 -07007492#ifdef CONFIG_NUMA
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007493 d.sd_allnodes = 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307494#endif
7495
Andreas Herrmann2109b992009-08-18 12:53:00 +02007496 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7497 if (alloc_state != sa_rootdomain)
7498 goto error;
7499 alloc_state = sa_sched_groups;
Mike Travis7c16ec52008-04-04 18:11:11 -07007500
Linus Torvalds1da177e2005-04-16 15:20:36 -07007501 /*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007502 * Set up domains for cpus specified by the cpu_map.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007503 */
Rusty Russellabcd0832008-11-25 02:35:02 +10307504 for_each_cpu(i, cpu_map) {
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007505 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
7506 cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007507
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007508 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
Andreas Herrmann87cce662009-08-18 12:54:55 +02007509 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007510 sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmann410c4082009-08-18 12:56:14 +02007511 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmannd8173532009-08-18 12:57:03 +02007512 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513 }
7514
Rusty Russellabcd0832008-11-25 02:35:02 +10307515 for_each_cpu(i, cpu_map) {
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007516 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007517 build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007518 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519 }
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007520
Linus Torvalds1da177e2005-04-16 15:20:36 -07007521 /* Set up physical groups */
Andreas Herrmann86548092009-08-18 12:59:28 +02007522 for (i = 0; i < nr_node_ids; i++)
7523 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524
7525#ifdef CONFIG_NUMA
7526 /* Set up node groups */
Andreas Herrmannde616e32009-08-18 13:00:13 +02007527 if (d.sd_allnodes)
7528 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007529
Andreas Herrmann0601a882009-08-18 13:01:11 +02007530 for (i = 0; i < nr_node_ids; i++)
7531 if (build_numa_sched_groups(&d, cpu_map, i))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007532 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007533#endif
7534
7535 /* Calculate CPU power for physical packages and nodes */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007536#ifdef CONFIG_SCHED_SMT
Rusty Russellabcd0832008-11-25 02:35:02 +10307537 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007538 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007539 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007540 }
7541#endif
7542#ifdef CONFIG_SCHED_MC
Rusty Russellabcd0832008-11-25 02:35:02 +10307543 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007544 sd = &per_cpu(core_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007545 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007546 }
7547#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007548#ifdef CONFIG_SCHED_BOOK
7549 for_each_cpu(i, cpu_map) {
7550 sd = &per_cpu(book_domains, i).sd;
7551 init_sched_groups_power(i, sd);
7552 }
7553#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007554
Rusty Russellabcd0832008-11-25 02:35:02 +10307555 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007556 sd = &per_cpu(phys_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007557 init_sched_groups_power(i, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007558 }
7559
John Hawkes9c1cfda2005-09-06 15:18:14 -07007560#ifdef CONFIG_NUMA
Mike Travis076ac2a2008-05-12 21:21:12 +02007561 for (i = 0; i < nr_node_ids; i++)
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007562 init_numa_sched_groups_power(d.sched_group_nodes[i]);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007563
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007564 if (d.sd_allnodes) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007565 struct sched_group *sg;
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007566
Rusty Russell96f874e2008-11-25 02:35:14 +10307567 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007568 d.tmpmask);
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007569 init_numa_sched_groups_power(sg);
7570 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07007571#endif
7572
Linus Torvalds1da177e2005-04-16 15:20:36 -07007573 /* Attach the domains */
Rusty Russellabcd0832008-11-25 02:35:02 +10307574 for_each_cpu(i, cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007575#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307576 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007577#elif defined(CONFIG_SCHED_MC)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307578 sd = &per_cpu(core_domains, i).sd;
Heiko Carstens01a08542010-08-31 10:28:16 +02007579#elif defined(CONFIG_SCHED_BOOK)
7580 sd = &per_cpu(book_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007581#else
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307582 sd = &per_cpu(phys_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007583#endif
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007584 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007585 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007586
Andreas Herrmann2109b992009-08-18 12:53:00 +02007587 d.sched_group_nodes = NULL; /* don't free this we still need it */
7588 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
7589 return 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307590
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007591error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02007592 __free_domain_allocs(&d, alloc_state, cpu_map);
7593 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007594}
Paul Jackson029190c2007-10-18 23:40:20 -07007595
Rusty Russell96f874e2008-11-25 02:35:14 +10307596static int build_sched_domains(const struct cpumask *cpu_map)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007597{
7598 return __build_sched_domains(cpu_map, NULL);
7599}
7600
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307601static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07007602static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02007603static struct sched_domain_attr *dattr_cur;
7604 /* attribues of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07007605
7606/*
7607 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10307608 * cpumask) fails, then fallback to a single sched domain,
7609 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07007610 */
Rusty Russell42128232008-11-25 02:35:12 +10307611static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07007612
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007613/*
7614 * arch_update_cpu_topology lets virtualized architectures update the
7615 * cpu core maps. It is supposed to return 1 if the topology changed
7616 * or 0 if it stayed the same.
7617 */
7618int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01007619{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007620 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01007621}
7622
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307623cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7624{
7625 int i;
7626 cpumask_var_t *doms;
7627
7628 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7629 if (!doms)
7630 return NULL;
7631 for (i = 0; i < ndoms; i++) {
7632 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7633 free_sched_domains(doms, i);
7634 return NULL;
7635 }
7636 }
7637 return doms;
7638}
7639
7640void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7641{
7642 unsigned int i;
7643 for (i = 0; i < ndoms; i++)
7644 free_cpumask_var(doms[i]);
7645 kfree(doms);
7646}
7647
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007648/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007649 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07007650 * For now this just excludes isolated cpus, but could be used to
7651 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007652 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307653static int arch_init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007654{
Milton Miller73785472007-10-24 18:23:48 +02007655 int err;
7656
Heiko Carstens22e52b02008-03-12 18:31:59 +01007657 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07007658 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307659 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07007660 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307661 doms_cur = &fallback_doms;
7662 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007663 dattr_cur = NULL;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307664 err = build_sched_domains(doms_cur[0]);
Milton Miller6382bc92007-10-15 17:00:19 +02007665 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02007666
7667 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007668}
7669
Rusty Russell96f874e2008-11-25 02:35:14 +10307670static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7671 struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007672{
Mike Travis7c16ec52008-04-04 18:11:11 -07007673 free_sched_groups(cpu_map, tmpmask);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007674}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007675
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007676/*
7677 * Detach sched domains from a group of cpus specified in cpu_map
7678 * These cpus will now be attached to the NULL domain
7679 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307680static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007681{
Rusty Russell96f874e2008-11-25 02:35:14 +10307682 /* Save because hotplug lock held. */
7683 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007684 int i;
7685
Rusty Russellabcd0832008-11-25 02:35:02 +10307686 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007687 cpu_attach_domain(NULL, &def_root_domain, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007688 synchronize_sched();
Rusty Russell96f874e2008-11-25 02:35:14 +10307689 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007690}
7691
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007692/* handle null as "default" */
7693static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7694 struct sched_domain_attr *new, int idx_new)
7695{
7696 struct sched_domain_attr tmp;
7697
7698 /* fast path */
7699 if (!new && !cur)
7700 return 1;
7701
7702 tmp = SD_ATTR_INIT;
7703 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7704 new ? (new + idx_new) : &tmp,
7705 sizeof(struct sched_domain_attr));
7706}
7707
Paul Jackson029190c2007-10-18 23:40:20 -07007708/*
7709 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007710 * cpumasks in the array doms_new[] of cpumasks. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07007711 * doms_new[] to the current sched domain partitioning, doms_cur[].
7712 * It destroys each deleted domain and builds each new domain.
7713 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307714 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007715 * The masks don't intersect (don't overlap.) We should setup one
7716 * sched domain for each mask. CPUs not in any of the cpumasks will
7717 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07007718 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7719 * it as it is.
7720 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307721 * The passed in 'doms_new' should be allocated using
7722 * alloc_sched_domains. This routine takes ownership of it and will
7723 * free_sched_domains it when done with it. If the caller failed the
7724 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7725 * and partition_sched_domains() will fallback to the single partition
7726 * 'fallback_doms', it also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07007727 *
Rusty Russell96f874e2008-11-25 02:35:14 +10307728 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08007729 * ndoms_new == 0 is a special case for destroying existing domains,
7730 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007731 *
Paul Jackson029190c2007-10-18 23:40:20 -07007732 * Call with hotplug lock held
7733 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307734void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007735 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07007736{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007737 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007738 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07007739
Heiko Carstens712555e2008-04-28 11:33:07 +02007740 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007741
Milton Miller73785472007-10-24 18:23:48 +02007742 /* always unregister in case we don't destroy any domains */
7743 unregister_sched_domain_sysctl();
7744
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007745 /* Let architecture update cpu core mappings. */
7746 new_topology = arch_update_cpu_topology();
7747
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007748 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07007749
7750 /* Destroy deleted domains */
7751 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007752 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307753 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007754 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007755 goto match1;
7756 }
7757 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307758 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07007759match1:
7760 ;
7761 }
7762
Max Krasnyanskye761b772008-07-15 04:43:49 -07007763 if (doms_new == NULL) {
7764 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307765 doms_new = &fallback_doms;
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007766 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08007767 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007768 }
7769
Paul Jackson029190c2007-10-18 23:40:20 -07007770 /* Build new domains */
7771 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007772 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307773 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007774 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007775 goto match2;
7776 }
7777 /* no match - add a new doms_new */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307778 __build_sched_domains(doms_new[i],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007779 dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07007780match2:
7781 ;
7782 }
7783
7784 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307785 if (doms_cur != &fallback_doms)
7786 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007787 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07007788 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007789 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07007790 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02007791
7792 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007793
Heiko Carstens712555e2008-04-28 11:33:07 +02007794 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07007795}
7796
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007797#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Li Zefanc70f22d2009-01-05 19:07:50 +08007798static void arch_reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007799{
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007800 get_online_cpus();
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007801
7802 /* Destroy domains first to force the rebuild */
7803 partition_sched_domains(0, NULL, NULL);
7804
Max Krasnyanskye761b772008-07-15 04:43:49 -07007805 rebuild_sched_domains();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007806 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007807}
7808
7809static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7810{
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307811 unsigned int level = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007812
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307813 if (sscanf(buf, "%u", &level) != 1)
7814 return -EINVAL;
7815
7816 /*
7817 * level is always be positive so don't check for
7818 * level < POWERSAVINGS_BALANCE_NONE which is 0
7819 * What happens on 0 or 1 byte write,
7820 * need to check for count as well?
7821 */
7822
7823 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007824 return -EINVAL;
7825
7826 if (smt)
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307827 sched_smt_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007828 else
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307829 sched_mc_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007830
Li Zefanc70f22d2009-01-05 19:07:50 +08007831 arch_reinit_sched_domains();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007832
Li Zefanc70f22d2009-01-05 19:07:50 +08007833 return count;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007834}
7835
Adrian Bunk6707de002007-08-12 18:08:19 +02007836#ifdef CONFIG_SCHED_MC
Andi Kleenf718cd42008-07-29 22:33:52 -07007837static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007838 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007839 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007840{
7841 return sprintf(page, "%u\n", sched_mc_power_savings);
7842}
Andi Kleenf718cd42008-07-29 22:33:52 -07007843static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007844 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007845 const char *buf, size_t count)
7846{
7847 return sched_power_savings_store(buf, count, 0);
7848}
Andi Kleenf718cd42008-07-29 22:33:52 -07007849static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7850 sched_mc_power_savings_show,
7851 sched_mc_power_savings_store);
Adrian Bunk6707de002007-08-12 18:08:19 +02007852#endif
7853
7854#ifdef CONFIG_SCHED_SMT
Andi Kleenf718cd42008-07-29 22:33:52 -07007855static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007856 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007857 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007858{
7859 return sprintf(page, "%u\n", sched_smt_power_savings);
7860}
Andi Kleenf718cd42008-07-29 22:33:52 -07007861static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007862 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007863 const char *buf, size_t count)
7864{
7865 return sched_power_savings_store(buf, count, 1);
7866}
Andi Kleenf718cd42008-07-29 22:33:52 -07007867static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7868 sched_smt_power_savings_show,
Adrian Bunk6707de002007-08-12 18:08:19 +02007869 sched_smt_power_savings_store);
7870#endif
7871
Li Zefan39aac642009-01-05 19:18:02 +08007872int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007873{
7874 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007875
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007876#ifdef CONFIG_SCHED_SMT
7877 if (smt_capable())
7878 err = sysfs_create_file(&cls->kset.kobj,
7879 &attr_sched_smt_power_savings.attr);
7880#endif
7881#ifdef CONFIG_SCHED_MC
7882 if (!err && mc_capable())
7883 err = sysfs_create_file(&cls->kset.kobj,
7884 &attr_sched_mc_power_savings.attr);
7885#endif
7886 return err;
7887}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007888#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007889
Linus Torvalds1da177e2005-04-16 15:20:36 -07007890/*
Tejun Heo3a101d02010-06-08 21:40:36 +02007891 * Update cpusets according to cpu_active mask. If cpusets are
7892 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7893 * around partition_sched_domains().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007894 */
Tejun Heo0b2e9182010-06-21 23:53:31 +02007895static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7896 void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007897{
Tejun Heo3a101d02010-06-08 21:40:36 +02007898 switch (action & ~CPU_TASKS_FROZEN) {
Max Krasnyanskye761b772008-07-15 04:43:49 -07007899 case CPU_ONLINE:
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007900 case CPU_DOWN_FAILED:
Tejun Heo3a101d02010-06-08 21:40:36 +02007901 cpuset_update_active_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007902 return NOTIFY_OK;
Max Krasnyanskye761b772008-07-15 04:43:49 -07007903 default:
7904 return NOTIFY_DONE;
7905 }
7906}
Tejun Heo3a101d02010-06-08 21:40:36 +02007907
Tejun Heo0b2e9182010-06-21 23:53:31 +02007908static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7909 void *hcpu)
Tejun Heo3a101d02010-06-08 21:40:36 +02007910{
7911 switch (action & ~CPU_TASKS_FROZEN) {
7912 case CPU_DOWN_PREPARE:
7913 cpuset_update_active_cpus();
7914 return NOTIFY_OK;
7915 default:
7916 return NOTIFY_DONE;
7917 }
7918}
Max Krasnyanskye761b772008-07-15 04:43:49 -07007919
7920static int update_runtime(struct notifier_block *nfb,
7921 unsigned long action, void *hcpu)
7922{
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007923 int cpu = (int)(long)hcpu;
7924
Linus Torvalds1da177e2005-04-16 15:20:36 -07007925 switch (action) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007926 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007927 case CPU_DOWN_PREPARE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007928 disable_runtime(cpu_rq(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007929 return NOTIFY_OK;
7930
Linus Torvalds1da177e2005-04-16 15:20:36 -07007931 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007932 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007933 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007934 case CPU_ONLINE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007935 enable_runtime(cpu_rq(cpu));
Max Krasnyanskye761b772008-07-15 04:43:49 -07007936 return NOTIFY_OK;
7937
Linus Torvalds1da177e2005-04-16 15:20:36 -07007938 default:
7939 return NOTIFY_DONE;
7940 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007941}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007942
7943void __init sched_init_smp(void)
7944{
Rusty Russelldcc30a32008-11-25 02:35:12 +10307945 cpumask_var_t non_isolated_cpus;
7946
7947 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08007948 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007949
Mike Travis434d53b2008-04-04 18:11:04 -07007950#if defined(CONFIG_NUMA)
7951 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7952 GFP_KERNEL);
7953 BUG_ON(sched_group_nodes_bycpu == NULL);
7954#endif
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007955 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02007956 mutex_lock(&sched_domains_mutex);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007957 arch_init_sched_domains(cpu_active_mask);
Rusty Russelldcc30a32008-11-25 02:35:12 +10307958 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7959 if (cpumask_empty(non_isolated_cpus))
7960 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02007961 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007962 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007963
Tejun Heo3a101d02010-06-08 21:40:36 +02007964 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7965 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007966
7967 /* RT runtime code needs to handle some hotplug events */
7968 hotcpu_notifier(update_runtime, 0);
7969
Peter Zijlstrab328ca12008-04-29 10:02:46 +02007970 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07007971
7972 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10307973 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07007974 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01007975 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10307976 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10307977
Rusty Russell0e3900e2008-11-25 02:35:13 +10307978 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979}
7980#else
7981void __init sched_init_smp(void)
7982{
Ingo Molnar19978ca2007-11-09 22:39:38 +01007983 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007984}
7985#endif /* CONFIG_SMP */
7986
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05307987const_debug unsigned int sysctl_timer_migration = 1;
7988
Linus Torvalds1da177e2005-04-16 15:20:36 -07007989int in_sched_functions(unsigned long addr)
7990{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007991 return in_lock_functions(addr) ||
7992 (addr >= (unsigned long)__sched_text_start
7993 && addr < (unsigned long)__sched_text_end);
7994}
7995
Alexey Dobriyana9957442007-10-15 17:00:13 +02007996static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02007997{
7998 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02007999 INIT_LIST_HEAD(&cfs_rq->tasks);
Ingo Molnardd41f592007-07-09 18:51:59 +02008000#ifdef CONFIG_FAIR_GROUP_SCHED
8001 cfs_rq->rq = rq;
Paul Turnerf07333b2011-01-21 20:45:03 -08008002 /* allow initial update_cfs_load() to truncate */
Peter Zijlstra6ea72f12011-01-26 13:36:03 +01008003#ifdef CONFIG_SMP
Paul Turnerf07333b2011-01-21 20:45:03 -08008004 cfs_rq->load_stamp = 1;
Ingo Molnardd41f592007-07-09 18:51:59 +02008005#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02008006#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02008007 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02008008}
8009
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008010static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8011{
8012 struct rt_prio_array *array;
8013 int i;
8014
8015 array = &rt_rq->active;
8016 for (i = 0; i < MAX_RT_PRIO; i++) {
8017 INIT_LIST_HEAD(array->queue + i);
8018 __clear_bit(i, array->bitmap);
8019 }
8020 /* delimiter for bitsearch: */
8021 __set_bit(MAX_RT_PRIO, array->bitmap);
8022
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008023#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -05008024 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins398a1532009-01-14 09:10:04 -05008025#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -05008026 rt_rq->highest_prio.next = MAX_RT_PRIO;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01008027#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008028#endif
8029#ifdef CONFIG_SMP
8030 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008031 rt_rq->overloaded = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008032 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008033#endif
8034
8035 rt_rq->rt_time = 0;
8036 rt_rq->rt_throttled = 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008037 rt_rq->rt_runtime = 0;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008038 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008039
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008040#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01008041 rt_rq->rt_nr_boosted = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008042 rt_rq->rq = rq;
8043#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008044}
8045
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008046#ifdef CONFIG_FAIR_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008047static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008048 struct sched_entity *se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008049 struct sched_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008050{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008051 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008052 tg->cfs_rq[cpu] = cfs_rq;
8053 init_cfs_rq(cfs_rq, rq);
8054 cfs_rq->tg = tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008055
8056 tg->se[cpu] = se;
Yong Zhang07e06b02011-01-07 15:17:36 +08008057 /* se could be NULL for root_task_group */
Dhaval Giani354d60c2008-04-19 19:44:59 +02008058 if (!se)
8059 return;
8060
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008061 if (!parent)
8062 se->cfs_rq = &rq->cfs;
8063 else
8064 se->cfs_rq = parent->my_q;
8065
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008066 se->my_q = cfs_rq;
Paul Turner94371782010-11-15 15:47:10 -08008067 update_load_set(&se->load, 0);
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008068 se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008069}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008070#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008071
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008072#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008073static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008074 struct sched_rt_entity *rt_se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008075 struct sched_rt_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008076{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008077 struct rq *rq = cpu_rq(cpu);
8078
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008079 tg->rt_rq[cpu] = rt_rq;
8080 init_rt_rq(rt_rq, rq);
8081 rt_rq->tg = tg;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008082 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008083
8084 tg->rt_se[cpu] = rt_se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02008085 if (!rt_se)
8086 return;
8087
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008088 if (!parent)
8089 rt_se->rt_rq = &rq->rt;
8090 else
8091 rt_se->rt_rq = parent->my_q;
8092
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008093 rt_se->my_q = rt_rq;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008094 rt_se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008095 INIT_LIST_HEAD(&rt_se->run_list);
8096}
8097#endif
8098
Linus Torvalds1da177e2005-04-16 15:20:36 -07008099void __init sched_init(void)
8100{
Ingo Molnardd41f592007-07-09 18:51:59 +02008101 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07008102 unsigned long alloc_size = 0, ptr;
8103
8104#ifdef CONFIG_FAIR_GROUP_SCHED
8105 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8106#endif
8107#ifdef CONFIG_RT_GROUP_SCHED
8108 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8109#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308110#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10308111 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308112#endif
Mike Travis434d53b2008-04-04 18:11:04 -07008113 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03008114 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07008115
8116#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008117 root_task_group.se = (struct sched_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008118 ptr += nr_cpu_ids * sizeof(void **);
8119
Yong Zhang07e06b02011-01-07 15:17:36 +08008120 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008121 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02008122
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008123#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07008124#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008125 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008126 ptr += nr_cpu_ids * sizeof(void **);
8127
Yong Zhang07e06b02011-01-07 15:17:36 +08008128 root_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02008129 ptr += nr_cpu_ids * sizeof(void **);
8130
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008131#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308132#ifdef CONFIG_CPUMASK_OFFSTACK
8133 for_each_possible_cpu(i) {
8134 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
8135 ptr += cpumask_size();
8136 }
8137#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07008138 }
Ingo Molnardd41f592007-07-09 18:51:59 +02008139
Gregory Haskins57d885f2008-01-25 21:08:18 +01008140#ifdef CONFIG_SMP
8141 init_defrootdomain();
8142#endif
8143
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008144 init_rt_bandwidth(&def_rt_bandwidth,
8145 global_rt_period(), global_rt_runtime());
8146
8147#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008148 init_rt_bandwidth(&root_task_group.rt_bandwidth,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008149 global_rt_period(), global_rt_runtime());
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008150#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008151
Dhaval Giani7c941432010-01-20 13:26:18 +01008152#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008153 list_add(&root_task_group.list, &task_groups);
8154 INIT_LIST_HEAD(&root_task_group.children);
Mike Galbraith5091faa2010-11-30 14:18:03 +01008155 autogroup_init(&init_task);
Dhaval Giani7c941432010-01-20 13:26:18 +01008156#endif /* CONFIG_CGROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008157
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08008158 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07008159 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008160
8161 rq = cpu_rq(i);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008162 raw_spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07008163 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008164 rq->calc_load_active = 0;
8165 rq->calc_load_update = jiffies + LOAD_FREQ;
Ingo Molnardd41f592007-07-09 18:51:59 +02008166 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008167 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008168#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008169 root_task_group.shares = root_task_group_load;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008170 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008171 /*
Yong Zhang07e06b02011-01-07 15:17:36 +08008172 * How much cpu bandwidth does root_task_group get?
Dhaval Giani354d60c2008-04-19 19:44:59 +02008173 *
8174 * In case of task-groups formed thr' the cgroup filesystem, it
8175 * gets 100% of the cpu resources in the system. This overall
8176 * system cpu resource is divided among the tasks of
Yong Zhang07e06b02011-01-07 15:17:36 +08008177 * root_task_group and its child task-groups in a fair manner,
Dhaval Giani354d60c2008-04-19 19:44:59 +02008178 * based on each entity's (task or task-group's) weight
8179 * (se->load.weight).
8180 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008181 * In other words, if root_task_group has 10 tasks of weight
Dhaval Giani354d60c2008-04-19 19:44:59 +02008182 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8183 * then A0's share of the cpu resource is:
8184 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02008185 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02008186 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008187 * We achieve this by letting root_task_group's tasks sit
8188 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
Dhaval Giani354d60c2008-04-19 19:44:59 +02008189 */
Yong Zhang07e06b02011-01-07 15:17:36 +08008190 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008191#endif /* CONFIG_FAIR_GROUP_SCHED */
8192
8193 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008194#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008195 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Yong Zhang07e06b02011-01-07 15:17:36 +08008196 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008197#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008198
Ingo Molnardd41f592007-07-09 18:51:59 +02008199 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
8200 rq->cpu_load[j] = 0;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07008201
8202 rq->last_load_update_tick = jiffies;
8203
Linus Torvalds1da177e2005-04-16 15:20:36 -07008204#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07008205 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01008206 rq->rd = NULL;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02008207 rq->cpu_power = SCHED_LOAD_SCALE;
Gregory Haskins3f029d32009-07-29 11:08:47 -04008208 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008209 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02008210 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008211 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07008212 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04008213 rq->online = 0;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01008214 rq->idle_stamp = 0;
8215 rq->avg_idle = 2*sysctl_sched_migration_cost;
Gregory Haskinsdc938522008-01-25 21:08:26 +01008216 rq_attach_root(rq, &def_root_domain);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008217#ifdef CONFIG_NO_HZ
8218 rq->nohz_balance_kick = 0;
8219 init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
8220#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008221#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01008222 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008223 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008224 }
8225
Peter Williams2dd73a42006-06-27 02:54:34 -07008226 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008227
Avi Kivitye107be32007-07-26 13:40:43 +02008228#ifdef CONFIG_PREEMPT_NOTIFIERS
8229 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8230#endif
8231
Christoph Lameterc9819f42006-12-10 02:20:25 -08008232#ifdef CONFIG_SMP
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008233 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
Christoph Lameterc9819f42006-12-10 02:20:25 -08008234#endif
8235
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008236#ifdef CONFIG_RT_MUTEXES
Thomas Gleixner1d615482009-11-17 14:54:03 +01008237 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008238#endif
8239
Linus Torvalds1da177e2005-04-16 15:20:36 -07008240 /*
8241 * The boot idle thread does lazy MMU switching as well:
8242 */
8243 atomic_inc(&init_mm.mm_count);
8244 enter_lazy_tlb(&init_mm, current);
8245
8246 /*
8247 * Make us the idle thread. Technically, schedule() should not be
8248 * called from this thread, however somewhere below it might be,
8249 * but because we are the idle thread, we just pick up running again
8250 * when this runqueue becomes "idle".
8251 */
8252 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008253
8254 calc_load_update = jiffies + LOAD_FREQ;
8255
Ingo Molnardd41f592007-07-09 18:51:59 +02008256 /*
8257 * During early bootup we pretend to be a normal task:
8258 */
8259 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01008260
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308261 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
Rusty Russell49557e62009-11-02 20:37:20 +10308262 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308263#ifdef CONFIG_SMP
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308264#ifdef CONFIG_NO_HZ
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008265 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8266 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
8267 atomic_set(&nohz.load_balancer, nr_cpu_ids);
8268 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
8269 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308270#endif
Rusty Russellbdddd292009-12-02 14:09:16 +10308271 /* May be allocated at isolcpus cmdline parse time */
8272 if (cpu_isolated_map == NULL)
8273 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308274#endif /* SMP */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308275
Ingo Molnar6892b752008-02-13 14:02:36 +01008276 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008277}
8278
8279#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008280static inline int preempt_count_equals(int preempt_offset)
8281{
Frederic Weisbecker234da7b2009-12-16 20:21:05 +01008282 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008283
Arnd Bergmann4ba82162011-01-25 22:52:22 +01008284 return (nested == preempt_offset);
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008285}
8286
Simon Kagstromd8948372009-12-23 11:08:18 +01008287void __might_sleep(const char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008288{
Ingo Molnar48f24c42006-07-03 00:25:40 -07008289#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07008290 static unsigned long prev_jiffy; /* ratelimiting */
8291
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008292 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8293 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02008294 return;
8295 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8296 return;
8297 prev_jiffy = jiffies;
8298
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01008299 printk(KERN_ERR
8300 "BUG: sleeping function called from invalid context at %s:%d\n",
8301 file, line);
8302 printk(KERN_ERR
8303 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8304 in_atomic(), irqs_disabled(),
8305 current->pid, current->comm);
Ingo Molnaraef745f2008-08-28 11:34:43 +02008306
8307 debug_show_held_locks(current);
8308 if (irqs_disabled())
8309 print_irqtrace_events(current);
8310 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008311#endif
8312}
8313EXPORT_SYMBOL(__might_sleep);
8314#endif
8315
8316#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008317static void normalize_task(struct rq *rq, struct task_struct *p)
8318{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008319 const struct sched_class *prev_class = p->sched_class;
8320 int old_prio = p->prio;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008321 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02008322
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02008323 on_rq = p->on_rq;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008324 if (on_rq)
8325 deactivate_task(rq, p, 0);
8326 __setscheduler(rq, p, SCHED_NORMAL, 0);
8327 if (on_rq) {
8328 activate_task(rq, p, 0);
8329 resched_task(rq->curr);
8330 }
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008331
8332 check_class_changed(rq, p, prev_class, old_prio);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008333}
8334
Linus Torvalds1da177e2005-04-16 15:20:36 -07008335void normalize_rt_tasks(void)
8336{
Ingo Molnara0f98a12007-06-17 18:37:45 +02008337 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008338 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07008339 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008340
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008341 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008342 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02008343 /*
8344 * Only normalize user tasks:
8345 */
8346 if (!p->mm)
8347 continue;
8348
Ingo Molnardd41f592007-07-09 18:51:59 +02008349 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008350#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03008351 p->se.statistics.wait_start = 0;
8352 p->se.statistics.sleep_start = 0;
8353 p->se.statistics.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008354#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02008355
8356 if (!rt_task(p)) {
8357 /*
8358 * Renice negative nice level userspace
8359 * tasks back to 0:
8360 */
8361 if (TASK_NICE(p) < 0 && p->mm)
8362 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008363 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02008364 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008365
Thomas Gleixner1d615482009-11-17 14:54:03 +01008366 raw_spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07008367 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008368
Ingo Molnar178be792007-10-15 17:00:18 +02008369 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008370
Ingo Molnarb29739f2006-06-27 02:54:51 -07008371 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01008372 raw_spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008373 } while_each_thread(g, p);
8374
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008375 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008376}
8377
8378#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07008379
Jason Wessel67fc4e02010-05-20 21:04:21 -05008380#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008381/*
Jason Wessel67fc4e02010-05-20 21:04:21 -05008382 * These functions are only useful for the IA64 MCA handling, or kdb.
Linus Torvalds1df5c102005-09-12 07:59:21 -07008383 *
8384 * They can only be called when the whole system has been
8385 * stopped - every CPU needs to be quiescent, and no scheduling
8386 * activity can take place. Using them for anything else would
8387 * be a serious bug, and as a result, they aren't even visible
8388 * under any other configuration.
8389 */
8390
8391/**
8392 * curr_task - return the current task for a given cpu.
8393 * @cpu: the processor in question.
8394 *
8395 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8396 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008397struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008398{
8399 return cpu_curr(cpu);
8400}
8401
Jason Wessel67fc4e02010-05-20 21:04:21 -05008402#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8403
8404#ifdef CONFIG_IA64
Linus Torvalds1df5c102005-09-12 07:59:21 -07008405/**
8406 * set_curr_task - set the current task for a given cpu.
8407 * @cpu: the processor in question.
8408 * @p: the task pointer to set.
8409 *
8410 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008411 * are serviced on a separate stack. It allows the architecture to switch the
8412 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07008413 * must be called with all CPU's synchronized, and interrupts disabled, the
8414 * and caller must save the original value of the current task (see
8415 * curr_task() above) and restore that value before reenabling interrupts and
8416 * re-starting the system.
8417 *
8418 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8419 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008420void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008421{
8422 cpu_curr(cpu) = p;
8423}
8424
8425#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008426
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008427#ifdef CONFIG_FAIR_GROUP_SCHED
8428static void free_fair_sched_group(struct task_group *tg)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008429{
8430 int i;
8431
8432 for_each_possible_cpu(i) {
8433 if (tg->cfs_rq)
8434 kfree(tg->cfs_rq[i]);
8435 if (tg->se)
8436 kfree(tg->se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008437 }
8438
8439 kfree(tg->cfs_rq);
8440 kfree(tg->se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008441}
8442
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008443static
8444int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008445{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008446 struct cfs_rq *cfs_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008447 struct sched_entity *se;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008448 int i;
8449
Mike Travis434d53b2008-04-04 18:11:04 -07008450 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008451 if (!tg->cfs_rq)
8452 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008453 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008454 if (!tg->se)
8455 goto err;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008456
8457 tg->shares = NICE_0_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008458
8459 for_each_possible_cpu(i) {
Li Zefaneab17222008-10-29 17:03:22 +08008460 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8461 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008462 if (!cfs_rq)
8463 goto err;
8464
Li Zefaneab17222008-10-29 17:03:22 +08008465 se = kzalloc_node(sizeof(struct sched_entity),
8466 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008467 if (!se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008468 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008469
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008470 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008471 }
8472
8473 return 1;
8474
Peter Zijlstra49246272010-10-17 21:46:10 +02008475err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008476 kfree(cfs_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008477err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008478 return 0;
8479}
8480
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008481static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8482{
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008483 struct rq *rq = cpu_rq(cpu);
8484 unsigned long flags;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008485
8486 /*
8487 * Only empty task groups can be destroyed; so we can speculatively
8488 * check on_list without danger of it being re-added.
8489 */
8490 if (!tg->cfs_rq[cpu]->on_list)
8491 return;
8492
8493 raw_spin_lock_irqsave(&rq->lock, flags);
Paul Turner822bc182010-11-29 16:55:40 -08008494 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008495 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008496}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008497#else /* !CONFG_FAIR_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008498static inline void free_fair_sched_group(struct task_group *tg)
8499{
8500}
8501
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008502static inline
8503int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008504{
8505 return 1;
8506}
8507
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008508static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8509{
8510}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008511#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008512
8513#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008514static void free_rt_sched_group(struct task_group *tg)
8515{
8516 int i;
8517
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008518 destroy_rt_bandwidth(&tg->rt_bandwidth);
8519
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008520 for_each_possible_cpu(i) {
8521 if (tg->rt_rq)
8522 kfree(tg->rt_rq[i]);
8523 if (tg->rt_se)
8524 kfree(tg->rt_se[i]);
8525 }
8526
8527 kfree(tg->rt_rq);
8528 kfree(tg->rt_se);
8529}
8530
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008531static
8532int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008533{
8534 struct rt_rq *rt_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008535 struct sched_rt_entity *rt_se;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008536 struct rq *rq;
8537 int i;
8538
Mike Travis434d53b2008-04-04 18:11:04 -07008539 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008540 if (!tg->rt_rq)
8541 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008542 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008543 if (!tg->rt_se)
8544 goto err;
8545
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008546 init_rt_bandwidth(&tg->rt_bandwidth,
8547 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008548
8549 for_each_possible_cpu(i) {
8550 rq = cpu_rq(i);
8551
Li Zefaneab17222008-10-29 17:03:22 +08008552 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8553 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008554 if (!rt_rq)
8555 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008556
Li Zefaneab17222008-10-29 17:03:22 +08008557 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8558 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008559 if (!rt_se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008560 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008561
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008562 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008563 }
8564
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008565 return 1;
8566
Peter Zijlstra49246272010-10-17 21:46:10 +02008567err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008568 kfree(rt_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008569err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008570 return 0;
8571}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008572#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008573static inline void free_rt_sched_group(struct task_group *tg)
8574{
8575}
8576
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008577static inline
8578int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008579{
8580 return 1;
8581}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008582#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008583
Dhaval Giani7c941432010-01-20 13:26:18 +01008584#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008585static void free_sched_group(struct task_group *tg)
8586{
8587 free_fair_sched_group(tg);
8588 free_rt_sched_group(tg);
Mike Galbraithe9aa1dd2011-01-05 11:11:25 +01008589 autogroup_free(tg);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008590 kfree(tg);
8591}
8592
8593/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008594struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008595{
8596 struct task_group *tg;
8597 unsigned long flags;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008598
8599 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8600 if (!tg)
8601 return ERR_PTR(-ENOMEM);
8602
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008603 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008604 goto err;
8605
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008606 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008607 goto err;
8608
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008609 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008610 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008611
8612 WARN_ON(!parent); /* root should already exist */
8613
8614 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008615 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242030-08-14 15:56:40 +08008616 list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008617 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008618
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008619 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008620
8621err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008622 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008623 return ERR_PTR(-ENOMEM);
8624}
8625
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008626/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008627static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008628{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008629 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008630 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008631}
8632
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008633/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008634void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008635{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008636 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008637 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008638
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008639 /* end participation in shares distribution */
8640 for_each_possible_cpu(i)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008641 unregister_fair_sched_group(tg, i);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008642
8643 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008644 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008645 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008646 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008647
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008648 /* wait for possible concurrent references to cfs_rqs complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008649 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008650}
8651
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008652/* change task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02008653 * The caller of this function should have put the task in its new group
8654 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8655 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008656 */
8657void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008658{
8659 int on_rq, running;
8660 unsigned long flags;
8661 struct rq *rq;
8662
8663 rq = task_rq_lock(tsk, &flags);
8664
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01008665 running = task_current(rq, tsk);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02008666 on_rq = tsk->on_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008667
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008668 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008669 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008670 if (unlikely(running))
8671 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008672
Peter Zijlstra810b3812008-02-29 15:21:01 -05008673#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008674 if (tsk->sched_class->task_move_group)
8675 tsk->sched_class->task_move_group(tsk, on_rq);
8676 else
Peter Zijlstra810b3812008-02-29 15:21:01 -05008677#endif
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008678 set_task_rq(tsk, task_cpu(tsk));
Peter Zijlstra810b3812008-02-29 15:21:01 -05008679
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008680 if (unlikely(running))
8681 tsk->sched_class->set_curr_task(rq);
8682 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01008683 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008684
Peter Zijlstra0122ec52011-04-05 17:23:51 +02008685 task_rq_unlock(rq, tsk, &flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008686}
Dhaval Giani7c941432010-01-20 13:26:18 +01008687#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008688
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008689#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008690static DEFINE_MUTEX(shares_mutex);
8691
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008692int sched_group_set_shares(struct task_group *tg, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008693{
8694 int i;
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008695 unsigned long flags;
Ingo Molnarc61935f2008-01-22 11:24:58 +01008696
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008697 /*
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008698 * We can't change the weight of the root cgroup.
8699 */
8700 if (!tg->se[0])
8701 return -EINVAL;
8702
Peter Zijlstra18d95a22008-04-19 19:45:00 +02008703 if (shares < MIN_SHARES)
8704 shares = MIN_SHARES;
Miao Xiecb4ad1f2008-04-28 12:54:56 +08008705 else if (shares > MAX_SHARES)
8706 shares = MAX_SHARES;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008707
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008708 mutex_lock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008709 if (tg->shares == shares)
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008710 goto done;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008711
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008712 tg->shares = shares;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008713 for_each_possible_cpu(i) {
Paul Turner94371782010-11-15 15:47:10 -08008714 struct rq *rq = cpu_rq(i);
8715 struct sched_entity *se;
8716
8717 se = tg->se[i];
8718 /* Propagate contribution to hierarchy */
8719 raw_spin_lock_irqsave(&rq->lock, flags);
8720 for_each_sched_entity(se)
Paul Turner6d5ab292011-01-21 20:45:01 -08008721 update_cfs_shares(group_cfs_rq(se));
Paul Turner94371782010-11-15 15:47:10 -08008722 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008723 }
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008724
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008725done:
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008726 mutex_unlock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008727 return 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008728}
8729
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008730unsigned long sched_group_shares(struct task_group *tg)
8731{
8732 return tg->shares;
8733}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008734#endif
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008735
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008736#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008737/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008738 * Ensure that the real time constraints are schedulable.
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008739 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008740static DEFINE_MUTEX(rt_constraints_mutex);
8741
8742static unsigned long to_ratio(u64 period, u64 runtime)
8743{
8744 if (runtime == RUNTIME_INF)
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008745 return 1ULL << 20;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008746
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008747 return div64_u64(runtime << 20, period);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008748}
8749
Dhaval Giani521f1a242008-02-28 15:21:56 +05308750/* Must be called with tasklist_lock held */
8751static inline int tg_has_rt_tasks(struct task_group *tg)
8752{
8753 struct task_struct *g, *p;
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008754
Dhaval Giani521f1a242008-02-28 15:21:56 +05308755 do_each_thread(g, p) {
8756 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8757 return 1;
8758 } while_each_thread(g, p);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008759
Dhaval Giani521f1a242008-02-28 15:21:56 +05308760 return 0;
8761}
8762
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008763struct rt_schedulable_data {
8764 struct task_group *tg;
8765 u64 rt_period;
8766 u64 rt_runtime;
8767};
8768
8769static int tg_schedulable(struct task_group *tg, void *data)
8770{
8771 struct rt_schedulable_data *d = data;
8772 struct task_group *child;
8773 unsigned long total, sum = 0;
8774 u64 period, runtime;
8775
8776 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8777 runtime = tg->rt_bandwidth.rt_runtime;
8778
8779 if (tg == d->tg) {
8780 period = d->rt_period;
8781 runtime = d->rt_runtime;
8782 }
8783
Peter Zijlstra4653f802008-09-23 15:33:44 +02008784 /*
8785 * Cannot have more runtime than the period.
8786 */
8787 if (runtime > period && runtime != RUNTIME_INF)
8788 return -EINVAL;
8789
8790 /*
8791 * Ensure we don't starve existing RT tasks.
8792 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008793 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8794 return -EBUSY;
8795
8796 total = to_ratio(period, runtime);
8797
Peter Zijlstra4653f802008-09-23 15:33:44 +02008798 /*
8799 * Nobody can have more than the global setting allows.
8800 */
8801 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8802 return -EINVAL;
8803
8804 /*
8805 * The sum of our children's runtime should not exceed our own.
8806 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008807 list_for_each_entry_rcu(child, &tg->children, siblings) {
8808 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8809 runtime = child->rt_bandwidth.rt_runtime;
8810
8811 if (child == d->tg) {
8812 period = d->rt_period;
8813 runtime = d->rt_runtime;
8814 }
8815
8816 sum += to_ratio(period, runtime);
8817 }
8818
8819 if (sum > total)
8820 return -EINVAL;
8821
8822 return 0;
8823}
8824
8825static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8826{
8827 struct rt_schedulable_data data = {
8828 .tg = tg,
8829 .rt_period = period,
8830 .rt_runtime = runtime,
8831 };
8832
8833 return walk_tg_tree(tg_schedulable, tg_nop, &data);
8834}
8835
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008836static int tg_set_bandwidth(struct task_group *tg,
8837 u64 rt_period, u64 rt_runtime)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008838{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008839 int i, err = 0;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008840
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008841 mutex_lock(&rt_constraints_mutex);
Dhaval Giani521f1a242008-02-28 15:21:56 +05308842 read_lock(&tasklist_lock);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008843 err = __rt_schedulable(tg, rt_period, rt_runtime);
8844 if (err)
Dhaval Giani521f1a242008-02-28 15:21:56 +05308845 goto unlock;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008846
Thomas Gleixner0986b112009-11-17 15:32:06 +01008847 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008848 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8849 tg->rt_bandwidth.rt_runtime = rt_runtime;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008850
8851 for_each_possible_cpu(i) {
8852 struct rt_rq *rt_rq = tg->rt_rq[i];
8853
Thomas Gleixner0986b112009-11-17 15:32:06 +01008854 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008855 rt_rq->rt_runtime = rt_runtime;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008856 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008857 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008858 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstra49246272010-10-17 21:46:10 +02008859unlock:
Dhaval Giani521f1a242008-02-28 15:21:56 +05308860 read_unlock(&tasklist_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008861 mutex_unlock(&rt_constraints_mutex);
8862
8863 return err;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008864}
8865
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008866int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8867{
8868 u64 rt_runtime, rt_period;
8869
8870 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8871 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8872 if (rt_runtime_us < 0)
8873 rt_runtime = RUNTIME_INF;
8874
8875 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8876}
8877
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008878long sched_group_rt_runtime(struct task_group *tg)
8879{
8880 u64 rt_runtime_us;
8881
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008882 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008883 return -1;
8884
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008885 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008886 do_div(rt_runtime_us, NSEC_PER_USEC);
8887 return rt_runtime_us;
8888}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008889
8890int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8891{
8892 u64 rt_runtime, rt_period;
8893
8894 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8895 rt_runtime = tg->rt_bandwidth.rt_runtime;
8896
Raistlin619b0482008-06-26 18:54:09 +02008897 if (rt_period == 0)
8898 return -EINVAL;
8899
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008900 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8901}
8902
8903long sched_group_rt_period(struct task_group *tg)
8904{
8905 u64 rt_period_us;
8906
8907 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8908 do_div(rt_period_us, NSEC_PER_USEC);
8909 return rt_period_us;
8910}
8911
8912static int sched_rt_global_constraints(void)
8913{
Peter Zijlstra4653f802008-09-23 15:33:44 +02008914 u64 runtime, period;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008915 int ret = 0;
8916
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008917 if (sysctl_sched_rt_period <= 0)
8918 return -EINVAL;
8919
Peter Zijlstra4653f802008-09-23 15:33:44 +02008920 runtime = global_rt_runtime();
8921 period = global_rt_period();
8922
8923 /*
8924 * Sanity check on the sysctl variables.
8925 */
8926 if (runtime > period && runtime != RUNTIME_INF)
8927 return -EINVAL;
Peter Zijlstra10b612f2008-06-19 14:22:27 +02008928
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008929 mutex_lock(&rt_constraints_mutex);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008930 read_lock(&tasklist_lock);
Peter Zijlstra4653f802008-09-23 15:33:44 +02008931 ret = __rt_schedulable(NULL, 0, 0);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008932 read_unlock(&tasklist_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008933 mutex_unlock(&rt_constraints_mutex);
8934
8935 return ret;
8936}
Dhaval Giani54e99122009-02-27 15:13:54 +05308937
8938int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8939{
8940 /* Don't accept realtime tasks when there is no way for them to run */
8941 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8942 return 0;
8943
8944 return 1;
8945}
8946
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008947#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008948static int sched_rt_global_constraints(void)
8949{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008950 unsigned long flags;
8951 int i;
8952
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008953 if (sysctl_sched_rt_period <= 0)
8954 return -EINVAL;
8955
Peter Zijlstra60aa6052009-05-05 17:50:21 +02008956 /*
8957 * There's always some RT tasks in the root group
8958 * -- migration, kstopmachine etc..
8959 */
8960 if (sysctl_sched_rt_runtime == 0)
8961 return -EBUSY;
8962
Thomas Gleixner0986b112009-11-17 15:32:06 +01008963 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008964 for_each_possible_cpu(i) {
8965 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8966
Thomas Gleixner0986b112009-11-17 15:32:06 +01008967 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008968 rt_rq->rt_runtime = global_rt_runtime();
Thomas Gleixner0986b112009-11-17 15:32:06 +01008969 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008970 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008971 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008972
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008973 return 0;
8974}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008975#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008976
8977int sched_rt_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07008978 void __user *buffer, size_t *lenp,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008979 loff_t *ppos)
8980{
8981 int ret;
8982 int old_period, old_runtime;
8983 static DEFINE_MUTEX(mutex);
8984
8985 mutex_lock(&mutex);
8986 old_period = sysctl_sched_rt_period;
8987 old_runtime = sysctl_sched_rt_runtime;
8988
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07008989 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008990
8991 if (!ret && write) {
8992 ret = sched_rt_global_constraints();
8993 if (ret) {
8994 sysctl_sched_rt_period = old_period;
8995 sysctl_sched_rt_runtime = old_runtime;
8996 } else {
8997 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8998 def_rt_bandwidth.rt_period =
8999 ns_to_ktime(global_rt_period());
9000 }
9001 }
9002 mutex_unlock(&mutex);
9003
9004 return ret;
9005}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009006
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009007#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009008
9009/* return corresponding task_group object of a cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +02009010static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009011{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009012 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
9013 struct task_group, css);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009014}
9015
9016static struct cgroup_subsys_state *
Paul Menage2b01dfe2007-10-24 18:23:50 +02009017cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009018{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009019 struct task_group *tg, *parent;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009020
Paul Menage2b01dfe2007-10-24 18:23:50 +02009021 if (!cgrp->parent) {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009022 /* This is early initialization for the top cgroup */
Yong Zhang07e06b02011-01-07 15:17:36 +08009023 return &root_task_group.css;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009024 }
9025
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009026 parent = cgroup_tg(cgrp->parent);
9027 tg = sched_create_group(parent);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009028 if (IS_ERR(tg))
9029 return ERR_PTR(-ENOMEM);
9030
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009031 return &tg->css;
9032}
9033
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009034static void
9035cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009036{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009037 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009038
9039 sched_destroy_group(tg);
9040}
9041
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009042static int
Ben Blumbe367d02009-09-23 15:56:31 -07009043cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009044{
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009045#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Giani54e99122009-02-27 15:13:54 +05309046 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009047 return -EINVAL;
9048#else
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009049 /* We don't support RT-tasks being in separate groups */
9050 if (tsk->sched_class != &fair_sched_class)
9051 return -EINVAL;
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009052#endif
Ben Blumbe367d02009-09-23 15:56:31 -07009053 return 0;
9054}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009055
Ben Blumbe367d02009-09-23 15:56:31 -07009056static int
9057cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9058 struct task_struct *tsk, bool threadgroup)
9059{
9060 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
9061 if (retval)
9062 return retval;
9063 if (threadgroup) {
9064 struct task_struct *c;
9065 rcu_read_lock();
9066 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
9067 retval = cpu_cgroup_can_attach_task(cgrp, c);
9068 if (retval) {
9069 rcu_read_unlock();
9070 return retval;
9071 }
9072 }
9073 rcu_read_unlock();
9074 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009075 return 0;
9076}
9077
9078static void
Paul Menage2b01dfe2007-10-24 18:23:50 +02009079cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Ben Blumbe367d02009-09-23 15:56:31 -07009080 struct cgroup *old_cont, struct task_struct *tsk,
9081 bool threadgroup)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009082{
9083 sched_move_task(tsk);
Ben Blumbe367d02009-09-23 15:56:31 -07009084 if (threadgroup) {
9085 struct task_struct *c;
9086 rcu_read_lock();
9087 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
9088 sched_move_task(c);
9089 }
9090 rcu_read_unlock();
9091 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009092}
9093
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009094static void
Peter Zijlstrad41d5a02011-02-07 17:02:20 +01009095cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
9096 struct cgroup *old_cgrp, struct task_struct *task)
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009097{
9098 /*
9099 * cgroup_exit() is called in the copy_process() failure path.
9100 * Ignore this case since the task hasn't ran yet, this avoids
9101 * trying to poke a half freed task state from generic code.
9102 */
9103 if (!(task->flags & PF_EXITING))
9104 return;
9105
9106 sched_move_task(task);
9107}
9108
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009109#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagef4c753b2008-04-29 00:59:56 -07009110static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
Paul Menage2b01dfe2007-10-24 18:23:50 +02009111 u64 shareval)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009112{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009113 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009114}
9115
Paul Menagef4c753b2008-04-29 00:59:56 -07009116static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009117{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009118 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009119
9120 return (u64) tg->shares;
9121}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009122#endif /* CONFIG_FAIR_GROUP_SCHED */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009123
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009124#ifdef CONFIG_RT_GROUP_SCHED
Mirco Tischler0c708142008-05-14 16:05:46 -07009125static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
Paul Menage06ecb272008-04-29 01:00:06 -07009126 s64 val)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009127{
Paul Menage06ecb272008-04-29 01:00:06 -07009128 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009129}
9130
Paul Menage06ecb272008-04-29 01:00:06 -07009131static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009132{
Paul Menage06ecb272008-04-29 01:00:06 -07009133 return sched_group_rt_runtime(cgroup_tg(cgrp));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009134}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009135
9136static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
9137 u64 rt_period_us)
9138{
9139 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
9140}
9141
9142static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
9143{
9144 return sched_group_rt_period(cgroup_tg(cgrp));
9145}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009146#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009147
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009148static struct cftype cpu_files[] = {
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009149#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009150 {
9151 .name = "shares",
Paul Menagef4c753b2008-04-29 00:59:56 -07009152 .read_u64 = cpu_shares_read_u64,
9153 .write_u64 = cpu_shares_write_u64,
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009154 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009155#endif
9156#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009157 {
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01009158 .name = "rt_runtime_us",
Paul Menage06ecb272008-04-29 01:00:06 -07009159 .read_s64 = cpu_rt_runtime_read,
9160 .write_s64 = cpu_rt_runtime_write,
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009161 },
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009162 {
9163 .name = "rt_period_us",
Paul Menagef4c753b2008-04-29 00:59:56 -07009164 .read_u64 = cpu_rt_period_read_uint,
9165 .write_u64 = cpu_rt_period_write_uint,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009166 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009167#endif
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009168};
9169
9170static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
9171{
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009172 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009173}
9174
9175struct cgroup_subsys cpu_cgroup_subsys = {
Ingo Molnar38605ca2007-10-29 21:18:11 +01009176 .name = "cpu",
9177 .create = cpu_cgroup_create,
9178 .destroy = cpu_cgroup_destroy,
9179 .can_attach = cpu_cgroup_can_attach,
9180 .attach = cpu_cgroup_attach,
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009181 .exit = cpu_cgroup_exit,
Ingo Molnar38605ca2007-10-29 21:18:11 +01009182 .populate = cpu_cgroup_populate,
9183 .subsys_id = cpu_cgroup_subsys_id,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009184 .early_init = 1,
9185};
9186
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009187#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009188
9189#ifdef CONFIG_CGROUP_CPUACCT
9190
9191/*
9192 * CPU accounting code for task groups.
9193 *
9194 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
9195 * (balbir@in.ibm.com).
9196 */
9197
Bharata B Rao934352f2008-11-10 20:41:13 +05309198/* track cpu usage of a group of tasks and its child groups */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009199struct cpuacct {
9200 struct cgroup_subsys_state css;
9201 /* cpuusage holds pointer to a u64-type object on every cpu */
Tejun Heo43cf38e2010-02-02 14:38:57 +09009202 u64 __percpu *cpuusage;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309203 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
Bharata B Rao934352f2008-11-10 20:41:13 +05309204 struct cpuacct *parent;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009205};
9206
9207struct cgroup_subsys cpuacct_subsys;
9208
9209/* return cpu accounting group corresponding to this container */
Dhaval Giani32cd7562008-02-29 10:02:43 +05309210static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009211{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309212 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009213 struct cpuacct, css);
9214}
9215
9216/* return cpu accounting group to which this task belongs */
9217static inline struct cpuacct *task_ca(struct task_struct *tsk)
9218{
9219 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
9220 struct cpuacct, css);
9221}
9222
9223/* create a new cpu accounting group */
9224static struct cgroup_subsys_state *cpuacct_create(
Dhaval Giani32cd7562008-02-29 10:02:43 +05309225 struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009226{
9227 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309228 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009229
9230 if (!ca)
Bharata B Raoef12fef2009-03-31 10:02:22 +05309231 goto out;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009232
9233 ca->cpuusage = alloc_percpu(u64);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309234 if (!ca->cpuusage)
9235 goto out_free_ca;
9236
9237 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9238 if (percpu_counter_init(&ca->cpustat[i], 0))
9239 goto out_free_counters;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009240
Bharata B Rao934352f2008-11-10 20:41:13 +05309241 if (cgrp->parent)
9242 ca->parent = cgroup_ca(cgrp->parent);
9243
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009244 return &ca->css;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309245
9246out_free_counters:
9247 while (--i >= 0)
9248 percpu_counter_destroy(&ca->cpustat[i]);
9249 free_percpu(ca->cpuusage);
9250out_free_ca:
9251 kfree(ca);
9252out:
9253 return ERR_PTR(-ENOMEM);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009254}
9255
9256/* destroy an existing cpu accounting group */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009257static void
Dhaval Giani32cd7562008-02-29 10:02:43 +05309258cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009259{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309260 struct cpuacct *ca = cgroup_ca(cgrp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309261 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009262
Bharata B Raoef12fef2009-03-31 10:02:22 +05309263 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9264 percpu_counter_destroy(&ca->cpustat[i]);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009265 free_percpu(ca->cpuusage);
9266 kfree(ca);
9267}
9268
Ken Chen720f5492008-12-15 22:02:01 -08009269static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9270{
Rusty Russellb36128c2009-02-20 16:29:08 +09009271 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08009272 u64 data;
9273
9274#ifndef CONFIG_64BIT
9275 /*
9276 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
9277 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009278 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009279 data = *cpuusage;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009280 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009281#else
9282 data = *cpuusage;
9283#endif
9284
9285 return data;
9286}
9287
9288static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9289{
Rusty Russellb36128c2009-02-20 16:29:08 +09009290 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08009291
9292#ifndef CONFIG_64BIT
9293 /*
9294 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
9295 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009296 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009297 *cpuusage = val;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009298 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009299#else
9300 *cpuusage = val;
9301#endif
9302}
9303
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009304/* return total cpu usage (in nanoseconds) of a group */
Dhaval Giani32cd7562008-02-29 10:02:43 +05309305static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009306{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309307 struct cpuacct *ca = cgroup_ca(cgrp);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009308 u64 totalcpuusage = 0;
9309 int i;
9310
Ken Chen720f5492008-12-15 22:02:01 -08009311 for_each_present_cpu(i)
9312 totalcpuusage += cpuacct_cpuusage_read(ca, i);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009313
9314 return totalcpuusage;
9315}
9316
Dhaval Giani0297b802008-02-29 10:02:44 +05309317static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
9318 u64 reset)
9319{
9320 struct cpuacct *ca = cgroup_ca(cgrp);
9321 int err = 0;
9322 int i;
9323
9324 if (reset) {
9325 err = -EINVAL;
9326 goto out;
9327 }
9328
Ken Chen720f5492008-12-15 22:02:01 -08009329 for_each_present_cpu(i)
9330 cpuacct_cpuusage_write(ca, i, 0);
Dhaval Giani0297b802008-02-29 10:02:44 +05309331
Dhaval Giani0297b802008-02-29 10:02:44 +05309332out:
9333 return err;
9334}
9335
Ken Chene9515c32008-12-15 22:04:15 -08009336static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
9337 struct seq_file *m)
9338{
9339 struct cpuacct *ca = cgroup_ca(cgroup);
9340 u64 percpu;
9341 int i;
9342
9343 for_each_present_cpu(i) {
9344 percpu = cpuacct_cpuusage_read(ca, i);
9345 seq_printf(m, "%llu ", (unsigned long long) percpu);
9346 }
9347 seq_printf(m, "\n");
9348 return 0;
9349}
9350
Bharata B Raoef12fef2009-03-31 10:02:22 +05309351static const char *cpuacct_stat_desc[] = {
9352 [CPUACCT_STAT_USER] = "user",
9353 [CPUACCT_STAT_SYSTEM] = "system",
9354};
9355
9356static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9357 struct cgroup_map_cb *cb)
9358{
9359 struct cpuacct *ca = cgroup_ca(cgrp);
9360 int i;
9361
9362 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
9363 s64 val = percpu_counter_read(&ca->cpustat[i]);
9364 val = cputime64_to_clock_t(val);
9365 cb->fill(cb, cpuacct_stat_desc[i], val);
9366 }
9367 return 0;
9368}
9369
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009370static struct cftype files[] = {
9371 {
9372 .name = "usage",
Paul Menagef4c753b2008-04-29 00:59:56 -07009373 .read_u64 = cpuusage_read,
9374 .write_u64 = cpuusage_write,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009375 },
Ken Chene9515c32008-12-15 22:04:15 -08009376 {
9377 .name = "usage_percpu",
9378 .read_seq_string = cpuacct_percpu_seq_read,
9379 },
Bharata B Raoef12fef2009-03-31 10:02:22 +05309380 {
9381 .name = "stat",
9382 .read_map = cpuacct_stats_show,
9383 },
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009384};
9385
Dhaval Giani32cd7562008-02-29 10:02:43 +05309386static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009387{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309388 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009389}
9390
9391/*
9392 * charge this task's execution time to its accounting group.
9393 *
9394 * called with rq->lock held.
9395 */
9396static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9397{
9398 struct cpuacct *ca;
Bharata B Rao934352f2008-11-10 20:41:13 +05309399 int cpu;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009400
Li Zefanc40c6f82009-02-26 15:40:15 +08009401 if (unlikely(!cpuacct_subsys.active))
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009402 return;
9403
Bharata B Rao934352f2008-11-10 20:41:13 +05309404 cpu = task_cpu(tsk);
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309405
9406 rcu_read_lock();
9407
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009408 ca = task_ca(tsk);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009409
Bharata B Rao934352f2008-11-10 20:41:13 +05309410 for (; ca; ca = ca->parent) {
Rusty Russellb36128c2009-02-20 16:29:08 +09009411 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009412 *cpuusage += cputime;
9413 }
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309414
9415 rcu_read_unlock();
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009416}
9417
Bharata B Raoef12fef2009-03-31 10:02:22 +05309418/*
Anton Blanchardfa535a72010-02-02 14:46:13 -08009419 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
9420 * in cputime_t units. As a result, cpuacct_update_stats calls
9421 * percpu_counter_add with values large enough to always overflow the
9422 * per cpu batch limit causing bad SMP scalability.
9423 *
9424 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9425 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9426 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
9427 */
9428#ifdef CONFIG_SMP
9429#define CPUACCT_BATCH \
9430 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9431#else
9432#define CPUACCT_BATCH 0
9433#endif
9434
9435/*
Bharata B Raoef12fef2009-03-31 10:02:22 +05309436 * Charge the system/user time to the task's accounting group.
9437 */
9438static void cpuacct_update_stats(struct task_struct *tsk,
9439 enum cpuacct_stat_index idx, cputime_t val)
9440{
9441 struct cpuacct *ca;
Anton Blanchardfa535a72010-02-02 14:46:13 -08009442 int batch = CPUACCT_BATCH;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309443
9444 if (unlikely(!cpuacct_subsys.active))
9445 return;
9446
9447 rcu_read_lock();
9448 ca = task_ca(tsk);
9449
9450 do {
Anton Blanchardfa535a72010-02-02 14:46:13 -08009451 __percpu_counter_add(&ca->cpustat[idx], val, batch);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309452 ca = ca->parent;
9453 } while (ca);
9454 rcu_read_unlock();
9455}
9456
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009457struct cgroup_subsys cpuacct_subsys = {
9458 .name = "cpuacct",
9459 .create = cpuacct_create,
9460 .destroy = cpuacct_destroy,
9461 .populate = cpuacct_populate,
9462 .subsys_id = cpuacct_subsys_id,
9463};
9464#endif /* CONFIG_CGROUP_CPUACCT */
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009465