blob: 6b269b79c52c84bb5a2f00025cd56e1314af6742 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
Ingo Molnarc31f2e82007-07-09 18:52:01 +020019 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
Ingo Molnarb9131762008-01-25 21:08:19 +010025 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
Linus Torvalds1da177e2005-04-16 15:20:36 -070027 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020033#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/highmem.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080037#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <linux/completion.h>
39#include <linux/kernel_stat.h>
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070040#include <linux/debug_locks.h>
Ingo Molnarcdd6c482009-09-21 12:02:48 +020041#include <linux/perf_event.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080045#include <linux/freezer.h>
akpm@osdl.org198e2f12006-01-12 01:05:30 -080046#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <linux/blkdev.h>
48#include <linux/delay.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070049#include <linux/pid_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
Alexey Dobriyanb5aadf72008-10-06 13:23:43 +040057#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#include <linux/seq_file.h>
Tejun Heo969c7922010-05-06 18:49:21 +020059#include <linux/stop_machine.h>
Nick Piggine692ab52007-07-26 13:40:43 +020060#include <linux/sysctl.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070061#include <linux/syscalls.h>
62#include <linux/times.h>
Jay Lan8f0ab512006-09-30 23:28:59 -070063#include <linux/tsacct_kern.h>
bibo maoc6fd91f2006-03-26 01:38:20 -080064#include <linux/kprobes.h>
Shailabh Nagar0ff92242006-07-14 00:24:37 -070065#include <linux/delayacct.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020066#include <linux/unistd.h>
Jens Axboef5ff8422007-09-21 09:19:54 +020067#include <linux/pagemap.h>
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +010068#include <linux/hrtimer.h>
Reynes Philippe30914a52008-03-17 16:19:05 -070069#include <linux/tick.h>
Peter Zijlstraf00b45c2008-04-19 19:45:00 +020070#include <linux/debugfs.h>
71#include <linux/ctype.h>
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +020072#include <linux/ftrace.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090073#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
Eric Dumazet5517d862007-05-08 00:32:57 -070075#include <asm/tlb.h>
Satyam Sharma838225b2007-10-24 18:23:50 +020076#include <asm/irq_regs.h>
Gerald Schaefer335d7af2010-11-22 15:47:36 +010077#include <asm/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070078
Gregory Haskins6e0534f2008-05-12 21:21:01 +020079#include "sched_cpupri.h"
Tejun Heo21aa9af2010-06-08 21:40:37 +020080#include "workqueue_sched.h"
Mike Galbraith5091faa2010-11-30 14:18:03 +010081#include "sched_autogroup.h"
Gregory Haskins6e0534f2008-05-12 21:21:01 +020082
Steven Rostedta8d154b2009-04-10 09:36:00 -040083#define CREATE_TRACE_POINTS
Steven Rostedtad8d75f2009-04-14 19:39:12 -040084#include <trace/events/sched.h>
Steven Rostedta8d154b2009-04-10 09:36:00 -040085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086/*
87 * Convert user-nice values [ -20 ... 0 ... 19 ]
88 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
89 * and back.
90 */
91#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
92#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
93#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
94
95/*
96 * 'User priority' is the nice value converted to something we
97 * can work with better when scaling various scheduler parameters,
98 * it's a [ 0 ... 39 ] range.
99 */
100#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
101#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
102#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
103
104/*
Ingo Molnard7876a02008-01-25 21:08:19 +0100105 * Helpers for converting nanosecond timing to jiffy resolution
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106 */
Eric Dumazetd6322fa2007-11-09 22:39:38 +0100107#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200109#define NICE_0_LOAD SCHED_LOAD_SCALE
110#define NICE_0_SHIFT SCHED_LOAD_SHIFT
111
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112/*
113 * These are the 'tuning knobs' of the scheduler:
114 *
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +0200115 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116 * Timeslices get refilled after they expire.
117 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118#define DEF_TIMESLICE (100 * HZ / 1000)
Peter Williams2dd73a42006-06-27 02:54:34 -0700119
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200120/*
121 * single value that denotes runtime == period, ie unlimited time.
122 */
123#define RUNTIME_INF ((u64)~0ULL)
124
Ingo Molnare05606d2007-07-09 18:51:59 +0200125static inline int rt_policy(int policy)
126{
Roel Kluin3f33a7c2008-05-13 23:44:11 +0200127 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
Ingo Molnare05606d2007-07-09 18:51:59 +0200128 return 1;
129 return 0;
130}
131
132static inline int task_has_rt_policy(struct task_struct *p)
133{
134 return rt_policy(p->policy);
135}
136
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137/*
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200138 * This is the priority-queue data structure of the RT scheduling class:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139 */
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200140struct rt_prio_array {
141 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
142 struct list_head queue[MAX_RT_PRIO];
143};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200145struct rt_bandwidth {
Ingo Molnarea736ed2008-03-25 13:51:45 +0100146 /* nests inside the rq lock: */
Thomas Gleixner0986b112009-11-17 15:32:06 +0100147 raw_spinlock_t rt_runtime_lock;
Ingo Molnarea736ed2008-03-25 13:51:45 +0100148 ktime_t rt_period;
149 u64 rt_runtime;
150 struct hrtimer rt_period_timer;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200151};
152
153static struct rt_bandwidth def_rt_bandwidth;
154
155static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
156
157static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
158{
159 struct rt_bandwidth *rt_b =
160 container_of(timer, struct rt_bandwidth, rt_period_timer);
161 ktime_t now;
162 int overrun;
163 int idle = 0;
164
165 for (;;) {
166 now = hrtimer_cb_get_time(timer);
167 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
168
169 if (!overrun)
170 break;
171
172 idle = do_sched_rt_period_timer(rt_b, overrun);
173 }
174
175 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
176}
177
178static
179void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
180{
181 rt_b->rt_period = ns_to_ktime(period);
182 rt_b->rt_runtime = runtime;
183
Thomas Gleixner0986b112009-11-17 15:32:06 +0100184 raw_spin_lock_init(&rt_b->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200185
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200186 hrtimer_init(&rt_b->rt_period_timer,
187 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
188 rt_b->rt_period_timer.function = sched_rt_period_timer;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200189}
190
Krzysztof Heltc8bfff62008-09-05 23:46:19 +0200191static inline int rt_bandwidth_enabled(void)
192{
193 return sysctl_sched_rt_runtime >= 0;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200194}
195
196static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
197{
198 ktime_t now;
199
Hiroshi Shimamotocac64d02009-02-25 09:59:26 -0800200 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200201 return;
202
203 if (hrtimer_active(&rt_b->rt_period_timer))
204 return;
205
Thomas Gleixner0986b112009-11-17 15:32:06 +0100206 raw_spin_lock(&rt_b->rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200207 for (;;) {
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +0100208 unsigned long delta;
209 ktime_t soft, hard;
210
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200211 if (hrtimer_active(&rt_b->rt_period_timer))
212 break;
213
214 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
215 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +0100216
217 soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
218 hard = hrtimer_get_expires(&rt_b->rt_period_timer);
219 delta = ktime_to_ns(ktime_sub(hard, soft));
220 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
Arun R Bharadwaj5c333862009-04-16 12:14:37 +0530221 HRTIMER_MODE_ABS_PINNED, 0);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200222 }
Thomas Gleixner0986b112009-11-17 15:32:06 +0100223 raw_spin_unlock(&rt_b->rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200224}
225
226#ifdef CONFIG_RT_GROUP_SCHED
227static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
228{
229 hrtimer_cancel(&rt_b->rt_period_timer);
230}
231#endif
232
Heiko Carstens712555e2008-04-28 11:33:07 +0200233/*
234 * sched_domains_mutex serializes calls to arch_init_sched_domains,
235 * detach_destroy_domains and partition_sched_domains.
236 */
237static DEFINE_MUTEX(sched_domains_mutex);
238
Dhaval Giani7c941432010-01-20 13:26:18 +0100239#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200240
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700241#include <linux/cgroup.h>
242
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200243struct cfs_rq;
244
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100245static LIST_HEAD(task_groups);
246
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200247/* task group related information */
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200248struct task_group {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700249 struct cgroup_subsys_state css;
Arun R Bharadwaj6c415b92008-12-01 20:49:05 +0530250
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100251#ifdef CONFIG_FAIR_GROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200252 /* schedulable entities of this group on each cpu */
253 struct sched_entity **se;
254 /* runqueue "owned" by this group on each cpu */
255 struct cfs_rq **cfs_rq;
256 unsigned long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800257
258 atomic_t load_weight;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100259#endif
260
261#ifdef CONFIG_RT_GROUP_SCHED
262 struct sched_rt_entity **rt_se;
263 struct rt_rq **rt_rq;
264
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200265 struct rt_bandwidth rt_bandwidth;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100266#endif
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100267
Srivatsa Vaddagiriae8393e2007-10-29 21:18:11 +0100268 struct rcu_head rcu;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100269 struct list_head list;
Peter Zijlstraf473aa52008-04-19 19:45:00 +0200270
271 struct task_group *parent;
272 struct list_head siblings;
273 struct list_head children;
Mike Galbraith5091faa2010-11-30 14:18:03 +0100274
275#ifdef CONFIG_SCHED_AUTOGROUP
276 struct autogroup *autogroup;
277#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200278};
279
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800280/* task_group_lock serializes the addition/removal of task groups */
Peter Zijlstra8ed36992008-02-13 15:45:39 +0100281static DEFINE_SPINLOCK(task_group_lock);
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +0100282
Cyrill Gorcunove9036b32009-10-26 22:24:14 +0300283#ifdef CONFIG_FAIR_GROUP_SCHED
284
Yong Zhang07e06b02011-01-07 15:17:36 +0800285# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200286
Miao Xiecb4ad1f2008-04-28 12:54:56 +0800287/*
Lai Jiangshan2e084782008-06-12 16:42:58 +0800288 * A weight of 0 or 1 can cause arithmetics problems.
289 * A weight of a cfs_rq is the sum of weights of which entities
290 * are queued on this cfs_rq, so a weight of a entity should not be
291 * too large, so as the shares value of a task group.
Miao Xiecb4ad1f2008-04-28 12:54:56 +0800292 * (The default weight is 1024 - so there's no practical
293 * limitation from this.)
294 */
Peter Zijlstra18d95a22008-04-19 19:45:00 +0200295#define MIN_SHARES 2
Lai Jiangshan2e084782008-06-12 16:42:58 +0800296#define MAX_SHARES (1UL << 18)
Peter Zijlstra18d95a22008-04-19 19:45:00 +0200297
Yong Zhang07e06b02011-01-07 15:17:36 +0800298static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100299#endif
300
301/* Default task group.
302 * Every task in system belong to this group at bootup.
303 */
Yong Zhang07e06b02011-01-07 15:17:36 +0800304struct task_group root_task_group;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200305
Dhaval Giani7c941432010-01-20 13:26:18 +0100306#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200307
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200308/* CFS-related fields in a runqueue */
309struct cfs_rq {
310 struct load_weight load;
311 unsigned long nr_running;
312
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200313 u64 exec_clock;
Ingo Molnare9acbff2007-10-15 17:00:04 +0200314 u64 min_vruntime;
Peter Zijlstra3fe16982011-04-05 17:23:48 +0200315#ifndef CONFIG_64BIT
316 u64 min_vruntime_copy;
317#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200318
319 struct rb_root tasks_timeline;
320 struct rb_node *rb_leftmost;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +0200321
322 struct list_head tasks;
323 struct list_head *balance_iterator;
324
325 /*
326 * 'curr' points to currently running entity on this cfs_rq.
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200327 * It is set to NULL otherwise (i.e when none are currently running).
328 */
Rik van Rielac53db52011-02-01 09:51:03 -0500329 struct sched_entity *curr, *next, *last, *skip;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200330
Peter Zijlstra5ac5c4d2008-11-10 10:46:32 +0100331 unsigned int nr_spread_over;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200332
Ingo Molnar62160e32007-10-15 17:00:03 +0200333#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200334 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
335
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100336 /*
337 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200338 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
339 * (like users, containers etc.)
340 *
341 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
342 * list is used during load balance.
343 */
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800344 int on_list;
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100345 struct list_head leaf_cfs_rq_list;
346 struct task_group *tg; /* group that "owns" this runqueue */
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200347
348#ifdef CONFIG_SMP
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200349 /*
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200350 * the part of load.weight contributed by tasks
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200351 */
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200352 unsigned long task_weight;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200353
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200354 /*
355 * h_load = weight * f(tg)
356 *
357 * Where f(tg) is the recursive weight fraction assigned to
358 * this group.
359 */
360 unsigned long h_load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200361
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200362 /*
Paul Turner3b3d1902010-11-15 15:47:08 -0800363 * Maintaining per-cpu shares distribution for group scheduling
364 *
365 * load_stamp is the last time we updated the load average
366 * load_last is the last time we updated the load average and saw load
367 * load_unacc_exec_time is currently unaccounted execution time
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200368 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800369 u64 load_avg;
370 u64 load_period;
Paul Turner3b3d1902010-11-15 15:47:08 -0800371 u64 load_stamp, load_last, load_unacc_exec_time;
Peter Zijlstraf1d239f2008-06-27 13:41:38 +0200372
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800373 unsigned long load_contribution;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200374#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200375#endif
376};
377
378/* Real-Time classes' related field in a runqueue: */
379struct rt_rq {
380 struct rt_prio_array active;
Steven Rostedt63489e42008-01-25 21:08:03 +0100381 unsigned long rt_nr_running;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100382#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -0500383 struct {
384 int curr; /* highest queued rt task prio */
Gregory Haskins398a1532009-01-14 09:10:04 -0500385#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -0500386 int next; /* next highest */
Gregory Haskins398a1532009-01-14 09:10:04 -0500387#endif
Gregory Haskinse864c492008-12-29 09:39:49 -0500388 } highest_prio;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100389#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100390#ifdef CONFIG_SMP
Gregory Haskins73fe6aa2008-01-25 21:08:07 +0100391 unsigned long rt_nr_migratory;
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +0200392 unsigned long rt_nr_total;
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +0100393 int overloaded;
Gregory Haskins917b6272008-12-29 09:39:53 -0500394 struct plist_head pushable_tasks;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100395#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100396 int rt_throttled;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100397 u64 rt_time;
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200398 u64 rt_runtime;
Ingo Molnarea736ed2008-03-25 13:51:45 +0100399 /* Nests inside the rq lock: */
Thomas Gleixner0986b112009-11-17 15:32:06 +0100400 raw_spinlock_t rt_runtime_lock;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100401
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100402#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100403 unsigned long rt_nr_boosted;
404
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100405 struct rq *rq;
406 struct list_head leaf_rt_rq_list;
407 struct task_group *tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100408#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200409};
410
Gregory Haskins57d885f2008-01-25 21:08:18 +0100411#ifdef CONFIG_SMP
412
413/*
414 * We add the notion of a root-domain which will be used to define per-domain
Ingo Molnar0eab9142008-01-25 21:08:19 +0100415 * variables. Each exclusive cpuset essentially defines an island domain by
416 * fully partitioning the member cpus from any other cpuset. Whenever a new
Gregory Haskins57d885f2008-01-25 21:08:18 +0100417 * exclusive cpuset is created, we also create and attach a new root-domain
418 * object.
419 *
Gregory Haskins57d885f2008-01-25 21:08:18 +0100420 */
421struct root_domain {
422 atomic_t refcount;
Rusty Russellc6c49272008-11-25 02:35:05 +1030423 cpumask_var_t span;
424 cpumask_var_t online;
Gregory Haskins637f5082008-01-25 21:08:18 +0100425
Ingo Molnar0eab9142008-01-25 21:08:19 +0100426 /*
Gregory Haskins637f5082008-01-25 21:08:18 +0100427 * The "RT overload" flag: it gets set if a CPU has more than
428 * one runnable RT task.
429 */
Rusty Russellc6c49272008-11-25 02:35:05 +1030430 cpumask_var_t rto_mask;
Ingo Molnar0eab9142008-01-25 21:08:19 +0100431 atomic_t rto_count;
Gregory Haskins6e0534f2008-05-12 21:21:01 +0200432 struct cpupri cpupri;
Gregory Haskins57d885f2008-01-25 21:08:18 +0100433};
434
Gregory Haskinsdc938522008-01-25 21:08:26 +0100435/*
436 * By default the system creates a single root-domain with all cpus as
437 * members (mimicking the global state we have today).
438 */
Gregory Haskins57d885f2008-01-25 21:08:18 +0100439static struct root_domain def_root_domain;
440
Christian Dietriched2d3722010-09-06 16:37:05 +0200441#endif /* CONFIG_SMP */
Gregory Haskins57d885f2008-01-25 21:08:18 +0100442
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200443/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444 * This is the main, per-CPU runqueue data structure.
445 *
446 * Locking rule: those places that want to lock multiple runqueues
447 * (such as the load balancing or the thread migration code), lock
448 * acquire operations must be ordered by ascending &runqueue.
449 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700450struct rq {
Ingo Molnard8016492007-10-18 21:32:55 +0200451 /* runqueue lock: */
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100452 raw_spinlock_t lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700453
454 /*
455 * nr_running and cpu_load should be in the same cacheline because
456 * remote CPUs use both these fields when doing load calculation.
457 */
458 unsigned long nr_running;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200459 #define CPU_LOAD_IDX_MAX 5
460 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -0700461 unsigned long last_load_update_tick;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -0700462#ifdef CONFIG_NO_HZ
Mike Galbraith39c0cbe2010-03-11 17:17:13 +0100463 u64 nohz_stamp;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -0700464 unsigned char nohz_balance_kick;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -0700465#endif
Mike Galbraitha64692a2010-03-11 17:16:20 +0100466 unsigned int skip_clock_update;
467
Ingo Molnard8016492007-10-18 21:32:55 +0200468 /* capture load from *all* tasks on this cpu: */
469 struct load_weight load;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200470 unsigned long nr_load_updates;
471 u64 nr_switches;
472
473 struct cfs_rq cfs;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100474 struct rt_rq rt;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100475
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200476#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnard8016492007-10-18 21:32:55 +0200477 /* list of leaf cfs_rq on this cpu: */
478 struct list_head leaf_cfs_rq_list;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100479#endif
480#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100481 struct list_head leaf_rt_rq_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483
484 /*
485 * This is part of a global counter where only the total sum
486 * over all CPUs matters. A task can increase this counter on
487 * one CPU and if it got migrated afterwards it may decrease
488 * it on another CPU. Always updated under the runqueue lock:
489 */
490 unsigned long nr_uninterruptible;
491
Peter Zijlstra34f971f2010-09-22 13:53:15 +0200492 struct task_struct *curr, *idle, *stop;
Christoph Lameterc9819f42006-12-10 02:20:25 -0800493 unsigned long next_balance;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 struct mm_struct *prev_mm;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200495
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200496 u64 clock;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700497 u64 clock_task;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200498
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499 atomic_t nr_iowait;
500
501#ifdef CONFIG_SMP
Ingo Molnar0eab9142008-01-25 21:08:19 +0100502 struct root_domain *rd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 struct sched_domain *sd;
504
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +0200505 unsigned long cpu_power;
506
Henrik Austada0a522c2009-02-13 20:35:45 +0100507 unsigned char idle_at_tick;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 /* For active balancing */
Gregory Haskins3f029d32009-07-29 11:08:47 -0400509 int post_schedule;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510 int active_balance;
511 int push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +0200512 struct cpu_stop_work active_balance_work;
Ingo Molnard8016492007-10-18 21:32:55 +0200513 /* cpu of this runqueue: */
514 int cpu;
Gregory Haskins1f11eb62008-06-04 15:04:05 -0400515 int online;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516
Peter Zijlstraa8a51d52008-06-27 13:41:26 +0200517 unsigned long avg_load_per_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518
Peter Zijlstrae9e92502009-09-01 10:34:37 +0200519 u64 rt_avg;
520 u64 age_stamp;
Mike Galbraith1b9508f2009-11-04 17:53:50 +0100521 u64 idle_stamp;
522 u64 avg_idle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523#endif
524
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -0700525#ifdef CONFIG_IRQ_TIME_ACCOUNTING
526 u64 prev_irq_time;
527#endif
528
Thomas Gleixnerdce48a82009-04-11 10:43:41 +0200529 /* calc_load related fields */
530 unsigned long calc_load_update;
531 long calc_load_active;
532
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100533#ifdef CONFIG_SCHED_HRTICK
Peter Zijlstra31656512008-07-18 18:01:23 +0200534#ifdef CONFIG_SMP
535 int hrtick_csd_pending;
536 struct call_single_data hrtick_csd;
537#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100538 struct hrtimer hrtick_timer;
539#endif
540
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541#ifdef CONFIG_SCHEDSTATS
542 /* latency stats */
543 struct sched_info rq_sched_info;
Ken Chen9c2c4802008-12-16 23:41:22 -0800544 unsigned long long rq_cpu_time;
545 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546
547 /* sys_sched_yield() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200548 unsigned int yld_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549
550 /* schedule() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200551 unsigned int sched_switch;
552 unsigned int sched_count;
553 unsigned int sched_goidle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554
555 /* try_to_wake_up() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200556 unsigned int ttwu_count;
557 unsigned int ttwu_local;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558#endif
559};
560
Fenghua Yuf34e3b62007-07-19 01:48:13 -0700561static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562
Mike Galbraitha64692a2010-03-11 17:16:20 +0100563
Peter Zijlstra1e5a7402010-10-31 12:37:04 +0100564static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
Ingo Molnardd41f592007-07-09 18:51:59 +0200565
Christoph Lameter0a2966b2006-09-25 23:30:51 -0700566static inline int cpu_of(struct rq *rq)
567{
568#ifdef CONFIG_SMP
569 return rq->cpu;
570#else
571 return 0;
572#endif
573}
574
Paul E. McKenney497f0ab2010-02-22 17:04:51 -0800575#define rcu_dereference_check_sched_domain(p) \
Paul E. McKenneyd11c5632010-02-22 17:04:50 -0800576 rcu_dereference_check((p), \
577 rcu_read_lock_sched_held() || \
578 lockdep_is_held(&sched_domains_mutex))
579
Ingo Molnar20d315d2007-07-09 18:51:58 +0200580/*
Nick Piggin674311d2005-06-25 14:57:27 -0700581 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -0700582 * See detach_destroy_domains: synchronize_sched for details.
Nick Piggin674311d2005-06-25 14:57:27 -0700583 *
584 * The domain tree of any CPU may only be accessed from within
585 * preempt-disabled sections.
586 */
Ingo Molnar48f24c42006-07-03 00:25:40 -0700587#define for_each_domain(cpu, __sd) \
Paul E. McKenney497f0ab2010-02-22 17:04:51 -0800588 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589
590#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
591#define this_rq() (&__get_cpu_var(runqueues))
592#define task_rq(p) cpu_rq(task_cpu(p))
593#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
Hitoshi Mitake54d35f22009-06-29 14:44:57 +0900594#define raw_rq() (&__raw_get_cpu_var(runqueues))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200596#ifdef CONFIG_CGROUP_SCHED
597
598/*
599 * Return the group to which this tasks belongs.
600 *
601 * We use task_subsys_state_check() and extend the RCU verification
602 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
603 * holds that lock for each task it moves into the cgroup. Therefore
604 * by holding that lock, we pin the task to the current cgroup.
605 */
606static inline struct task_group *task_group(struct task_struct *p)
607{
Mike Galbraith5091faa2010-11-30 14:18:03 +0100608 struct task_group *tg;
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200609 struct cgroup_subsys_state *css;
610
611 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
612 lockdep_is_held(&task_rq(p)->lock));
Mike Galbraith5091faa2010-11-30 14:18:03 +0100613 tg = container_of(css, struct task_group, css);
614
615 return autogroup_task_group(p, tg);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200616}
617
618/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
619static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
620{
621#ifdef CONFIG_FAIR_GROUP_SCHED
622 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
623 p->se.parent = task_group(p)->se[cpu];
624#endif
625
626#ifdef CONFIG_RT_GROUP_SCHED
627 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
628 p->rt.parent = task_group(p)->rt_se[cpu];
629#endif
630}
631
632#else /* CONFIG_CGROUP_SCHED */
633
634static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
635static inline struct task_group *task_group(struct task_struct *p)
636{
637 return NULL;
638}
639
640#endif /* CONFIG_CGROUP_SCHED */
641
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100642static void update_rq_clock_task(struct rq *rq, s64 delta);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700643
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100644static void update_rq_clock(struct rq *rq)
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200645{
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100646 s64 delta;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700647
Mike Galbraithf26f9af2010-12-08 11:05:42 +0100648 if (rq->skip_clock_update)
649 return;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -0700650
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100651 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
652 rq->clock += delta;
653 update_rq_clock_task(rq, delta);
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200654}
655
Ingo Molnare436d802007-07-19 21:28:35 +0200656/*
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200657 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
658 */
659#ifdef CONFIG_SCHED_DEBUG
660# define const_debug __read_mostly
661#else
662# define const_debug static const
663#endif
664
Ingo Molnar017730c2008-05-12 21:20:52 +0200665/**
Randy Dunlap1fd06bb2011-03-15 16:12:30 -0700666 * runqueue_is_locked - Returns true if the current cpu runqueue is locked
Randy Dunlape17b38b2009-10-11 19:12:00 -0700667 * @cpu: the processor in question.
Ingo Molnar017730c2008-05-12 21:20:52 +0200668 *
Ingo Molnar017730c2008-05-12 21:20:52 +0200669 * This interface allows printk to be called with the runqueue lock
670 * held and know whether or not it is OK to wake up the klogd.
671 */
Andrew Morton89f19f02009-09-19 11:55:44 -0700672int runqueue_is_locked(int cpu)
Ingo Molnar017730c2008-05-12 21:20:52 +0200673{
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100674 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
Ingo Molnar017730c2008-05-12 21:20:52 +0200675}
676
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200677/*
678 * Debugging: various feature bits
679 */
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200680
681#define SCHED_FEAT(name, enabled) \
682 __SCHED_FEAT_##name ,
683
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200684enum {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200685#include "sched_features.h"
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200686};
687
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200688#undef SCHED_FEAT
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200689
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200690#define SCHED_FEAT(name, enabled) \
691 (1UL << __SCHED_FEAT_##name) * enabled |
692
693const_debug unsigned int sysctl_sched_features =
694#include "sched_features.h"
695 0;
696
697#undef SCHED_FEAT
698
699#ifdef CONFIG_SCHED_DEBUG
700#define SCHED_FEAT(name, enabled) \
701 #name ,
702
Harvey Harrison983ed7a2008-04-24 18:17:55 -0700703static __read_mostly char *sched_feat_names[] = {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200704#include "sched_features.h"
705 NULL
706};
707
708#undef SCHED_FEAT
709
Li Zefan34f3a812008-10-30 15:23:32 +0800710static int sched_feat_show(struct seq_file *m, void *v)
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200711{
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200712 int i;
713
714 for (i = 0; sched_feat_names[i]; i++) {
Li Zefan34f3a812008-10-30 15:23:32 +0800715 if (!(sysctl_sched_features & (1UL << i)))
716 seq_puts(m, "NO_");
717 seq_printf(m, "%s ", sched_feat_names[i]);
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200718 }
Li Zefan34f3a812008-10-30 15:23:32 +0800719 seq_puts(m, "\n");
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200720
Li Zefan34f3a812008-10-30 15:23:32 +0800721 return 0;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200722}
723
724static ssize_t
725sched_feat_write(struct file *filp, const char __user *ubuf,
726 size_t cnt, loff_t *ppos)
727{
728 char buf[64];
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400729 char *cmp;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200730 int neg = 0;
731 int i;
732
733 if (cnt > 63)
734 cnt = 63;
735
736 if (copy_from_user(&buf, ubuf, cnt))
737 return -EFAULT;
738
739 buf[cnt] = 0;
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400740 cmp = strstrip(buf);
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200741
Hillf Danton524429c2011-01-06 20:58:12 +0800742 if (strncmp(cmp, "NO_", 3) == 0) {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200743 neg = 1;
744 cmp += 3;
745 }
746
747 for (i = 0; sched_feat_names[i]; i++) {
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400748 if (strcmp(cmp, sched_feat_names[i]) == 0) {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200749 if (neg)
750 sysctl_sched_features &= ~(1UL << i);
751 else
752 sysctl_sched_features |= (1UL << i);
753 break;
754 }
755 }
756
757 if (!sched_feat_names[i])
758 return -EINVAL;
759
Jan Blunck42994722009-11-20 17:40:37 +0100760 *ppos += cnt;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200761
762 return cnt;
763}
764
Li Zefan34f3a812008-10-30 15:23:32 +0800765static int sched_feat_open(struct inode *inode, struct file *filp)
766{
767 return single_open(filp, sched_feat_show, NULL);
768}
769
Alexey Dobriyan828c0952009-10-01 15:43:56 -0700770static const struct file_operations sched_feat_fops = {
Li Zefan34f3a812008-10-30 15:23:32 +0800771 .open = sched_feat_open,
772 .write = sched_feat_write,
773 .read = seq_read,
774 .llseek = seq_lseek,
775 .release = single_release,
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200776};
777
778static __init int sched_init_debug(void)
779{
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200780 debugfs_create_file("sched_features", 0644, NULL, NULL,
781 &sched_feat_fops);
782
783 return 0;
784}
785late_initcall(sched_init_debug);
786
787#endif
788
789#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200790
791/*
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +0100792 * Number of tasks to iterate in a single balance run.
793 * Limited because this is done with IRQs disabled.
794 */
795const_debug unsigned int sysctl_sched_nr_migrate = 32;
796
797/*
Peter Zijlstrae9e92502009-09-01 10:34:37 +0200798 * period over which we average the RT time consumption, measured
799 * in ms.
800 *
801 * default: 1s
802 */
803const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
804
805/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100806 * period over which we measure -rt task cpu usage in us.
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100807 * default: 1s
808 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100809unsigned int sysctl_sched_rt_period = 1000000;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100810
Ingo Molnar6892b752008-02-13 14:02:36 +0100811static __read_mostly int scheduler_running;
812
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100813/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100814 * part of the period that we allow rt tasks to run in us.
815 * default: 0.95s
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100816 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100817int sysctl_sched_rt_runtime = 950000;
818
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200819static inline u64 global_rt_period(void)
820{
821 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
822}
823
824static inline u64 global_rt_runtime(void)
825{
roel kluine26873b2008-07-22 16:51:15 -0400826 if (sysctl_sched_rt_runtime < 0)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200827 return RUNTIME_INF;
828
829 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
830}
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100831
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832#ifndef prepare_arch_switch
Nick Piggin4866cde2005-06-25 14:57:23 -0700833# define prepare_arch_switch(next) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700835#ifndef finish_arch_switch
836# define finish_arch_switch(prev) do { } while (0)
837#endif
838
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100839static inline int task_current(struct rq *rq, struct task_struct *p)
840{
841 return rq->curr == p;
842}
843
Ingo Molnar70b97a72006-07-03 00:25:42 -0700844static inline int task_running(struct rq *rq, struct task_struct *p)
Nick Piggin4866cde2005-06-25 14:57:23 -0700845{
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200846#ifdef CONFIG_SMP
847 return p->on_cpu;
848#else
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100849 return task_current(rq, p);
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200850#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700851}
852
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200853#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar70b97a72006-07-03 00:25:42 -0700854static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700855{
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200856#ifdef CONFIG_SMP
857 /*
858 * We can optimise this out completely for !SMP, because the
859 * SMP rebalancing from interrupt is the only thing that cares
860 * here.
861 */
862 next->on_cpu = 1;
863#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700864}
865
Ingo Molnar70b97a72006-07-03 00:25:42 -0700866static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700867{
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200868#ifdef CONFIG_SMP
869 /*
870 * After ->on_cpu is cleared, the task can be moved to a different CPU.
871 * We must ensure this doesn't happen until the switch is completely
872 * finished.
873 */
874 smp_wmb();
875 prev->on_cpu = 0;
876#endif
Ingo Molnarda04c032005-09-13 11:17:59 +0200877#ifdef CONFIG_DEBUG_SPINLOCK
878 /* this is a valid case when another task releases the spinlock */
879 rq->lock.owner = current;
880#endif
Ingo Molnar8a25d5d2006-07-03 00:24:54 -0700881 /*
882 * If we are tracking spinlock dependencies then we have to
883 * fix up the runqueue lock - which gets 'carried over' from
884 * prev into current:
885 */
886 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
887
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100888 raw_spin_unlock_irq(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700889}
890
891#else /* __ARCH_WANT_UNLOCKED_CTXSW */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700892static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700893{
894#ifdef CONFIG_SMP
895 /*
896 * We can optimise this out completely for !SMP, because the
897 * SMP rebalancing from interrupt is the only thing that cares
898 * here.
899 */
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200900 next->on_cpu = 1;
Nick Piggin4866cde2005-06-25 14:57:23 -0700901#endif
902#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100903 raw_spin_unlock_irq(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700904#else
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100905 raw_spin_unlock(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700906#endif
907}
908
Ingo Molnar70b97a72006-07-03 00:25:42 -0700909static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700910{
911#ifdef CONFIG_SMP
912 /*
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200913 * After ->on_cpu is cleared, the task can be moved to a different CPU.
Nick Piggin4866cde2005-06-25 14:57:23 -0700914 * We must ensure this doesn't happen until the switch is completely
915 * finished.
916 */
917 smp_wmb();
Peter Zijlstra3ca7a442011-04-05 17:23:40 +0200918 prev->on_cpu = 0;
Nick Piggin4866cde2005-06-25 14:57:23 -0700919#endif
920#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
921 local_irq_enable();
922#endif
923}
924#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700925
926/*
Peter Zijlstra65cc8e42010-03-25 21:05:16 +0100927 * Check whether the task is waking, we use this to synchronize ->cpus_allowed
928 * against ttwu().
Peter Zijlstra0970d292010-02-15 14:45:54 +0100929 */
930static inline int task_is_waking(struct task_struct *p)
931{
Peter Zijlstra0017d732010-03-24 18:34:10 +0100932 return unlikely(p->state == TASK_WAKING);
Peter Zijlstra0970d292010-02-15 14:45:54 +0100933}
934
935/*
Ingo Molnarb29739f2006-06-27 02:54:51 -0700936 * __task_rq_lock - lock the runqueue a given task resides on.
937 * Must be called interrupts disabled.
938 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700939static inline struct rq *__task_rq_lock(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700940 __acquires(rq->lock)
941{
Peter Zijlstra0970d292010-02-15 14:45:54 +0100942 struct rq *rq;
943
Andi Kleen3a5c3592007-10-15 17:00:14 +0200944 for (;;) {
Peter Zijlstra0970d292010-02-15 14:45:54 +0100945 rq = task_rq(p);
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100946 raw_spin_lock(&rq->lock);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +0100947 if (likely(rq == task_rq(p)))
Andi Kleen3a5c3592007-10-15 17:00:14 +0200948 return rq;
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100949 raw_spin_unlock(&rq->lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -0700950 }
Ingo Molnarb29739f2006-06-27 02:54:51 -0700951}
952
953/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 * task_rq_lock - lock the runqueue a given task resides on and disable
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100955 * interrupts. Note the ordering: we can safely lookup the task_rq without
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 * explicitly disabling preemption.
957 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700958static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959 __acquires(rq->lock)
960{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700961 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962
Andi Kleen3a5c3592007-10-15 17:00:14 +0200963 for (;;) {
964 local_irq_save(*flags);
965 rq = task_rq(p);
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100966 raw_spin_lock(&rq->lock);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +0100967 if (likely(rq == task_rq(p)))
Andi Kleen3a5c3592007-10-15 17:00:14 +0200968 return rq;
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100969 raw_spin_unlock_irqrestore(&rq->lock, *flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971}
972
Alexey Dobriyana9957442007-10-15 17:00:13 +0200973static void __task_rq_unlock(struct rq *rq)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700974 __releases(rq->lock)
975{
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100976 raw_spin_unlock(&rq->lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -0700977}
978
Ingo Molnar70b97a72006-07-03 00:25:42 -0700979static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 __releases(rq->lock)
981{
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100982 raw_spin_unlock_irqrestore(&rq->lock, *flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983}
984
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985/*
Robert P. J. Daycc2a73b2006-12-10 02:20:00 -0800986 * this_rq_lock - lock this runqueue and disable interrupts.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987 */
Alexey Dobriyana9957442007-10-15 17:00:13 +0200988static struct rq *this_rq_lock(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989 __acquires(rq->lock)
990{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700991 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992
993 local_irq_disable();
994 rq = this_rq();
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100995 raw_spin_lock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996
997 return rq;
998}
999
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001000#ifdef CONFIG_SCHED_HRTICK
1001/*
1002 * Use HR-timers to deliver accurate preemption points.
1003 *
1004 * Its all a bit involved since we cannot program an hrt while holding the
1005 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
1006 * reschedule event.
1007 *
1008 * When we get rescheduled we reprogram the hrtick_timer outside of the
1009 * rq->lock.
1010 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001011
1012/*
1013 * Use hrtick when:
1014 * - enabled by features
1015 * - hrtimer is actually high res
1016 */
1017static inline int hrtick_enabled(struct rq *rq)
1018{
1019 if (!sched_feat(HRTICK))
1020 return 0;
Ingo Molnarba420592008-07-20 11:02:06 +02001021 if (!cpu_active(cpu_of(rq)))
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001022 return 0;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001023 return hrtimer_is_hres_active(&rq->hrtick_timer);
1024}
1025
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001026static void hrtick_clear(struct rq *rq)
1027{
1028 if (hrtimer_active(&rq->hrtick_timer))
1029 hrtimer_cancel(&rq->hrtick_timer);
1030}
1031
1032/*
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001033 * High-resolution timer tick.
1034 * Runs from hardirq context with interrupts disabled.
1035 */
1036static enum hrtimer_restart hrtick(struct hrtimer *timer)
1037{
1038 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1039
1040 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1041
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001042 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02001043 update_rq_clock(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001044 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001045 raw_spin_unlock(&rq->lock);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001046
1047 return HRTIMER_NORESTART;
1048}
1049
Rabin Vincent95e904c2008-05-11 05:55:33 +05301050#ifdef CONFIG_SMP
Peter Zijlstra31656512008-07-18 18:01:23 +02001051/*
1052 * called from hardirq (IPI) context
1053 */
1054static void __hrtick_start(void *arg)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001055{
Peter Zijlstra31656512008-07-18 18:01:23 +02001056 struct rq *rq = arg;
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001057
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001058 raw_spin_lock(&rq->lock);
Peter Zijlstra31656512008-07-18 18:01:23 +02001059 hrtimer_restart(&rq->hrtick_timer);
1060 rq->hrtick_csd_pending = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001061 raw_spin_unlock(&rq->lock);
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001062}
1063
Peter Zijlstra31656512008-07-18 18:01:23 +02001064/*
1065 * Called to set the hrtick timer state.
1066 *
1067 * called with rq->lock held and irqs disabled
1068 */
1069static void hrtick_start(struct rq *rq, u64 delay)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001070{
Peter Zijlstra31656512008-07-18 18:01:23 +02001071 struct hrtimer *timer = &rq->hrtick_timer;
1072 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001073
Arjan van de Vencc584b22008-09-01 15:02:30 -07001074 hrtimer_set_expires(timer, time);
Peter Zijlstra31656512008-07-18 18:01:23 +02001075
1076 if (rq == this_rq()) {
1077 hrtimer_restart(timer);
1078 } else if (!rq->hrtick_csd_pending) {
Peter Zijlstra6e275632009-02-25 13:59:48 +01001079 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
Peter Zijlstra31656512008-07-18 18:01:23 +02001080 rq->hrtick_csd_pending = 1;
1081 }
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001082}
1083
1084static int
1085hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1086{
1087 int cpu = (int)(long)hcpu;
1088
1089 switch (action) {
1090 case CPU_UP_CANCELED:
1091 case CPU_UP_CANCELED_FROZEN:
1092 case CPU_DOWN_PREPARE:
1093 case CPU_DOWN_PREPARE_FROZEN:
1094 case CPU_DEAD:
1095 case CPU_DEAD_FROZEN:
Peter Zijlstra31656512008-07-18 18:01:23 +02001096 hrtick_clear(cpu_rq(cpu));
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001097 return NOTIFY_OK;
1098 }
1099
1100 return NOTIFY_DONE;
1101}
1102
Rakib Mullickfa748202008-09-22 14:55:45 -07001103static __init void init_hrtick(void)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001104{
1105 hotcpu_notifier(hotplug_hrtick, 0);
1106}
Peter Zijlstra31656512008-07-18 18:01:23 +02001107#else
1108/*
1109 * Called to set the hrtick timer state.
1110 *
1111 * called with rq->lock held and irqs disabled
1112 */
1113static void hrtick_start(struct rq *rq, u64 delay)
1114{
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +01001115 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
Arun R Bharadwaj5c333862009-04-16 12:14:37 +05301116 HRTIMER_MODE_REL_PINNED, 0);
Peter Zijlstra31656512008-07-18 18:01:23 +02001117}
1118
Andrew Morton006c75f2008-09-22 14:55:46 -07001119static inline void init_hrtick(void)
Peter Zijlstra31656512008-07-18 18:01:23 +02001120{
1121}
Rabin Vincent95e904c2008-05-11 05:55:33 +05301122#endif /* CONFIG_SMP */
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001123
1124static void init_rq_hrtick(struct rq *rq)
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001125{
Peter Zijlstra31656512008-07-18 18:01:23 +02001126#ifdef CONFIG_SMP
1127 rq->hrtick_csd_pending = 0;
1128
1129 rq->hrtick_csd.flags = 0;
1130 rq->hrtick_csd.func = __hrtick_start;
1131 rq->hrtick_csd.info = rq;
1132#endif
1133
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001134 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1135 rq->hrtick_timer.function = hrtick;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001136}
Andrew Morton006c75f2008-09-22 14:55:46 -07001137#else /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001138static inline void hrtick_clear(struct rq *rq)
1139{
1140}
1141
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001142static inline void init_rq_hrtick(struct rq *rq)
1143{
1144}
1145
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001146static inline void init_hrtick(void)
1147{
1148}
Andrew Morton006c75f2008-09-22 14:55:46 -07001149#endif /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001150
Ingo Molnar1b9f19c2007-07-09 18:51:59 +02001151/*
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001152 * resched_task - mark a task 'to be rescheduled now'.
1153 *
1154 * On UP this means the setting of the need_resched flag, on SMP it
1155 * might also involve a cross-CPU call to trigger the scheduler on
1156 * the target CPU.
1157 */
1158#ifdef CONFIG_SMP
1159
1160#ifndef tsk_is_polling
1161#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1162#endif
1163
Peter Zijlstra31656512008-07-18 18:01:23 +02001164static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001165{
1166 int cpu;
1167
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001168 assert_raw_spin_locked(&task_rq(p)->lock);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001169
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001170 if (test_tsk_need_resched(p))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001171 return;
1172
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001173 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001174
1175 cpu = task_cpu(p);
1176 if (cpu == smp_processor_id())
1177 return;
1178
1179 /* NEED_RESCHED must be visible before we test polling */
1180 smp_mb();
1181 if (!tsk_is_polling(p))
1182 smp_send_reschedule(cpu);
1183}
1184
1185static void resched_cpu(int cpu)
1186{
1187 struct rq *rq = cpu_rq(cpu);
1188 unsigned long flags;
1189
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001190 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001191 return;
1192 resched_task(cpu_curr(cpu));
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001193 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001194}
Thomas Gleixner06d83082008-03-22 09:20:24 +01001195
1196#ifdef CONFIG_NO_HZ
1197/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07001198 * In the semi idle case, use the nearest busy cpu for migrating timers
1199 * from an idle cpu. This is good for power-savings.
1200 *
1201 * We don't do similar optimization for completely idle system, as
1202 * selecting an idle cpu will add more delays to the timers than intended
1203 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
1204 */
1205int get_nohz_timer_target(void)
1206{
1207 int cpu = smp_processor_id();
1208 int i;
1209 struct sched_domain *sd;
1210
1211 for_each_domain(cpu, sd) {
1212 for_each_cpu(i, sched_domain_span(sd))
1213 if (!idle_cpu(i))
1214 return i;
1215 }
1216 return cpu;
1217}
1218/*
Thomas Gleixner06d83082008-03-22 09:20:24 +01001219 * When add_timer_on() enqueues a timer into the timer wheel of an
1220 * idle CPU then this timer might expire before the next timer event
1221 * which is scheduled to wake up that CPU. In case of a completely
1222 * idle system the next event might even be infinite time into the
1223 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1224 * leaves the inner idle loop so the newly added timer is taken into
1225 * account when the CPU goes back to idle and evaluates the timer
1226 * wheel for the next timer event.
1227 */
1228void wake_up_idle_cpu(int cpu)
1229{
1230 struct rq *rq = cpu_rq(cpu);
1231
1232 if (cpu == smp_processor_id())
1233 return;
1234
1235 /*
1236 * This is safe, as this function is called with the timer
1237 * wheel base lock of (cpu) held. When the CPU is on the way
1238 * to idle and has not yet set rq->curr to idle then it will
1239 * be serialized on the timer wheel base lock and take the new
1240 * timer into account automatically.
1241 */
1242 if (rq->curr != rq->idle)
1243 return;
1244
1245 /*
1246 * We can set TIF_RESCHED on the idle task of the other CPU
1247 * lockless. The worst case is that the other CPU runs the
1248 * idle task through an additional NOOP schedule()
1249 */
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001250 set_tsk_need_resched(rq->idle);
Thomas Gleixner06d83082008-03-22 09:20:24 +01001251
1252 /* NEED_RESCHED must be visible before we test polling */
1253 smp_mb();
1254 if (!tsk_is_polling(rq->idle))
1255 smp_send_reschedule(cpu);
1256}
Mike Galbraith39c0cbe2010-03-11 17:17:13 +01001257
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001258#endif /* CONFIG_NO_HZ */
Thomas Gleixner06d83082008-03-22 09:20:24 +01001259
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001260static u64 sched_avg_period(void)
1261{
1262 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1263}
1264
1265static void sched_avg_update(struct rq *rq)
1266{
1267 s64 period = sched_avg_period();
1268
1269 while ((s64)(rq->clock - rq->age_stamp) > period) {
Will Deacon0d98bb22010-05-24 12:11:43 -07001270 /*
1271 * Inline assembly required to prevent the compiler
1272 * optimising this loop into a divmod call.
1273 * See __iter_div_u64_rem() for another example of this.
1274 */
1275 asm("" : "+rm" (rq->age_stamp));
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001276 rq->age_stamp += period;
1277 rq->rt_avg /= 2;
1278 }
1279}
1280
1281static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1282{
1283 rq->rt_avg += rt_delta;
1284 sched_avg_update(rq);
1285}
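/*
 * A minimal sketch of the decay above (illustrative only, not part of
 * the original file): rt_avg halves once per sched_avg_period(), i.e.
 * every 500ms assuming the default sysctl_sched_time_avg of 1000ms, so
 * 2s of aging divides a stale rt_avg by 2^4 = 16.
 */
static u64 __maybe_unused rt_avg_after_aging(u64 rt_avg, u64 aged_ns)
{
	u64 period = sched_avg_period();

	/* mirrors the loop in sched_avg_update() */
	while (aged_ns >= period) {
		aged_ns -= period;
		rt_avg >>= 1;	/* geometric decay */
	}
	return rt_avg;
}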
1286
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001287#else /* !CONFIG_SMP */
Peter Zijlstra31656512008-07-18 18:01:23 +02001288static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001289{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001290 assert_raw_spin_locked(&task_rq(p)->lock);
Peter Zijlstra31656512008-07-18 18:01:23 +02001291 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001292}
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001293
1294static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1295{
1296}
Suresh Siddhada2b71e2010-08-23 13:42:51 -07001297
1298static void sched_avg_update(struct rq *rq)
1299{
1300}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001301#endif /* CONFIG_SMP */
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001302
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001303#if BITS_PER_LONG == 32
1304# define WMULT_CONST (~0UL)
1305#else
1306# define WMULT_CONST (1UL << 32)
1307#endif
1308
1309#define WMULT_SHIFT 32
1310
Ingo Molnar194081e2007-08-09 11:16:51 +02001311/*
1312 * Shift right and round:
1313 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001314#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
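/*
 * e.g. SRR(10, 2) == (10 + 2) >> 2 == 3: 10/4 = 2.5 rounded to the
 * nearest integer rather than truncated to 2.
 */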
Ingo Molnar194081e2007-08-09 11:16:51 +02001315
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001316/*
1317 * delta *= weight / lw
1318 */
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001319static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001320calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1321 struct load_weight *lw)
1322{
1323 u64 tmp;
1324
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001325 if (!lw->inv_weight) {
1326 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1327 lw->inv_weight = 1;
1328 else
1329 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1330 / (lw->weight+1);
1331 }
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001332
1333 tmp = (u64)delta_exec * weight;
1334 /*
1335 * Check whether we'd overflow the 64-bit multiplication:
1336 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001337 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001338 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001339 WMULT_SHIFT/2);
1340 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001341 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001342
Ingo Molnarecf691d2007-08-02 17:41:40 +02001343 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001344}
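/*
 * Worked example with illustrative values: delta_exec = 1000000 (1ms),
 * weight = 1024 (nice 0) charged against a nice-1 load, i.e.
 * lw->weight = 820, lw->inv_weight = 5237765 (prio_to_wmult[21],
 * ~2^32/820):
 *
 *	tmp = 1000000 * 1024 = 1024000000	(below WMULT_CONST)
 *	SRR(tmp * 5237765, 32) ~= 1248780	~= 1024000000 / 820
 *
 * so the multiply-and-shift reproduces delta * weight / lw->weight
 * without a 64-bit division.
 */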
1345
Ingo Molnar10919852007-10-15 17:00:04 +02001346static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001347{
1348 lw->weight += inc;
Ingo Molnare89996a2008-03-14 23:48:28 +01001349 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001350}
1351
Ingo Molnar10919852007-10-15 17:00:04 +02001352static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001353{
1354 lw->weight -= dec;
Ingo Molnare89996a2008-03-14 23:48:28 +01001355 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001356}
1357
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001358static inline void update_load_set(struct load_weight *lw, unsigned long w)
1359{
1360 lw->weight = w;
1361 lw->inv_weight = 0;
1362}
1363
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001365 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1366 * of tasks with abnormal "nice" values across CPUs, the contribution that
1367 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001368 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001369 * scaled version of the new time slice allocation that they receive on time
1370 * slice expiry etc.
1371 */
1372
Peter Zijlstracce7ade2009-01-15 14:53:37 +01001373#define WEIGHT_IDLEPRIO 3
1374#define WMULT_IDLEPRIO 1431655765
Ingo Molnardd41f592007-07-09 18:51:59 +02001375
1376/*
1377 * Nice levels are multiplicative, with a gentle 10% change for every
1378 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1379 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1380 * that remained on nice 0.
1381 *
1382 * The "10% effect" is relative and cumulative: from _any_ nice level,
1383 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001384 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1385 * If a task goes up by ~10% and another task goes down by ~10% then
1386 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001387 */
1388static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001389 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1390 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1391 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1392 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1393 /* 0 */ 1024, 820, 655, 526, 423,
1394 /* 5 */ 335, 272, 215, 172, 137,
1395 /* 10 */ 110, 87, 70, 56, 45,
1396 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001397};
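/*
 * Worked example: two CPU-bound tasks at nice 0 (weight 1024) and
 * nice 1 (weight 820) split one CPU 1024:820, i.e. ~55.5% vs ~44.5%.
 * Each share moves ~10% away from the even 50/50 split, and the weight
 * ratio 1024/820 ~= 1.25 is the ~25% relative distance described above.
 */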
1398
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001399/*
1400 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1401 *
1402 * In cases where the weight does not change often, we can use the
1403 * precalculated inverse to speed up arithmetic by turning divisions
1404 * into multiplications:
1405 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001406static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001407 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1408 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1409 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1410 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1411 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1412 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1413 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1414 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001415};
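/*
 * e.g. for nice 0: 2^32 / 1024 = 4194304, so multiplying by
 * prio_to_wmult[20] and shifting right by WMULT_SHIFT is exactly a
 * division by the nice-0 weight.
 */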
Peter Williams2dd73a42006-06-27 02:54:34 -07001416
Bharata B Raoef12fef2009-03-31 10:02:22 +05301417/* Time spent by the tasks of the cpu accounting group executing in ... */
1418enum cpuacct_stat_index {
1419 CPUACCT_STAT_USER, /* ... user mode */
1420 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1421
1422 CPUACCT_STAT_NSTATS,
1423};
1424
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001425#ifdef CONFIG_CGROUP_CPUACCT
1426static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
Bharata B Raoef12fef2009-03-31 10:02:22 +05301427static void cpuacct_update_stats(struct task_struct *tsk,
1428 enum cpuacct_stat_index idx, cputime_t val);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001429#else
1430static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
Bharata B Raoef12fef2009-03-31 10:02:22 +05301431static inline void cpuacct_update_stats(struct task_struct *tsk,
1432 enum cpuacct_stat_index idx, cputime_t val) {}
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001433#endif
1434
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001435static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1436{
1437 update_load_add(&rq->load, load);
1438}
1439
1440static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1441{
1442 update_load_sub(&rq->load, load);
1443}
1444
Ingo Molnar7940ca32008-08-19 13:40:47 +02001445#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
Peter Zijlstraeb755802008-08-19 12:33:05 +02001446typedef int (*tg_visitor)(struct task_group *, void *);
1447
1448/*
1449 * Iterate the full tree, calling @down when first entering a node and @up when
1450 * leaving it for the final time.
1451 */
1452static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1453{
1454 struct task_group *parent, *child;
1455 int ret;
1456
1457 rcu_read_lock();
1458 parent = &root_task_group;
1459down:
1460 ret = (*down)(parent, data);
1461 if (ret)
1462 goto out_unlock;
1463 list_for_each_entry_rcu(child, &parent->children, siblings) {
1464 parent = child;
1465 goto down;
1466
1467up:
1468 continue;
1469 }
1470 ret = (*up)(parent, data);
1471 if (ret)
1472 goto out_unlock;
1473
1474 child = parent;
1475 parent = parent->parent;
1476 if (parent)
1477 goto up;
1478out_unlock:
1479 rcu_read_unlock();
1480
1481 return ret;
1482}
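/*
 * For reference, a recursive equivalent of the goto-based walk above
 * (an illustrative sketch only, not in this file; the in-tree version
 * is iterative to keep kernel stack usage bounded on deep hierarchies).
 * Unlike walk_tg_tree(), which takes rcu_read_lock() itself, the sketch
 * assumes the caller already holds it:
 */
static int __maybe_unused walk_tg_tree_recursive(struct task_group *tg,
						 tg_visitor down,
						 tg_visitor up, void *data)
{
	struct task_group *child;
	int ret;

	ret = (*down)(tg, data);	/* visit on the way down */
	if (ret)
		return ret;
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		ret = walk_tg_tree_recursive(child, down, up, data);
		if (ret)
			return ret;
	}
	return (*up)(tg, data);		/* visit on the way back up */
}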
1483
1484static int tg_nop(struct task_group *tg, void *data)
1485{
1486 return 0;
1487}
1488#endif
1489
Gregory Haskinse7693a32008-01-25 21:08:09 +01001490#ifdef CONFIG_SMP
Peter Zijlstraf5f08f32009-09-10 13:35:28 +02001491/* Used instead of source_load when we know the type == 0 */
1492static unsigned long weighted_cpuload(const int cpu)
1493{
1494 return cpu_rq(cpu)->load.weight;
1495}
1496
1497/*
1498 * Return a low guess at the load of a migration-source cpu weighted
1499 * according to the scheduling class and "nice" value.
1500 *
1501 * We want to under-estimate the load of migration sources, to
1502 * balance conservatively.
1503 */
1504static unsigned long source_load(int cpu, int type)
1505{
1506 struct rq *rq = cpu_rq(cpu);
1507 unsigned long total = weighted_cpuload(cpu);
1508
1509 if (type == 0 || !sched_feat(LB_BIAS))
1510 return total;
1511
1512 return min(rq->cpu_load[type-1], total);
1513}
1514
1515/*
1516 * Return a high guess at the load of a migration-target cpu weighted
1517 * according to the scheduling class and "nice" value.
1518 */
1519static unsigned long target_load(int cpu, int type)
1520{
1521 struct rq *rq = cpu_rq(cpu);
1522 unsigned long total = weighted_cpuload(cpu);
1523
1524 if (type == 0 || !sched_feat(LB_BIAS))
1525 return total;
1526
1527 return max(rq->cpu_load[type-1], total);
1528}
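/*
 * Together, the min/max against recent cpu_load[] history biases the
 * balancer conservatively: a migration source never looks busier than
 * its recent past and a migration target never looks idler, so
 * transient spikes neither trigger nor attract migrations too eagerly.
 */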
1529
Peter Zijlstraae154be2009-09-10 14:40:57 +02001530static unsigned long power_of(int cpu)
1531{
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02001532 return cpu_rq(cpu)->cpu_power;
Peter Zijlstraae154be2009-09-10 14:40:57 +02001533}
1534
Gregory Haskinse7693a32008-01-25 21:08:09 +01001535static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001536
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001537static unsigned long cpu_avg_load_per_task(int cpu)
1538{
1539 struct rq *rq = cpu_rq(cpu);
Ingo Molnaraf6d5962008-11-29 20:45:15 +01001540 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001541
Steven Rostedt4cd42622008-11-26 21:04:24 -05001542 if (nr_running)
1543 rq->avg_load_per_task = rq->load.weight / nr_running;
Balbir Singha2d47772008-11-12 16:19:00 +05301544 else
1545 rq->avg_load_per_task = 0;
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001546
1547 return rq->avg_load_per_task;
1548}
1549
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001550#ifdef CONFIG_FAIR_GROUP_SCHED
1551
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001552/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001553 * Compute the cpu's hierarchical load factor for each task group.
1554 * This needs to be done in a top-down fashion because the load of a child
1555 * group is a fraction of its parent's load.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001556 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001557static int tg_load_down(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001558{
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001559 unsigned long load;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001560 long cpu = (long)data;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001561
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001562 if (!tg->parent) {
1563 load = cpu_rq(cpu)->load.weight;
1564 } else {
1565 load = tg->parent->cfs_rq[cpu]->h_load;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001566 load *= tg->se[cpu]->load.weight;
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001567 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1568 }
1569
1570 tg->cfs_rq[cpu]->h_load = load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001571
Peter Zijlstraeb755802008-08-19 12:33:05 +02001572 return 0;
Peter Zijlstra4d8d5952008-06-27 13:41:19 +02001573}
1574
Peter Zijlstraeb755802008-08-19 12:33:05 +02001575static void update_h_load(long cpu)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001576{
Peter Zijlstraeb755802008-08-19 12:33:05 +02001577 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001578}
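/*
 * Worked example with illustrative numbers: with a root rq load of
 * 2048, a child group whose se weighs 1024 on a parent cfs_rq of total
 * weight 2048 gets h_load = 2048 * 1024 / (2048 + 1) ~= 1023, i.e.
 * about half the root load, as expected for half the parent's weight.
 */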
1579
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001580#endif
1581
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001582#ifdef CONFIG_PREEMPT
1583
Peter Zijlstrab78bb862009-09-15 14:23:18 +02001584static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1585
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001586/*
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001587 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1588 * way at the expense of forcing extra atomic operations in all
1589 * invocations. This ensures that the double_lock is acquired using the
1590 * same underlying policy as the spinlock_t on this architecture, which
1591 * reduces latency compared to the unfair variant below. However, it
1592 * also adds more overhead and therefore may reduce throughput.
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001593 */
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001594static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1595 __releases(this_rq->lock)
1596 __acquires(busiest->lock)
1597 __acquires(this_rq->lock)
1598{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001599 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001600 double_rq_lock(this_rq, busiest);
1601
1602 return 1;
1603}
1604
1605#else
1606/*
1607 * Unfair double_lock_balance: Optimizes throughput at the expense of
1608 * latency by eliminating extra atomic operations when the locks are
1609 * already in proper order on entry. This favors lower cpu-ids and will
1610 * grant the double lock to lower cpus over higher ids under contention,
1611 * regardless of entry order into the function.
1612 */
1613static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001614 __releases(this_rq->lock)
1615 __acquires(busiest->lock)
1616 __acquires(this_rq->lock)
1617{
1618 int ret = 0;
1619
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001620 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001621 if (busiest < this_rq) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001622 raw_spin_unlock(&this_rq->lock);
1623 raw_spin_lock(&busiest->lock);
1624 raw_spin_lock_nested(&this_rq->lock,
1625 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001626 ret = 1;
1627 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001628 raw_spin_lock_nested(&busiest->lock,
1629 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001630 }
1631 return ret;
1632}
1633
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001634#endif /* CONFIG_PREEMPT */
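/*
 * Either way, the two rq locks end up taken in ascending lock-address
 * order: whichever CPU holds the higher-addressed lock first backs off
 * (drops and re-acquires in order), so an AB-BA deadlock between two
 * balancing CPUs cannot occur.
 */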
1635
1636/*
1637 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1638 */
1639static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1640{
1641 if (unlikely(!irqs_disabled())) {
1642 /* printk() doesn't work well under rq->lock */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001643 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001644 BUG_ON(1);
1645 }
1646
1647 return _double_lock_balance(this_rq, busiest);
1648}
1649
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001650static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1651 __releases(busiest->lock)
1652{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001653 raw_spin_unlock(&busiest->lock);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001654 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1655}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001656
1657/*
1658 * double_rq_lock - safely lock two runqueues
1659 *
1660 * Note this does not disable interrupts like task_rq_lock,
1661 * you need to do so manually before calling.
1662 */
1663static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1664 __acquires(rq1->lock)
1665 __acquires(rq2->lock)
1666{
1667 BUG_ON(!irqs_disabled());
1668 if (rq1 == rq2) {
1669 raw_spin_lock(&rq1->lock);
1670 __acquire(rq2->lock); /* Fake it out ;) */
1671 } else {
1672 if (rq1 < rq2) {
1673 raw_spin_lock(&rq1->lock);
1674 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1675 } else {
1676 raw_spin_lock(&rq2->lock);
1677 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1678 }
1679 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001680}
1681
1682/*
1683 * double_rq_unlock - safely unlock two runqueues
1684 *
1685 * Note this does not restore interrupts like task_rq_unlock,
1686 * you need to do so manually after calling.
1687 */
1688static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1689 __releases(rq1->lock)
1690 __releases(rq2->lock)
1691{
1692 raw_spin_unlock(&rq1->lock);
1693 if (rq1 != rq2)
1694 raw_spin_unlock(&rq2->lock);
1695 else
1696 __release(rq2->lock);
1697}
1698
Mike Galbraithd95f4122011-02-01 09:50:51 -05001699#else /* CONFIG_SMP */
1700
1701/*
1702 * double_rq_lock - safely lock two runqueues
1703 *
1704 * Note this does not disable interrupts like task_rq_lock,
1705 * you need to do so manually before calling.
1706 */
1707static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1708 __acquires(rq1->lock)
1709 __acquires(rq2->lock)
1710{
1711 BUG_ON(!irqs_disabled());
1712 BUG_ON(rq1 != rq2);
1713 raw_spin_lock(&rq1->lock);
1714 __acquire(rq2->lock); /* Fake it out ;) */
1715}
1716
1717/*
1718 * double_rq_unlock - safely unlock two runqueues
1719 *
1720 * Note this does not restore interrupts like task_rq_unlock,
1721 * you need to do so manually after calling.
1722 */
1723static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1724 __releases(rq1->lock)
1725 __releases(rq2->lock)
1726{
1727 BUG_ON(rq1 != rq2);
1728 raw_spin_unlock(&rq1->lock);
1729 __release(rq2->lock);
1730}
1731
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001732#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001733
Peter Zijlstra74f51872010-04-22 21:50:19 +02001734static void calc_load_account_idle(struct rq *this_rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01001735static void update_sysctl(void);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01001736static int get_update_sysctl_factor(void);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07001737static void update_cpu_load(struct rq *this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001738
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001739static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1740{
1741 set_task_rq(p, cpu);
1742#ifdef CONFIG_SMP
1743 /*
1744 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1745 * successfully executed on another CPU. We must ensure that updates of
1746 * per-task data have been completed by this moment.
1747 */
1748 smp_wmb();
1749 task_thread_info(p)->cpu = cpu;
1750#endif
1751}
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001752
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001753static const struct sched_class rt_sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02001754
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001755#define sched_class_highest (&stop_sched_class)
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001756#define for_each_class(class) \
1757 for (class = sched_class_highest; class; class = class->next)
Ingo Molnardd41f592007-07-09 18:51:59 +02001758
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001759#include "sched_stats.h"
1760
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001761static void inc_nr_running(struct rq *rq)
Ingo Molnar6363ca52008-05-29 11:28:57 +02001762{
1763 rq->nr_running++;
Ingo Molnar6363ca52008-05-29 11:28:57 +02001764}
1765
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001766static void dec_nr_running(struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001767{
1768 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001769}
1770
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001771static void set_load_weight(struct task_struct *p)
1772{
Ingo Molnardd41f592007-07-09 18:51:59 +02001773 /*
1774 * SCHED_IDLE tasks get minimal weight:
1775 */
1776 if (p->policy == SCHED_IDLE) {
1777 p->se.load.weight = WEIGHT_IDLEPRIO;
1778 p->se.load.inv_weight = WMULT_IDLEPRIO;
1779 return;
1780 }
1781
1782 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1783 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001784}
1785
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001786static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001787{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001788 update_rq_clock(rq);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001789 sched_info_queued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001790 p->sched_class->enqueue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001791}
1792
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001793static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnardd41f592007-07-09 18:51:59 +02001794{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001795 update_rq_clock(rq);
Ankita Garg46ac22b2008-07-01 14:30:06 +05301796 sched_info_dequeued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001797 p->sched_class->dequeue_task(rq, p, flags);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001798}
1799
1800/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001801 * activate_task - move a task to the runqueue.
1802 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001803static void activate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001804{
1805 if (task_contributes_to_load(p))
1806 rq->nr_uninterruptible--;
1807
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001808 enqueue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001809 inc_nr_running(rq);
1810}
1811
1812/*
1813 * deactivate_task - remove a task from the runqueue.
1814 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001815static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001816{
1817 if (task_contributes_to_load(p))
1818 rq->nr_uninterruptible++;
1819
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001820 dequeue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001821 dec_nr_running(rq);
1822}
1823
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001824#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1825
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001826/*
1827 * There are no locks covering percpu hardirq/softirq time.
1828 * They are only modified in account_system_vtime, on the corresponding CPU
1829 * with interrupts disabled. So, writes are safe.
1830 * They are read and saved off onto struct rq in update_rq_clock().
1831 * This may result in another CPU reading this CPU's irq time and can
1832 * race with irq/account_system_vtime on this CPU. We would either get the old
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001833 * or the new value, with a side effect of accounting a slice of irq time to the
1834 * wrong task when an irq is in progress while we read rq->clock. That is a worthy
1835 * compromise in place of having locks on each irq in account_system_time.
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001836 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001837static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1838static DEFINE_PER_CPU(u64, cpu_softirq_time);
1839
1840static DEFINE_PER_CPU(u64, irq_start_time);
1841static int sched_clock_irqtime;
1842
1843void enable_sched_clock_irqtime(void)
1844{
1845 sched_clock_irqtime = 1;
1846}
1847
1848void disable_sched_clock_irqtime(void)
1849{
1850 sched_clock_irqtime = 0;
1851}
1852
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001853#ifndef CONFIG_64BIT
1854static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1855
1856static inline void irq_time_write_begin(void)
1857{
1858 __this_cpu_inc(irq_time_seq.sequence);
1859 smp_wmb();
1860}
1861
1862static inline void irq_time_write_end(void)
1863{
1864 smp_wmb();
1865 __this_cpu_inc(irq_time_seq.sequence);
1866}
1867
1868static inline u64 irq_time_read(int cpu)
1869{
1870 u64 irq_time;
1871 unsigned seq;
1872
1873 do {
1874 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1875 irq_time = per_cpu(cpu_softirq_time, cpu) +
1876 per_cpu(cpu_hardirq_time, cpu);
1877 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1878
1879 return irq_time;
1880}
1881#else /* CONFIG_64BIT */
1882static inline void irq_time_write_begin(void)
1883{
1884}
1885
1886static inline void irq_time_write_end(void)
1887{
1888}
1889
1890static inline u64 irq_time_read(int cpu)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001891{
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001892 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1893}
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001894#endif /* CONFIG_64BIT */
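/*
 * On 64-bit, the u64 reads above are single aligned loads and cannot
 * tear, so no seqcount is needed. On 32-bit, the reader retries
 * whenever the write-side sequence count changed mid-read, which
 * prevents observing half of a 64-bit update.
 */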
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001895
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001896/*
1897 * Called before incrementing preempt_count on {soft,}irq_enter
1898 * and before decrementing preempt_count on {soft,}irq_exit.
1899 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001900void account_system_vtime(struct task_struct *curr)
1901{
1902 unsigned long flags;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001903 s64 delta;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001904 int cpu;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001905
1906 if (!sched_clock_irqtime)
1907 return;
1908
1909 local_irq_save(flags);
1910
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001911 cpu = smp_processor_id();
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001912 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1913 __this_cpu_add(irq_start_time, delta);
1914
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001915 irq_time_write_begin();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001916 /*
1917 * We do not account for softirq time from ksoftirqd here.
1918 * We want to continue accounting softirq time to the ksoftirqd thread
1919 * in that case, so as not to confuse the scheduler with a special task
1920 * that does not consume any time but still wants to run.
1921 */
1922 if (hardirq_count())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001923 __this_cpu_add(cpu_hardirq_time, delta);
Venkatesh Pallipadi4dd53d82010-12-21 17:09:00 -08001924 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001925 __this_cpu_add(cpu_softirq_time, delta);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001926
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001927 irq_time_write_end();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001928 local_irq_restore(flags);
1929}
Ingo Molnarb7dadc32010-10-18 20:00:37 +02001930EXPORT_SYMBOL_GPL(account_system_vtime);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001931
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001932static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001933{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001934 s64 irq_delta;
1935
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001936 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001937
1938 /*
1939 * Since irq_time is only updated on {soft,}irq_exit, we might run into
1940 * this case when a previous update_rq_clock() happened inside a
1941 * {soft,}irq region.
1942 *
1943 * When this happens, we stop ->clock_task and only update the
1944 * prev_irq_time stamp to account for the part that fit, so that a next
1945 * update will consume the rest. This ensures ->clock_task is
1946 * monotonic.
1947 *
1948 * It does, however, cause some slight misattribution of {soft,}irq
1949 * time; a more accurate solution would be to update the irq_time using
1950 * the current rq->clock timestamp, except that would require using
1951 * atomic ops.
1952 */
1953 if (irq_delta > delta)
1954 irq_delta = delta;
1955
1956 rq->prev_irq_time += irq_delta;
1957 delta -= irq_delta;
1958 rq->clock_task += delta;
1959
1960 if (irq_delta && sched_feat(NONIRQ_POWER))
1961 sched_rt_avg_update(rq, irq_delta);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001962}
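/*
 * Worked example of the clamp above: if 3ms of rq clock elapsed (delta)
 * but 5ms of irq time accumulated since the last update (irq_delta),
 * only 3ms is folded out now: prev_irq_time advances by 3ms, clock_task
 * by 0, and the remaining 2ms is consumed by the next update, keeping
 * clock_task monotonic.
 */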
1963
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08001964static int irqtime_account_hi_update(void)
1965{
1966 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1967 unsigned long flags;
1968 u64 latest_ns;
1969 int ret = 0;
1970
1971 local_irq_save(flags);
1972 latest_ns = this_cpu_read(cpu_hardirq_time);
1973 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
1974 ret = 1;
1975 local_irq_restore(flags);
1976 return ret;
1977}
1978
1979static int irqtime_account_si_update(void)
1980{
1981 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1982 unsigned long flags;
1983 u64 latest_ns;
1984 int ret = 0;
1985
1986 local_irq_save(flags);
1987 latest_ns = this_cpu_read(cpu_softirq_time);
1988 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
1989 ret = 1;
1990 local_irq_restore(flags);
1991 return ret;
1992}
1993
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001994#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001995
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08001996#define sched_clock_irqtime (0)
1997
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001998static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001999{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01002000 rq->clock_task += delta;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07002001}
2002
Peter Zijlstrafe44d622010-12-09 14:15:34 +01002003#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07002004
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002005#include "sched_idletask.c"
2006#include "sched_fair.c"
2007#include "sched_rt.c"
Mike Galbraith5091faa2010-11-30 14:18:03 +01002008#include "sched_autogroup.c"
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002009#include "sched_stoptask.c"
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002010#ifdef CONFIG_SCHED_DEBUG
2011# include "sched_debug.c"
2012#endif
2013
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002014void sched_set_stop_task(int cpu, struct task_struct *stop)
2015{
2016 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2017 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2018
2019 if (stop) {
2020 /*
2021 * Make it appear like a SCHED_FIFO task; it's something
2022 * userspace knows about and won't get confused about.
2023 *
2024 * Also, it will make PI more or less work without too
2025 * much confusion -- but then, stop work should not
2026 * rely on PI working anyway.
2027 */
2028 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2029
2030 stop->sched_class = &stop_sched_class;
2031 }
2032
2033 cpu_rq(cpu)->stop = stop;
2034
2035 if (old_stop) {
2036 /*
2037 * Reset it back to a normal scheduling class so that
2038 * it can die in pieces.
2039 */
2040 old_stop->sched_class = &rt_sched_class;
2041 }
2042}
2043
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002044/*
Ingo Molnardd41f592007-07-09 18:51:59 +02002045 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02002046 */
Ingo Molnar14531182007-07-09 18:51:59 +02002047static inline int __normal_prio(struct task_struct *p)
2048{
Ingo Molnardd41f592007-07-09 18:51:59 +02002049 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02002050}
2051
2052/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07002053 * Calculate the expected normal priority: i.e. priority
2054 * without taking RT-inheritance into account. Might be
2055 * boosted by interactivity modifiers. Changes upon fork,
2056 * setprio syscalls, and whenever the interactivity
2057 * estimator recalculates.
2058 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002059static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002060{
2061 int prio;
2062
Ingo Molnare05606d2007-07-09 18:51:59 +02002063 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07002064 prio = MAX_RT_PRIO-1 - p->rt_priority;
2065 else
2066 prio = __normal_prio(p);
2067 return prio;
2068}
2069
2070/*
2071 * Calculate the current priority, i.e. the priority
2072 * taken into account by the scheduler. This value might
2073 * be boosted by RT tasks, or might be boosted by
2074 * interactivity modifiers. Will be RT if the task got
2075 * RT-boosted. If not then it returns p->normal_prio.
2076 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002077static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002078{
2079 p->normal_prio = normal_prio(p);
2080 /*
2081 * If we are RT tasks or we were boosted to RT priority,
2082 * keep the priority unchanged. Otherwise, update priority
2083 * to the normal priority:
2084 */
2085 if (!rt_prio(p->prio))
2086 return p->normal_prio;
2087 return p->prio;
2088}
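/*
 * Worked example (MAX_RT_PRIO == 100): a SCHED_FIFO task with
 * rt_priority 10 has normal_prio = 99 - 10 = 89, while a SCHED_NORMAL
 * task at nice 0 keeps its static_prio of 120. If the normal task is
 * PI-boosted into the rt range, effective_prio() preserves the boosted
 * p->prio until the boost is undone.
 */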
2089
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090/**
2091 * task_curr - is this task currently executing on a CPU?
2092 * @p: the task in question.
2093 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002094inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095{
2096 return cpu_curr(task_cpu(p)) == p;
2097}
2098
Steven Rostedtcb469842008-01-25 21:08:22 +01002099static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2100 const struct sched_class *prev_class,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002101 int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01002102{
2103 if (prev_class != p->sched_class) {
2104 if (prev_class->switched_from)
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002105 prev_class->switched_from(rq, p);
2106 p->sched_class->switched_to(rq, p);
2107 } else if (oldprio != p->prio)
2108 p->sched_class->prio_changed(rq, p, oldprio);
Steven Rostedtcb469842008-01-25 21:08:22 +01002109}
2110
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002111static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2112{
2113 const struct sched_class *class;
2114
2115 if (p->sched_class == rq->curr->sched_class) {
2116 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2117 } else {
2118 for_each_class(class) {
2119 if (class == rq->curr->sched_class)
2120 break;
2121 if (class == p->sched_class) {
2122 resched_task(rq->curr);
2123 break;
2124 }
2125 }
2126 }
2127
2128 /*
2129 * A queue event has occurred, and we're going to schedule. In
2130 * this case, we can save a useless back-to-back clock update.
2131 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002132 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002133 rq->skip_clock_update = 1;
2134}
2135
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136#ifdef CONFIG_SMP
Ingo Molnarcc367732007-10-15 17:00:18 +02002137/*
2138 * Is this task likely cache-hot:
2139 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002140static int
Ingo Molnarcc367732007-10-15 17:00:18 +02002141task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2142{
2143 s64 delta;
2144
Peter Zijlstrae6c8fba2009-12-16 18:04:33 +01002145 if (p->sched_class != &fair_sched_class)
2146 return 0;
2147
Nikhil Raoef8002f2010-10-13 12:09:35 -07002148 if (unlikely(p->policy == SCHED_IDLE))
2149 return 0;
2150
Ingo Molnarf540a602008-03-15 17:10:34 +01002151 /*
2152 * Buddy candidates are cache hot:
2153 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002154 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
Peter Zijlstra47932412008-11-04 21:25:09 +01002155 (&p->se == cfs_rq_of(&p->se)->next ||
2156 &p->se == cfs_rq_of(&p->se)->last))
Ingo Molnarf540a602008-03-15 17:10:34 +01002157 return 1;
2158
Ingo Molnar6bc16652007-10-15 17:00:18 +02002159 if (sysctl_sched_migration_cost == -1)
2160 return 1;
2161 if (sysctl_sched_migration_cost == 0)
2162 return 0;
2163
Ingo Molnarcc367732007-10-15 17:00:18 +02002164 delta = now - p->se.exec_start;
2165
2166 return delta < (s64)sysctl_sched_migration_cost;
2167}
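/*
 * With the default sysctl_sched_migration_cost of 500000ns, a fair task
 * that executed within the last 0.5ms is treated as cache hot; -1 makes
 * every task hot and 0 makes none hot, as the checks above show.
 */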
2168
Ingo Molnardd41f592007-07-09 18:51:59 +02002169void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02002170{
Peter Zijlstrae2912002009-12-16 18:04:36 +01002171#ifdef CONFIG_SCHED_DEBUG
2172 /*
2173 * We should never call set_task_cpu() on a blocked task,
2174 * ttwu() will sort out the placement.
2175 */
Peter Zijlstra077614e2009-12-17 13:16:31 +01002176 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2177 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
Peter Zijlstrae2912002009-12-16 18:04:36 +01002178#endif
2179
Mathieu Desnoyersde1d7282009-05-05 16:49:59 +08002180 trace_sched_migrate_task(p, new_cpu);
Peter Zijlstracbc34ed2008-12-10 08:08:22 +01002181
Peter Zijlstra0c697742009-12-22 15:43:19 +01002182 if (task_cpu(p) != new_cpu) {
2183 p->se.nr_migrations++;
2184 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2185 }
Ingo Molnardd41f592007-07-09 18:51:59 +02002186
2187 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02002188}
2189
Tejun Heo969c7922010-05-06 18:49:21 +02002190struct migration_arg {
Ingo Molnar36c8b582006-07-03 00:25:41 -07002191 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 int dest_cpu;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002193};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
Tejun Heo969c7922010-05-06 18:49:21 +02002195static int migration_cpu_stop(void *data);
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197/*
2198 * The task's runqueue lock must be held.
2199 * Returns true if you have to wait for the migration thread.
2200 */
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002201static bool need_migrate_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 /*
2204 * If the task is not on a runqueue (and not running), then
Peter Zijlstrae2912002009-12-16 18:04:36 +01002205 * the next wake-up will properly place the task.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 */
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002207 bool running = p->on_rq || p->on_cpu;
2208 smp_rmb(); /* finish_lock_switch() */
2209 return running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210}
2211
2212/*
2213 * wait_task_inactive - wait for a thread to unschedule.
2214 *
Roland McGrath85ba2d82008-07-25 19:45:58 -07002215 * If @match_state is nonzero, it's the @p->state value just checked and
2216 * not expected to change. If it changes, i.e. @p might have woken up,
2217 * then return zero. When we succeed in waiting for @p to be off its CPU,
2218 * we return a positive number (its total switch count). If a second call
2219 * a short while later returns the same number, the caller can be sure that
2220 * @p has remained unscheduled the whole time.
2221 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 * The caller must ensure that the task *will* unschedule sometime soon,
2223 * else this function might spin for a *long* time. This function can't
2224 * be called with interrupts off, or it may introduce deadlock with
2225 * smp_call_function() if an IPI is sent by the same process we are
2226 * waiting to become inactive.
2227 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002228unsigned long wait_task_inactive(struct task_struct *p, long match_state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229{
2230 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002231 int running, on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002232 unsigned long ncsw;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002233 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234
Andi Kleen3a5c3592007-10-15 17:00:14 +02002235 for (;;) {
2236 /*
2237 * We do the initial early heuristics without holding
2238 * any task-queue locks at all. We'll only try to get
2239 * the runqueue lock when things look like they will
2240 * work out!
2241 */
2242 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002243
Andi Kleen3a5c3592007-10-15 17:00:14 +02002244 /*
2245 * If the task is actively running on another CPU
2246 * still, just relax and busy-wait without holding
2247 * any locks.
2248 *
2249 * NOTE! Since we don't hold any locks, it's not
2250 * even sure that "rq" stays as the right runqueue!
2251 * But we don't care, since "task_running()" will
2252 * return false if the runqueue has changed and p
2253 * is actually now running somewhere else!
2254 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002255 while (task_running(rq, p)) {
2256 if (match_state && unlikely(p->state != match_state))
2257 return 0;
Andi Kleen3a5c3592007-10-15 17:00:14 +02002258 cpu_relax();
Roland McGrath85ba2d82008-07-25 19:45:58 -07002259 }
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002260
Andi Kleen3a5c3592007-10-15 17:00:14 +02002261 /*
2262 * Ok, time to look more closely! We need the rq
2263 * lock now, to be *sure*. If we're wrong, we'll
2264 * just go back and repeat.
2265 */
2266 rq = task_rq_lock(p, &flags);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002267 trace_sched_wait_task(p);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002268 running = task_running(rq, p);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002269 on_rq = p->on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002270 ncsw = 0;
Oleg Nesterovf31e11d2008-08-20 16:54:44 -07002271 if (!match_state || p->state == match_state)
Oleg Nesterov93dcf552008-08-20 16:54:44 -07002272 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
Andi Kleen3a5c3592007-10-15 17:00:14 +02002273 task_rq_unlock(rq, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002274
Andi Kleen3a5c3592007-10-15 17:00:14 +02002275 /*
Roland McGrath85ba2d82008-07-25 19:45:58 -07002276 * If it changed from the expected state, bail out now.
2277 */
2278 if (unlikely(!ncsw))
2279 break;
2280
2281 /*
Andi Kleen3a5c3592007-10-15 17:00:14 +02002282 * Was it really running after all now that we
2283 * checked with the proper locks actually held?
2284 *
2285 * Oops. Go back and try again..
2286 */
2287 if (unlikely(running)) {
2288 cpu_relax();
2289 continue;
2290 }
2291
2292 /*
2293 * It's not enough that it's not actively running,
2294 * it must be off the runqueue _entirely_, and not
2295 * preempted!
2296 *
Luis Henriques80dd99b2009-03-16 19:58:09 +00002297 * So if it was still runnable (but just not actively
Andi Kleen3a5c3592007-10-15 17:00:14 +02002298 * running right now), it's preempted, and we should
2299 * yield - it could be a while.
2300 */
2301 if (unlikely(on_rq)) {
Thomas Gleixner8eb90c32011-02-23 23:52:21 +00002302 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
2303
2304 set_current_state(TASK_UNINTERRUPTIBLE);
2305 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002306 continue;
2307 }
2308
2309 /*
2310 * Ahh, all good. It wasn't running, and it wasn't
2311 * runnable, which means that it will never become
2312 * running in the future either. We're all done!
2313 */
2314 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 }
Roland McGrath85ba2d82008-07-25 19:45:58 -07002316
2317 return ncsw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318}
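/*
 * Usage sketch (illustrative): a caller such as ptrace attach samples
 * @p->state first and passes it back in, so a concurrent wakeup makes
 * the call return 0 instead of blocking:
 *
 *	ncsw = wait_task_inactive(child, TASK_TRACED);
 *	if (!ncsw)
 *		return -ESRCH;	(state changed under us)
 */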
2319
2320/***
2321 * kick_process - kick a running thread to enter/exit the kernel
2322 * @p: the to-be-kicked thread
2323 *
2324 * Cause a process which is running on another CPU to enter
2325 * kernel-mode, without any delay. (to get signals handled.)
2326 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002327 * NOTE: this function doesn't have to take the runqueue lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 * because all it wants to ensure is that the remote task enters
2329 * the kernel. If the IPI races and the task has been migrated
2330 * to another CPU then no harm is done and the purpose has been
2331 * achieved as well.
2332 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002333void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334{
2335 int cpu;
2336
2337 preempt_disable();
2338 cpu = task_cpu(p);
2339 if ((cpu != smp_processor_id()) && task_curr(p))
2340 smp_send_reschedule(cpu);
2341 preempt_enable();
2342}
Rusty Russellb43e3522009-06-12 22:27:00 -06002343EXPORT_SYMBOL_GPL(kick_process);
Nick Piggin476d1392005-06-25 14:57:29 -07002344#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002346#ifdef CONFIG_SMP
Oleg Nesterov30da6882010-03-15 10:10:19 +01002347/*
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002348 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
Oleg Nesterov30da6882010-03-15 10:10:19 +01002349 */
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002350static int select_fallback_rq(int cpu, struct task_struct *p)
2351{
2352 int dest_cpu;
2353 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2354
2355 /* Look for allowed, online CPU in same node. */
2356 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2357 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2358 return dest_cpu;
2359
2360 /* Any allowed, online CPU? */
2361 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2362 if (dest_cpu < nr_cpu_ids)
2363 return dest_cpu;
2364
2365 /* No more Mr. Nice Guy. */
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01002366 dest_cpu = cpuset_cpus_allowed_fallback(p);
2367 /*
2368 * Don't tell them about moving exiting tasks or
2369 * kernel threads (both mm NULL), since they never
2370 * leave kernel.
2371 */
2372 if (p->mm && printk_ratelimit()) {
2373 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2374 task_pid_nr(p), p->comm, cpu);
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002375 }
2376
2377 return dest_cpu;
2378}
2379
Peter Zijlstrae2912002009-12-16 18:04:36 +01002380/*
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002381 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
Peter Zijlstrae2912002009-12-16 18:04:36 +01002382 */
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002383static inline
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002384int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002385{
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002386 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002387
2388 /*
2389 * In order not to call set_task_cpu() on a blocking task we need
2390 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2391 * cpu.
2392 *
2393 * Since this is common to all placement strategies, this lives here.
2394 *
2395 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2396 * not worry about this generic constraint ]
2397 */
2398 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
Peter Zijlstra70f11202009-12-20 17:36:27 +01002399 !cpu_online(cpu)))
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002400 cpu = select_fallback_rq(task_cpu(p), p);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002401
2402 return cpu;
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002403}
Mike Galbraith09a40af2010-04-15 07:29:59 +02002404
2405static void update_avg(u64 *avg, u64 sample)
2406{
2407 s64 diff = sample - *avg;
2408 *avg += diff >> 3;
2409}
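/*
 * A minimal sketch (illustrative only): update_avg() is an
 * exponentially weighted moving average with weight 1/8,
 * avg += (sample - avg) / 8.
 */
static void __maybe_unused update_avg_example(void)
{
	u64 avg = 800;

	update_avg(&avg, 1600);	/* avg += (1600 - 800) >> 3 */
	/* avg is now 900; repeated samples converge geometrically */
}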
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002410#endif
2411
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002412static void
2413ttwu_stat(struct rq *rq, struct task_struct *p, int cpu, int wake_flags)
Tejun Heo9ed38112009-12-03 15:08:03 +09002414{
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002415#ifdef CONFIG_SCHEDSTATS
2416#ifdef CONFIG_SMP
2417 int this_cpu = smp_processor_id();
Tejun Heo9ed38112009-12-03 15:08:03 +09002418
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002419 if (cpu == this_cpu) {
2420 schedstat_inc(rq, ttwu_local);
2421 schedstat_inc(p, se.statistics.nr_wakeups_local);
2422 } else {
2423 struct sched_domain *sd;
2424
2425 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2426 for_each_domain(this_cpu, sd) {
2427 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2428 schedstat_inc(sd, ttwu_wake_remote);
2429 break;
2430 }
2431 }
2432 }
2433#endif /* CONFIG_SMP */
2434
2435 schedstat_inc(rq, ttwu_count);
2436 schedstat_inc(p, se.statistics.nr_wakeups);
2437
2438 if (wake_flags & WF_SYNC)
2439 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2440
2441 if (cpu != task_cpu(p))
2442 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2443
2444#endif /* CONFIG_SCHEDSTATS */
2445}
2446
2447static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
2448{
Tejun Heo9ed38112009-12-03 15:08:03 +09002449 activate_task(rq, p, en_flags);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002450 p->on_rq = 1;
Peter Zijlstrac2f71152011-04-13 13:28:56 +02002451
2452 /* if a worker is waking up, notify workqueue */
2453 if (p->flags & PF_WQ_WORKER)
2454 wq_worker_waking_up(p, cpu_of(rq));
Tejun Heo9ed38112009-12-03 15:08:03 +09002455}
2456
Peter Zijlstra89363382011-04-05 17:23:42 +02002457static void
2458ttwu_post_activation(struct task_struct *p, struct rq *rq, int wake_flags)
Tejun Heo9ed38112009-12-03 15:08:03 +09002459{
Peter Zijlstra89363382011-04-05 17:23:42 +02002460 trace_sched_wakeup(p, true);
Tejun Heo9ed38112009-12-03 15:08:03 +09002461 check_preempt_curr(rq, p, wake_flags);
2462
2463 p->state = TASK_RUNNING;
2464#ifdef CONFIG_SMP
2465 if (p->sched_class->task_woken)
2466 p->sched_class->task_woken(rq, p);
2467
2468 if (unlikely(rq->idle_stamp)) {
2469 u64 delta = rq->clock - rq->idle_stamp;
2470 u64 max = 2*sysctl_sched_migration_cost;
2471
2472 if (delta > max)
2473 rq->avg_idle = max;
2474 else
2475 update_avg(&rq->avg_idle, delta);
2476 rq->idle_stamp = 0;
2477 }
2478#endif
2479}
2480
2481/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 * try_to_wake_up - wake up a thread
Tejun Heo9ed38112009-12-03 15:08:03 +09002483 * @p: the thread to be awakened
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 * @state: the mask of task states that can be woken
Tejun Heo9ed38112009-12-03 15:08:03 +09002485 * @wake_flags: wake modifier flags (WF_*)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 *
2487 * Put it on the run-queue if it's not already there. The "current"
2488 * thread is always on the run-queue (except when the actual
2489 * re-schedule is in progress), and as such you're allowed to do
2490 * the simpler "current->state = TASK_RUNNING" to mark yourself
2491 * runnable without the overhead of this.
2492 *
Tejun Heo9ed38112009-12-03 15:08:03 +09002493 * Returns %true if @p was woken up, %false if it was already running
2494 * or @state didn't match @p's state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 */
Peter Zijlstra7d478722009-09-14 19:55:44 +02002496static int try_to_wake_up(struct task_struct *p, unsigned int state,
2497 int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498{
Ingo Molnarcc367732007-10-15 17:00:18 +02002499 int cpu, orig_cpu, this_cpu, success = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 unsigned long flags;
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002501 unsigned long en_flags = ENQUEUE_WAKEUP;
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002502 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002504 this_cpu = get_cpu();
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02002505
Linus Torvalds04e2f172008-02-23 18:05:03 -08002506 smp_wmb();
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002507 raw_spin_lock_irqsave(&p->pi_lock, flags);
2508 rq = __task_rq_lock(p);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002509 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 goto out;
2511
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002512 cpu = task_cpu(p);
2513
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002514 if (p->on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 goto out_running;
2516
Ingo Molnarcc367732007-10-15 17:00:18 +02002517 orig_cpu = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518#ifdef CONFIG_SMP
2519 if (unlikely(task_running(rq, p)))
2520 goto out_activate;
2521
Peter Zijlstraa8e4f2e2011-04-05 17:23:49 +02002522 p->sched_contributes_to_load = !!task_contributes_to_load(p);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002523 p->state = TASK_WAKING;
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002524
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002525 if (p->sched_class->task_waking) {
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02002526 p->sched_class->task_waking(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002527 en_flags |= ENQUEUE_WAKING;
Peter Zijlstra0970d292010-02-15 14:45:54 +01002528 }
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002529
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002530 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002531 if (cpu != orig_cpu)
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002532 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002533 __task_rq_unlock(rq);
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002534
Peter Zijlstra0970d292010-02-15 14:45:54 +01002535 rq = cpu_rq(cpu);
2536 raw_spin_lock(&rq->lock);
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002537
Peter Zijlstra0970d292010-02-15 14:45:54 +01002538 /*
2539 * We migrated the task without holding either rq->lock; however,
2540 * since the task is not on the task list itself, nobody else
2541 * will try to migrate the task, hence the rq should match the
2542 * cpu we just moved it to.
2543 */
2544 WARN_ON(task_cpu(p) != cpu);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002545 WARN_ON(p->state != TASK_WAKING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
Peter Zijlstraa8e4f2e2011-04-05 17:23:49 +02002547 if (p->sched_contributes_to_load)
2548 rq->nr_uninterruptible--;
2549
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550out_activate:
2551#endif /* CONFIG_SMP */
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002552 ttwu_activate(rq, p, en_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553out_running:
Peter Zijlstra89363382011-04-05 17:23:42 +02002554 ttwu_post_activation(p, rq, wake_flags);
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002555 ttwu_stat(rq, p, cpu, wake_flags);
Peter Zijlstra89363382011-04-05 17:23:42 +02002556 success = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557out:
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002558 __task_rq_unlock(rq);
2559 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002560 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561
2562 return success;
2563}
2564
David Howells50fa6102009-04-28 15:01:38 +01002565/**
Tejun Heo21aa9af2010-06-08 21:40:37 +02002566 * try_to_wake_up_local - try to wake up a local task with rq lock held
2567 * @p: the thread to be awakened
2568 *
Peter Zijlstra2acca552011-04-05 17:23:50 +02002569 * Put @p on the run-queue if it's not already there. The caller must
Tejun Heo21aa9af2010-06-08 21:40:37 +02002570 * ensure that this_rq() is locked, @p is bound to this_rq() and not
Peter Zijlstra2acca552011-04-05 17:23:50 +02002571 * the current task.
Tejun Heo21aa9af2010-06-08 21:40:37 +02002572 */
2573static void try_to_wake_up_local(struct task_struct *p)
2574{
2575 struct rq *rq = task_rq(p);
Tejun Heo21aa9af2010-06-08 21:40:37 +02002576
2577 BUG_ON(rq != this_rq());
2578 BUG_ON(p == current);
2579 lockdep_assert_held(&rq->lock);
2580
Peter Zijlstra2acca552011-04-05 17:23:50 +02002581 if (!raw_spin_trylock(&p->pi_lock)) {
2582 raw_spin_unlock(&rq->lock);
2583 raw_spin_lock(&p->pi_lock);
2584 raw_spin_lock(&rq->lock);
2585 }
2586
Tejun Heo21aa9af2010-06-08 21:40:37 +02002587 if (!(p->state & TASK_NORMAL))
Peter Zijlstra2acca552011-04-05 17:23:50 +02002588 goto out;
Tejun Heo21aa9af2010-06-08 21:40:37 +02002589
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002590 if (!p->on_rq)
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002591 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2592
Peter Zijlstra89363382011-04-05 17:23:42 +02002593 ttwu_post_activation(p, rq, 0);
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002594 ttwu_stat(rq, p, smp_processor_id(), 0);
Peter Zijlstra2acca552011-04-05 17:23:50 +02002595out:
2596 raw_spin_unlock(&p->pi_lock);
Tejun Heo21aa9af2010-06-08 21:40:37 +02002597}
2598
2599/**
David Howells50fa6102009-04-28 15:01:38 +01002600 * wake_up_process - Wake up a specific process
2601 * @p: The process to be woken up.
2602 *
2603 * Attempt to wake up the nominated process and move it to the set of runnable
2604 * processes. Returns 1 if the process was woken up, 0 if it was already
2605 * running.
2606 *
2607 * It may be assumed that this function implies a write memory barrier before
2608 * changing the task state if and only if any tasks are woken up.
2609 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002610int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05002612 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614EXPORT_SYMBOL(wake_up_process);
2615
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002616int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617{
2618 return try_to_wake_up(p, state, 0);
2619}
2620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621/*
2622 * Perform scheduler related setup for a newly forked process p.
2623 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02002624 *
2625 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002627static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628{
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002629 p->on_rq = 0;
2630
2631 p->se.on_rq = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02002632 p->se.exec_start = 0;
2633 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02002634 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002635 p->se.nr_migrations = 0;
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002636 p->se.vruntime = 0;
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002637 INIT_LIST_HEAD(&p->se.group_node);
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002638
2639#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03002640 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002641#endif
Nick Piggin476d1392005-06-25 14:57:29 -07002642
Peter Zijlstrafa717062008-01-25 21:08:27 +01002643 INIT_LIST_HEAD(&p->rt.run_list);
Nick Piggin476d1392005-06-25 14:57:29 -07002644
Avi Kivitye107be32007-07-26 13:40:43 +02002645#ifdef CONFIG_PREEMPT_NOTIFIERS
2646 INIT_HLIST_HEAD(&p->preempt_notifiers);
2647#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002648}
2649
2650/*
2651 * fork()/clone()-time setup:
2652 */
2653void sched_fork(struct task_struct *p, int clone_flags)
2654{
2655 int cpu = get_cpu();
2656
2657 __sched_fork(p);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002658 /*
Peter Zijlstra0017d732010-03-24 18:34:10 +01002659	 * We mark the process as running here, though it is not yet on any
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002660	 * runqueue. This guarantees that nobody will actually run it, and a
2661	 * signal or other external event cannot wake it up and enqueue it either.
2662 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002663 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02002664
Ingo Molnarb29739f2006-06-27 02:54:51 -07002665 /*
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002666 * Revert to default priority/policy on fork if requested.
2667 */
2668 if (unlikely(p->sched_reset_on_fork)) {
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002669 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002670 p->policy = SCHED_NORMAL;
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002671 p->normal_prio = p->static_prio;
2672 }
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002673
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002674 if (PRIO_TO_NICE(p->static_prio) < 0) {
2675 p->static_prio = NICE_TO_PRIO(0);
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002676 p->normal_prio = p->static_prio;
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002677 set_load_weight(p);
2678 }
2679
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002680 /*
2681 * We don't need the reset flag anymore after the fork. It has
2682 * fulfilled its duty:
2683 */
2684 p->sched_reset_on_fork = 0;
2685 }
Lennart Poetteringca94c442009-06-15 17:17:47 +02002686
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002687 /*
2688 * Make sure we do not leak PI boosting priority to the child.
2689 */
2690 p->prio = current->normal_prio;
2691
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02002692 if (!rt_prio(p->prio))
2693 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07002694
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002695 if (p->sched_class->task_fork)
2696 p->sched_class->task_fork(p);
2697
Peter Zijlstra86951592010-06-22 11:44:53 +02002698 /*
2699 * The child is not yet in the pid-hash so no cgroup attach races,
2700	 * and the cgroup is pinned to this child because cgroup_fork()
2701	 * runs before sched_fork().
2702 *
2703 * Silence PROVE_RCU.
2704 */
2705 rcu_read_lock();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002706 set_task_cpu(p, cpu);
Peter Zijlstra86951592010-06-22 11:44:53 +02002707 rcu_read_unlock();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002708
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002709#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02002710 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002711 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712#endif
Peter Zijlstra3ca7a442011-04-05 17:23:40 +02002713#if defined(CONFIG_SMP)
2714 p->on_cpu = 0;
Nick Piggin4866cde2005-06-25 14:57:23 -07002715#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716#ifdef CONFIG_PREEMPT
Nick Piggin4866cde2005-06-25 14:57:23 -07002717 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08002718 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719#endif
Dario Faggioli806c09a2010-11-30 19:51:33 +01002720#ifdef CONFIG_SMP
Gregory Haskins917b6272008-12-29 09:39:53 -05002721 plist_node_init(&p->pushable_tasks, MAX_PRIO);
Dario Faggioli806c09a2010-11-30 19:51:33 +01002722#endif
Gregory Haskins917b6272008-12-29 09:39:53 -05002723
Nick Piggin476d1392005-06-25 14:57:29 -07002724 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725}
2726
2727/*
2728 * wake_up_new_task - wake up a newly created task for the first time.
2729 *
2730 * This function will do some initial scheduler statistics housekeeping
2731 * that must be done for every newly created context, then puts the task
2732 * on the runqueue and wakes it.
2733 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002734void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735{
2736 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002737 struct rq *rq;
Andrew Mortonc8906922010-03-11 14:08:43 -08002738 int cpu __maybe_unused = get_cpu();
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002739
2740#ifdef CONFIG_SMP
Peter Zijlstra0017d732010-03-24 18:34:10 +01002741 rq = task_rq_lock(p, &flags);
2742 p->state = TASK_WAKING;
2743
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002744 /*
2745 * Fork balancing, do it here and not earlier because:
2746 * - cpus_allowed can change in the fork path
2747 * - any previously selected cpu might disappear through hotplug
2748 *
Peter Zijlstra0017d732010-03-24 18:34:10 +01002749 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2750 * without people poking at ->cpus_allowed.
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002751 */
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002752 cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002753 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002754
2755 p->state = TASK_RUNNING;
2756 task_rq_unlock(rq, &flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002757#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758
Peter Zijlstra0017d732010-03-24 18:34:10 +01002759 rq = task_rq_lock(p, &flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002760 activate_task(rq, p, 0);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002761 p->on_rq = 1;
Peter Zijlstra89363382011-04-05 17:23:42 +02002762 trace_sched_wakeup_new(p, true);
Peter Zijlstraa7558e02009-09-14 20:02:34 +02002763 check_preempt_curr(rq, p, WF_FORK);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002764#ifdef CONFIG_SMP
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002765 if (p->sched_class->task_woken)
2766 p->sched_class->task_woken(rq, p);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002767#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002768 task_rq_unlock(rq, &flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002769 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770}
2771
Avi Kivitye107be32007-07-26 13:40:43 +02002772#ifdef CONFIG_PREEMPT_NOTIFIERS
2773
2774/**
Luis Henriques80dd99b2009-03-16 19:58:09 +00002775 * preempt_notifier_register - tell me when current is being preempted & rescheduled
Randy Dunlap421cee22007-07-31 00:37:50 -07002776 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02002777 */
2778void preempt_notifier_register(struct preempt_notifier *notifier)
2779{
2780 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2781}
2782EXPORT_SYMBOL_GPL(preempt_notifier_register);
2783
2784/**
2785 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07002786 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02002787 *
2788 * This is safe to call from within a preemption notifier.
2789 */
2790void preempt_notifier_unregister(struct preempt_notifier *notifier)
2791{
2792 hlist_del(&notifier->link);
2793}
2794EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2795
2796static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2797{
2798 struct preempt_notifier *notifier;
2799 struct hlist_node *node;
2800
2801 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2802 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2803}
2804
2805static void
2806fire_sched_out_preempt_notifiers(struct task_struct *curr,
2807 struct task_struct *next)
2808{
2809 struct preempt_notifier *notifier;
2810 struct hlist_node *node;
2811
2812 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2813 notifier->ops->sched_out(notifier, next);
2814}
2815
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002816#else /* !CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002817
2818static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2819{
2820}
2821
2822static void
2823fire_sched_out_preempt_notifiers(struct task_struct *curr,
2824 struct task_struct *next)
2825{
2826}
2827
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002828#endif /* CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002829
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830/**
Nick Piggin4866cde2005-06-25 14:57:23 -07002831 * prepare_task_switch - prepare to switch tasks
2832 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07002833 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07002834 * @next: the task we are going to switch to.
2835 *
2836 * This is called with the rq lock held and interrupts off. It must
2837 * be paired with a subsequent finish_task_switch after the context
2838 * switch.
2839 *
2840 * prepare_task_switch sets up locking and calls architecture specific
2841 * hooks.
2842 */
Avi Kivitye107be32007-07-26 13:40:43 +02002843static inline void
2844prepare_task_switch(struct rq *rq, struct task_struct *prev,
2845 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07002846{
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002847 sched_info_switch(prev, next);
2848 perf_event_task_sched_out(prev, next);
Avi Kivitye107be32007-07-26 13:40:43 +02002849 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002850 prepare_lock_switch(rq, next);
2851 prepare_arch_switch(next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002852 trace_sched_switch(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002853}
2854
2855/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04002857 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 * @prev: the thread we just switched away from.
2859 *
Nick Piggin4866cde2005-06-25 14:57:23 -07002860 * finish_task_switch must be called after the context switch, paired
2861 * with a prepare_task_switch call before the context switch.
2862 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2863 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 *
2865 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002866 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 * with the lock held can cause deadlocks; see schedule() for
2868 * details.)
2869 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02002870static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 __releases(rq->lock)
2872{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002874 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875
2876 rq->prev_mm = NULL;
2877
2878 /*
2879 * A task struct has one reference for the use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002880 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002881 * schedule one last time. The schedule call will never return, and
2882 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002883 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 * still held, otherwise prev could be scheduled on another cpu, die
2885 * there before we look at prev->state, and then the reference would
2886 * be dropped twice.
2887 * Manfred Spraul <manfred@colorfullife.com>
2888 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002889 prev_state = prev->state;
Nick Piggin4866cde2005-06-25 14:57:23 -07002890 finish_arch_switch(prev);
Jamie Iles8381f652010-01-08 15:27:33 +00002891#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2892 local_irq_disable();
2893#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Peter Zijlstra49f47432009-12-27 11:51:52 +01002894 perf_event_task_sched_in(current);
Jamie Iles8381f652010-01-08 15:27:33 +00002895#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2896 local_irq_enable();
2897#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Nick Piggin4866cde2005-06-25 14:57:23 -07002898 finish_lock_switch(rq, prev);
Steven Rostedte8fa1362008-01-25 21:08:05 +01002899
Avi Kivitye107be32007-07-26 13:40:43 +02002900 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901 if (mm)
2902 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002903 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08002904 /*
2905 * Remove function-return probe instances associated with this
2906 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02002907 */
bibo maoc6fd91f2006-03-26 01:38:20 -08002908 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08002910 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911}
2912
Gregory Haskins3f029d32009-07-29 11:08:47 -04002913#ifdef CONFIG_SMP
2914
2915/* assumes rq->lock is held */
2916static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2917{
2918 if (prev->sched_class->pre_schedule)
2919 prev->sched_class->pre_schedule(rq, prev);
2920}
2921
2922/* rq->lock is NOT held, but preemption is disabled */
2923static inline void post_schedule(struct rq *rq)
2924{
2925 if (rq->post_schedule) {
2926 unsigned long flags;
2927
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002928 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002929 if (rq->curr->sched_class->post_schedule)
2930 rq->curr->sched_class->post_schedule(rq);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002931 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002932
2933 rq->post_schedule = 0;
2934 }
2935}
2936
2937#else
2938
2939static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2940{
2941}
2942
2943static inline void post_schedule(struct rq *rq)
2944{
2945}
2946
2947#endif
2948
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949/**
2950 * schedule_tail - first thing a freshly forked thread must call.
2951 * @prev: the thread we just switched away from.
2952 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002953asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954 __releases(rq->lock)
2955{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002956 struct rq *rq = this_rq();
2957
Nick Piggin4866cde2005-06-25 14:57:23 -07002958 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002959
Gregory Haskins3f029d32009-07-29 11:08:47 -04002960 /*
2961 * FIXME: do we need to worry about rq being invalidated by the
2962 * task_switch?
2963 */
2964 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002965
Nick Piggin4866cde2005-06-25 14:57:23 -07002966#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2967 /* In this case, finish_task_switch does not reenable preemption */
2968 preempt_enable();
2969#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002971 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972}
2973
2974/*
2975 * context_switch - switch to the new MM and the new
2976 * thread's register state.
2977 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002978static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07002979context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07002980 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981{
Ingo Molnardd41f592007-07-09 18:51:59 +02002982 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983
Avi Kivitye107be32007-07-26 13:40:43 +02002984 prepare_task_switch(rq, prev, next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002985
Ingo Molnardd41f592007-07-09 18:51:59 +02002986 mm = next->mm;
2987 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01002988 /*
2989 * For paravirt, this is coupled with an exit in switch_to to
2990 * combine the page table reload and the switch backend into
2991 * one hypercall.
2992 */
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -08002993 arch_start_context_switch(prev);
Zachary Amsden9226d122007-02-13 13:26:21 +01002994
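	/*
	 * Kernel threads have no mm of their own: they borrow the previous
	 * task's active_mm and run in lazy-TLB mode rather than switching
	 * page tables.
	 */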
Heiko Carstens31915ab2010-09-16 14:42:25 +02002995 if (!mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 next->active_mm = oldmm;
2997 atomic_inc(&oldmm->mm_count);
2998 enter_lazy_tlb(oldmm, next);
2999 } else
3000 switch_mm(oldmm, mm, next);
3001
Heiko Carstens31915ab2010-09-16 14:42:25 +02003002 if (!prev->mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 rq->prev_mm = oldmm;
3005 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07003006 /*
3007	 * The runqueue lock will be released by the next
3008	 * task (which is an invalid locking op but in the case
3009	 * of the scheduler it's an obvious special-case), so we
3010 * do an early lockdep release here:
3011 */
3012#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07003013 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07003014#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015
3016 /* Here we just switch the register state and the stack. */
3017 switch_to(prev, next, prev);
3018
Ingo Molnardd41f592007-07-09 18:51:59 +02003019 barrier();
3020 /*
3021 * this_rq must be evaluated again because prev may have moved
3022 * CPUs since it called schedule(), thus the 'rq' on its stack
3023 * frame will be invalid.
3024 */
3025 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026}
3027
3028/*
3029 * nr_running, nr_uninterruptible and nr_context_switches:
3030 *
3031 * externally visible scheduler statistics: current number of runnable
3032 * threads, current number of uninterruptible-sleeping threads, total
3033 * number of context switches performed since bootup.
3034 */
3035unsigned long nr_running(void)
3036{
3037 unsigned long i, sum = 0;
3038
3039 for_each_online_cpu(i)
3040 sum += cpu_rq(i)->nr_running;
3041
3042 return sum;
3043}
3044
3045unsigned long nr_uninterruptible(void)
3046{
3047 unsigned long i, sum = 0;
3048
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003049 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 sum += cpu_rq(i)->nr_uninterruptible;
3051
3052 /*
3053	 * Since we read the counters locklessly, the sum might be slightly
3054	 * inaccurate. Do not allow it to go below zero though:
3055 */
3056 if (unlikely((long)sum < 0))
3057 sum = 0;
3058
3059 return sum;
3060}
3061
3062unsigned long long nr_context_switches(void)
3063{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07003064 int i;
3065 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003067 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 sum += cpu_rq(i)->nr_switches;
3069
3070 return sum;
3071}
3072
3073unsigned long nr_iowait(void)
3074{
3075 unsigned long i, sum = 0;
3076
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003077 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 sum += atomic_read(&cpu_rq(i)->nr_iowait);
3079
3080 return sum;
3081}
3082
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003083unsigned long nr_iowait_cpu(int cpu)
Arjan van de Ven69d25872009-09-21 17:04:08 -07003084{
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003085 struct rq *this = cpu_rq(cpu);
Arjan van de Ven69d25872009-09-21 17:04:08 -07003086 return atomic_read(&this->nr_iowait);
3087}
3088
3089unsigned long this_cpu_load(void)
3090{
3091 struct rq *this = this_rq();
3092 return this->cpu_load[0];
3093}
3094
3095
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003096/* Variables and functions for calc_load */
3097static atomic_long_t calc_load_tasks;
3098static unsigned long calc_load_update;
3099unsigned long avenrun[3];
3100EXPORT_SYMBOL(avenrun);
3101
Peter Zijlstra74f51872010-04-22 21:50:19 +02003102static long calc_load_fold_active(struct rq *this_rq)
3103{
3104 long nr_active, delta = 0;
3105
3106 nr_active = this_rq->nr_running;
3107 nr_active += (long) this_rq->nr_uninterruptible;
3108
3109 if (nr_active != this_rq->calc_load_active) {
3110 delta = nr_active - this_rq->calc_load_active;
3111 this_rq->calc_load_active = nr_active;
3112 }
3113
3114 return delta;
3115}
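
/*
 * Illustrative example: with 3 runnable and 1 uninterruptible task on
 * this rq, and a previously folded calc_load_active of 2, the fold
 * returns a delta of +2 and records 4, so each task is contributed to
 * the global calc_load_tasks exactly once.
 */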
3116
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003117static unsigned long
3118calc_load(unsigned long load, unsigned long exp, unsigned long active)
3119{
3120 load *= exp;
3121 load += active * (FIXED_1 - exp);
3122 load += 1UL << (FSHIFT - 1);
3123 return load >> FSHIFT;
3124}
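
/*
 * Worked example (illustrative, assuming the usual constants from
 * <linux/sched.h>: FSHIFT = 11, FIXED_1 = 1 << 11 = 2048, EXP_1 = 1884):
 * starting from an idle system (load == 0) with two runnable tasks,
 *
 *	calc_load(0, EXP_1, 2 * FIXED_1)
 *		= (0 * 1884 + 4096 * (2048 - 1884) + 1024) >> 11
 *		= 328			(~0.16 as shown in /proc/loadavg)
 *
 * so the 1-minute average climbs gradually towards 2.00 with each
 * LOAD_FREQ update rather than jumping there at once.
 */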
3125
Peter Zijlstra74f51872010-04-22 21:50:19 +02003126#ifdef CONFIG_NO_HZ
3127/*
3128 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3129 *
3130 * When making the ILB scale, we should try to pull this in as well.
3131 */
3132static atomic_long_t calc_load_tasks_idle;
3133
3134static void calc_load_account_idle(struct rq *this_rq)
3135{
3136 long delta;
3137
3138 delta = calc_load_fold_active(this_rq);
3139 if (delta)
3140 atomic_long_add(delta, &calc_load_tasks_idle);
3141}
3142
3143static long calc_load_fold_idle(void)
3144{
3145 long delta = 0;
3146
3147 /*
3148	 * It's got a race; we don't care...
3149 */
3150 if (atomic_long_read(&calc_load_tasks_idle))
3151 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3152
3153 return delta;
3154}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003155
3156/**
3157 * fixed_power_int - compute: x^n, in O(log n) time
3158 *
3159 * @x: base of the power
3160 * @frac_bits: fractional bits of @x
3161 * @n: power to raise @x to.
3162 *
3163 * By exploiting the relation between the definition of the natural power
3164 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
3165 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3166 * (where: n_i \elem {0, 1}, the binary vector representing n),
3167 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3168 * of course trivially computable in O(log_2 n), the length of our binary
3169 * vector.
3170 */
3171static unsigned long
3172fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3173{
3174 unsigned long result = 1UL << frac_bits;
3175
3176 if (n) for (;;) {
3177 if (n & 1) {
3178 result *= x;
3179 result += 1UL << (frac_bits - 1);
3180 result >>= frac_bits;
3181 }
3182 n >>= 1;
3183 if (!n)
3184 break;
3185 x *= x;
3186 x += 1UL << (frac_bits - 1);
3187 x >>= frac_bits;
3188 }
3189
3190 return result;
3191}
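
/*
 * Example (illustrative): for n = 5 = 0b101 the loop multiplies the
 * result by the current power of x only where a bit of n is set:
 *
 *	x^5 = x^4 * x^1
 *
 * i.e. two fixed-point multiplies plus two squarings of x, instead of
 * the four multiplies of the naive x*x*x*x*x; each step re-rounds by
 * adding 1 << (frac_bits - 1) before shifting down.
 */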
3192
3193/*
3194 * a1 = a0 * e + a * (1 - e)
3195 *
3196 * a2 = a1 * e + a * (1 - e)
3197 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3198 * = a0 * e^2 + a * (1 - e) * (1 + e)
3199 *
3200 * a3 = a2 * e + a * (1 - e)
3201 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3202 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3203 *
3204 * ...
3205 *
3206 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3207 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3208 * = a0 * e^n + a * (1 - e^n)
3209 *
3210 * [1] application of the geometric series:
3211 *
3212 * n 1 - x^(n+1)
3213 * S_n := \Sum x^i = -------------
3214 * i=0 1 - x
3215 */
3216static unsigned long
3217calc_load_n(unsigned long load, unsigned long exp,
3218 unsigned long active, unsigned int n)
3219{
3220
3221 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3222}
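
/*
 * Note (illustrative): with no activity (active == 0) the closed form
 * above reduces to a_n = a_0 * e^n, i.e. an idle period of n cycles
 * simply decays the old average n times in a single step.
 */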
3223
3224/*
3225 * NO_HZ can leave us missing all per-cpu ticks calling
3226 * calc_load_account_active(), but since an idle CPU folds its delta into
3227 * calc_load_tasks_idle via calc_load_account_idle(), all we need to do is fold
3228 * in the pending idle delta if our idle period crossed a load cycle boundary.
3229 *
3230 * Once we've updated the global active value, we need to apply the exponential
3231 * weights adjusted to the number of cycles missed.
3232 */
3233static void calc_global_nohz(unsigned long ticks)
3234{
3235 long delta, active, n;
3236
3237 if (time_before(jiffies, calc_load_update))
3238 return;
3239
3240 /*
3241 * If we crossed a calc_load_update boundary, make sure to fold
3242 * any pending idle changes, the respective CPUs might have
3243 * missed the tick driven calc_load_account_active() update
3244 * due to NO_HZ.
3245 */
3246 delta = calc_load_fold_idle();
3247 if (delta)
3248 atomic_long_add(delta, &calc_load_tasks);
3249
3250 /*
3251 * If we were idle for multiple load cycles, apply them.
3252 */
3253 if (ticks >= LOAD_FREQ) {
3254 n = ticks / LOAD_FREQ;
3255
3256 active = atomic_long_read(&calc_load_tasks);
3257 active = active > 0 ? active * FIXED_1 : 0;
3258
3259 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3260 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3261 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3262
3263 calc_load_update += n * LOAD_FREQ;
3264 }
3265
3266 /*
3267	 * It's possible the remainder of the above division also crosses
3268 * a LOAD_FREQ period, the regular check in calc_global_load()
3269 * which comes after this will take care of that.
3270 *
3271 * Consider us being 11 ticks before a cycle completion, and us
3272 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3273 * age us 4 cycles, and the test in calc_global_load() will
3274 * pick up the final one.
3275 */
3276}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003277#else
3278static void calc_load_account_idle(struct rq *this_rq)
3279{
3280}
3281
3282static inline long calc_load_fold_idle(void)
3283{
3284 return 0;
3285}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003286
3287static void calc_global_nohz(unsigned long ticks)
3288{
3289}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003290#endif
3291
Thomas Gleixner2d024942009-05-02 20:08:52 +02003292/**
3293 * get_avenrun - get the load average array
3294 * @loads: pointer to dest load array
3295 * @offset: offset to add
3296 * @shift: shift count to shift the result left
3297 *
3298 * These values are estimates at best, so no need for locking.
3299 */
3300void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3301{
3302 loads[0] = (avenrun[0] + offset) << shift;
3303 loads[1] = (avenrun[1] + offset) << shift;
3304 loads[2] = (avenrun[2] + offset) << shift;
3305}
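
/*
 * Usage sketch (illustrative): a reader such as fs/proc/loadavg.c can
 * round to two decimal places by passing an offset of FIXED_1/200 and
 * then splitting each value into integer and fractional parts:
 *
 *	get_avenrun(avnrun, FIXED_1 / 200, 0);
 *	whole = avnrun[0] >> FSHIFT;
 *	frac  = ((avnrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT;
 */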
3306
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003307/*
3308 * calc_global_load - update the avenrun load estimates 10 ticks after the
3309 * CPUs have updated calc_load_tasks.
3310 */
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003311void calc_global_load(unsigned long ticks)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003312{
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003313 long active;
3314
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003315 calc_global_nohz(ticks);
3316
3317 if (time_before(jiffies, calc_load_update + 10))
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003318 return;
3319
3320 active = atomic_long_read(&calc_load_tasks);
3321 active = active > 0 ? active * FIXED_1 : 0;
3322
3323 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3324 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3325 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3326
3327 calc_load_update += LOAD_FREQ;
3328}
3329
3330/*
Peter Zijlstra74f51872010-04-22 21:50:19 +02003331 * Called from update_cpu_load() to periodically update this CPU's
3332 * active count.
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003333 */
3334static void calc_load_account_active(struct rq *this_rq)
3335{
Peter Zijlstra74f51872010-04-22 21:50:19 +02003336 long delta;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003337
Peter Zijlstra74f51872010-04-22 21:50:19 +02003338 if (time_before(jiffies, this_rq->calc_load_update))
3339 return;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003340
Peter Zijlstra74f51872010-04-22 21:50:19 +02003341 delta = calc_load_fold_active(this_rq);
3342 delta += calc_load_fold_idle();
3343 if (delta)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003344 atomic_long_add(delta, &calc_load_tasks);
Peter Zijlstra74f51872010-04-22 21:50:19 +02003345
3346 this_rq->calc_load_update += LOAD_FREQ;
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08003347}
3348
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349/*
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003350 * The exact cpuload at various idx values, calculated at every tick, would be
3351 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3352 *
3353 * If a cpu misses updates for n-1 ticks (as it was idle) and the update
3354 * gets called on the nth tick, when the cpu may be busy, then we have:
3355 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3356 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3357 *
3358 * decay_load_missed() below does efficient calculation of
3359 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3360 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3361 *
3362 * The calculation is approximated on a 128 point scale.
3363 * degrade_zero_ticks is the number of ticks after which load at any
3364 * particular idx is approximated to be zero.
3365 * degrade_factor is a precomputed table, a row for each load idx.
3366 * Each column corresponds to degradation factor for a power of two ticks,
3367 * based on 128 point scale.
3368 * Example:
3369 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3370 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3371 *
3372 * With this power of 2 load factors, we can degrade the load n times
3373 * by looking at 1 bits in n and doing as many mult/shift instead of
3374 * n mult/shifts needed by the exact degradation.
3375 */
3376#define DEGRADE_SHIFT 7
3377static const unsigned char
3378 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3379static const unsigned char
3380 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3381 {0, 0, 0, 0, 0, 0, 0, 0},
3382 {64, 32, 8, 0, 0, 0, 0, 0},
3383					{96, 72, 40, 12, 1, 0, 0, 0},
3384					{112, 98, 75, 43, 15, 1, 0, 0},
3385					{120, 112, 98, 76, 45, 16, 2, 0} };
3386
3387/*
3388 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
3389 * accumulates while the CPU is idle, so we just decay the old load without
3390 * adding any new load.
3391 */
3392static unsigned long
3393decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3394{
3395 int j = 0;
3396
3397 if (!missed_updates)
3398 return load;
3399
3400 if (missed_updates >= degrade_zero_ticks[idx])
3401 return 0;
3402
3403 if (idx == 1)
3404 return load >> missed_updates;
3405
3406 while (missed_updates) {
3407 if (missed_updates % 2)
3408 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3409
3410 missed_updates >>= 1;
3411 j++;
3412 }
3413 return load;
3414}
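
/*
 * Worked example (illustrative): for idx = 2 and missed_updates = 5
 * (0b101), the loop multiplies by degrade_factor[2][0] = 96/128 for
 * bit 0 and by degrade_factor[2][2] = 40/128 for bit 2, approximating
 * (3/4)^5 with two multiply+shift steps instead of five.
 */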
3415
3416/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003417 * Update rq->cpu_load[] statistics. This function is usually called every
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003418 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3419 * every tick. We fix it up based on jiffies.
Ingo Molnar48f24c42006-07-03 00:25:40 -07003420 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003421static void update_cpu_load(struct rq *this_rq)
Ingo Molnar48f24c42006-07-03 00:25:40 -07003422{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02003423 unsigned long this_load = this_rq->load.weight;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003424 unsigned long curr_jiffies = jiffies;
3425 unsigned long pending_updates;
Ingo Molnardd41f592007-07-09 18:51:59 +02003426 int i, scale;
3427
3428 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02003429
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003430 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3431 if (curr_jiffies == this_rq->last_load_update_tick)
3432 return;
3433
3434 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3435 this_rq->last_load_update_tick = curr_jiffies;
3436
Ingo Molnardd41f592007-07-09 18:51:59 +02003437 /* Update our load: */
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003438 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3439 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
Ingo Molnardd41f592007-07-09 18:51:59 +02003440 unsigned long old_load, new_load;
3441
3442 /* scale is effectively 1 << i now, and >> i divides by scale */
3443
3444 old_load = this_rq->cpu_load[i];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003445 old_load = decay_load_missed(old_load, pending_updates - 1, i);
Ingo Molnardd41f592007-07-09 18:51:59 +02003446 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02003447 /*
3448 * Round up the averaging division if load is increasing. This
3449 * prevents us from getting stuck on 9 if the load is 10, for
3450 * example.
3451 */
3452 if (new_load > old_load)
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003453 new_load += scale - 1;
3454
3455 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
Ingo Molnardd41f592007-07-09 18:51:59 +02003456 }
Suresh Siddhada2b71e2010-08-23 13:42:51 -07003457
3458 sched_avg_update(this_rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003459}
3460
3461static void update_cpu_load_active(struct rq *this_rq)
3462{
3463 update_cpu_load(this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003464
Peter Zijlstra74f51872010-04-22 21:50:19 +02003465 calc_load_account_active(this_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07003466}
3467
Ingo Molnardd41f592007-07-09 18:51:59 +02003468#ifdef CONFIG_SMP
3469
Ingo Molnar48f24c42006-07-03 00:25:40 -07003470/*
Peter Zijlstra38022902009-12-16 18:04:37 +01003471 * sched_exec - execve() is a valuable balancing opportunity, because at
3472 * this point the task has the smallest effective memory and cache footprint.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 */
Peter Zijlstra38022902009-12-16 18:04:37 +01003474void sched_exec(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475{
Peter Zijlstra38022902009-12-16 18:04:37 +01003476 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003478 struct rq *rq;
Peter Zijlstra0017d732010-03-24 18:34:10 +01003479 int dest_cpu;
Peter Zijlstra38022902009-12-16 18:04:37 +01003480
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481 rq = task_rq_lock(p, &flags);
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003482 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
Peter Zijlstra0017d732010-03-24 18:34:10 +01003483 if (dest_cpu == smp_processor_id())
3484 goto unlock;
Peter Zijlstra38022902009-12-16 18:04:37 +01003485
3486 /*
3487 * select_task_rq() can race against ->cpus_allowed
3488 */
Oleg Nesterov30da6882010-03-15 10:10:19 +01003489 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003490 likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
Tejun Heo969c7922010-05-06 18:49:21 +02003491 struct migration_arg arg = { p, dest_cpu };
Ingo Molnar36c8b582006-07-03 00:25:41 -07003492
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493 task_rq_unlock(rq, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02003494 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 return;
3496 }
Peter Zijlstra0017d732010-03-24 18:34:10 +01003497unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 task_rq_unlock(rq, &flags);
3499}
3500
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501#endif
3502
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503DEFINE_PER_CPU(struct kernel_stat, kstat);
3504
3505EXPORT_PER_CPU_SYMBOL(kstat);
3506
3507/*
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003508 * Return any ns on the sched_clock that have not yet been accounted to
Frank Mayharf06febc2008-09-12 09:54:39 -07003509 * @p, in case that task is currently running.
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003510 *
3511 * Called with task_rq_lock() held on @rq.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 */
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003513static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3514{
3515 u64 ns = 0;
3516
3517 if (task_current(rq, p)) {
3518 update_rq_clock(rq);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07003519 ns = rq->clock_task - p->se.exec_start;
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003520 if ((s64)ns < 0)
3521 ns = 0;
3522 }
3523
3524 return ns;
3525}
3526
Frank Mayharbb34d922008-09-12 09:54:39 -07003527unsigned long long task_delta_exec(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 unsigned long flags;
Ingo Molnar41b86e92007-07-09 18:51:58 +02003530 struct rq *rq;
Frank Mayharbb34d922008-09-12 09:54:39 -07003531 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003532
Ingo Molnar41b86e92007-07-09 18:51:58 +02003533 rq = task_rq_lock(p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003534 ns = do_task_delta_exec(p, rq);
3535 task_rq_unlock(rq, &flags);
Ingo Molnar15084872008-09-30 08:28:17 +02003536
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003537 return ns;
3538}
Frank Mayharf06febc2008-09-12 09:54:39 -07003539
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003540/*
3541 * Return accounted runtime for the task.
3542 * In case the task is currently running, return the runtime plus current's
3543 * pending runtime that has not been accounted yet.
3544 */
3545unsigned long long task_sched_runtime(struct task_struct *p)
3546{
3547 unsigned long flags;
3548 struct rq *rq;
3549 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003550
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003551 rq = task_rq_lock(p, &flags);
3552 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3553 task_rq_unlock(rq, &flags);
3554
3555 return ns;
3556}
3557
3558/*
3559 * Return sum_exec_runtime for the thread group.
3560 * In case the task is currently running, return the sum plus current's
3561 * pending runtime that has not been accounted yet.
3562 *
3563 * Note that the thread group might have other running tasks as well,
3564 * so the return value does not include pending runtime that those
3565 * other running tasks might have.
3566 */
3567unsigned long long thread_group_sched_runtime(struct task_struct *p)
3568{
3569 struct task_cputime totals;
3570 unsigned long flags;
3571 struct rq *rq;
3572 u64 ns;
3573
3574 rq = task_rq_lock(p, &flags);
3575 thread_group_cputime(p, &totals);
3576 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 task_rq_unlock(rq, &flags);
3578
3579 return ns;
3580}
3581
3582/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583 * Account user cpu time to a process.
3584 * @p: the process that the cpu time gets accounted to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 * @cputime: the cpu time spent in user space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003586 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003588void account_user_time(struct task_struct *p, cputime_t cputime,
3589 cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590{
3591 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3592 cputime64_t tmp;
3593
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003594 /* Add user time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003596 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003597 account_group_user_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598
3599 /* Add user time to cpustat. */
3600 tmp = cputime_to_cputime64(cputime);
3601 if (TASK_NICE(p) > 0)
3602 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3603 else
3604 cpustat->user = cputime64_add(cpustat->user, tmp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05303605
3606 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
Jonathan Lim49b5cf32008-07-25 01:48:40 -07003607 /* Account for user time used */
3608 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609}
3610
3611/*
Laurent Vivier94886b82007-10-15 17:00:19 +02003612 * Account guest cpu time to a process.
3613 * @p: the process that the cpu time gets accounted to
3614 * @cputime: the cpu time spent in virtual machine since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003615 * @cputime_scaled: cputime scaled by cpu frequency
Laurent Vivier94886b82007-10-15 17:00:19 +02003616 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003617static void account_guest_time(struct task_struct *p, cputime_t cputime,
3618 cputime_t cputime_scaled)
Laurent Vivier94886b82007-10-15 17:00:19 +02003619{
3620 cputime64_t tmp;
3621 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3622
3623 tmp = cputime_to_cputime64(cputime);
3624
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003625 /* Add guest time to process. */
Laurent Vivier94886b82007-10-15 17:00:19 +02003626 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003627 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003628 account_group_user_time(p, cputime);
Laurent Vivier94886b82007-10-15 17:00:19 +02003629 p->gtime = cputime_add(p->gtime, cputime);
3630
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003631 /* Add guest time to cpustat. */
Ryota Ozakice0e7b22009-10-24 01:20:10 +09003632 if (TASK_NICE(p) > 0) {
3633 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3634 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3635 } else {
3636 cpustat->user = cputime64_add(cpustat->user, tmp);
3637 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3638 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003639}
3640
3641/*
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003642 * Account system cpu time to a process and desired cpustat field
3643 * @p: the process that the cpu time gets accounted to
3644 * @cputime: the cpu time spent in kernel space since the last update
3645 * @cputime_scaled: cputime scaled by cpu frequency
3646 * @target_cputime64: pointer to cpustat field that has to be updated
3647 */
3648static inline
3649void __account_system_time(struct task_struct *p, cputime_t cputime,
3650 cputime_t cputime_scaled, cputime64_t *target_cputime64)
3651{
3652 cputime64_t tmp = cputime_to_cputime64(cputime);
3653
3654 /* Add system time to process. */
3655 p->stime = cputime_add(p->stime, cputime);
3656 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3657 account_group_system_time(p, cputime);
3658
3659 /* Add system time to cpustat. */
3660 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
3661 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3662
3663 /* Account for system time used */
3664 acct_update_integrals(p);
3665}
3666
3667/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668 * Account system cpu time to a process.
3669 * @p: the process that the cpu time gets accounted to
3670 * @hardirq_offset: the offset to subtract from hardirq_count()
3671 * @cputime: the cpu time spent in kernel space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003672 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673 */
3674void account_system_time(struct task_struct *p, int hardirq_offset,
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003675 cputime_t cputime, cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003676{
3677 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003678 cputime64_t *target_cputime64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003680 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003681 account_guest_time(p, cputime, cputime_scaled);
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003682 return;
3683 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003684
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 if (hardirq_count() - hardirq_offset)
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003686 target_cputime64 = &cpustat->irq;
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07003687 else if (in_serving_softirq())
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003688 target_cputime64 = &cpustat->softirq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689 else
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003690 target_cputime64 = &cpustat->system;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003691
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003692 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693}
3694
3695/*
3696 * Account for involuntary wait time.
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003697 * @cputime: the cpu time spent in involuntary wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003699void account_steal_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003702 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3703
3704 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705}
3706
Christoph Lameter7835b982006-12-10 02:20:22 -08003707/*
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003708 * Account for idle time.
3709 * @cputime: the cpu time spent in idle wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003711void account_idle_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712{
3713 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003714 cputime64_t cputime64 = cputime_to_cputime64(cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715 struct rq *rq = this_rq();
3716
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003717 if (atomic_read(&rq->nr_iowait) > 0)
3718 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3719 else
3720 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
Christoph Lameter7835b982006-12-10 02:20:22 -08003721}
3722
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003723#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3724
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003725#ifdef CONFIG_IRQ_TIME_ACCOUNTING
3726/*
3727 * Account a tick to a process and cpustat
3728 * @p: the process that the cpu time gets accounted to
3729 * @user_tick: is the tick from userspace
3730 * @rq: the pointer to rq
3731 *
3732 * Tick demultiplexing follows the order
3733 * - pending hardirq update
3734 * - pending softirq update
3735 * - user_time
3736 * - idle_time
3737 * - system time
3738 * - check for guest_time
3739 * - else account as system_time
3740 *
3741 * The check for hardirq is done for both system and user time, as there is
3742 * no timer going off while we are on hardirq and hence we may never get an
3743 * opportunity to update it solely in system time.
3744 * p->stime and friends are only updated on system time and not on irq or
3745 * softirq time, as those no longer count in task exec_runtime.
3746 */
3747static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3748 struct rq *rq)
3749{
3750 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3751 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
3752 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3753
3754 if (irqtime_account_hi_update()) {
3755 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3756 } else if (irqtime_account_si_update()) {
3757 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
Venkatesh Pallipadi414bee92010-12-21 17:09:04 -08003758 } else if (this_cpu_ksoftirqd() == p) {
3759 /*
3760	 * ksoftirqd time does not get accounted in cpu_softirq_time.
3761 * So, we have to handle it separately here.
3762 * Also, p->stime needs to be updated for ksoftirqd.
3763 */
3764 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3765 &cpustat->softirq);
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003766 } else if (user_tick) {
3767 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3768 } else if (p == rq->idle) {
3769 account_idle_time(cputime_one_jiffy);
3770 } else if (p->flags & PF_VCPU) { /* System time or guest time */
3771 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
3772 } else {
3773 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3774 &cpustat->system);
3775 }
3776}
3777
3778static void irqtime_account_idle_ticks(int ticks)
3779{
3780 int i;
3781 struct rq *rq = this_rq();
3782
3783 for (i = 0; i < ticks; i++)
3784 irqtime_account_process_tick(current, 0, rq);
3785}
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003786#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003787static void irqtime_account_idle_ticks(int ticks) {}
3788static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3789 struct rq *rq) {}
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003790#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003791
3792/*
3793 * Account a single tick of cpu time.
3794 * @p: the process that the cpu time gets accounted to
3795 * @user_tick: indicates if the tick is a user or a system tick
3796 */
3797void account_process_tick(struct task_struct *p, int user_tick)
3798{
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003799 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003800 struct rq *rq = this_rq();
3801
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003802 if (sched_clock_irqtime) {
3803 irqtime_account_process_tick(p, user_tick, rq);
3804 return;
3805 }
3806
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003807 if (user_tick)
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003808 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
Eric Dumazetf5f293a2009-04-29 14:44:49 +02003809 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003810 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003811 one_jiffy_scaled);
3812 else
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003813 account_idle_time(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003814}
3815
3816/*
3817 * Account multiple ticks of steal time.
3818 * @p: the process from which the cpu time has been stolen
3819 * @ticks: number of stolen ticks
3820 */
3821void account_steal_ticks(unsigned long ticks)
3822{
3823 account_steal_time(jiffies_to_cputime(ticks));
3824}
3825
3826/*
3827 * Account multiple ticks of idle time.
3828 * @ticks: number of idle ticks
3829 */
3830void account_idle_ticks(unsigned long ticks)
3831{
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003832
3833 if (sched_clock_irqtime) {
3834 irqtime_account_idle_ticks(ticks);
3835 return;
3836 }
3837
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003838 account_idle_time(jiffies_to_cputime(ticks));
3839}
3840
3841#endif
3842
Christoph Lameter7835b982006-12-10 02:20:22 -08003843/*
Balbir Singh49048622008-09-05 18:12:23 +02003844 * Use precise platform statistics if available:
3845 */
3846#ifdef CONFIG_VIRT_CPU_ACCOUNTING
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003847void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003848{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003849 *ut = p->utime;
3850 *st = p->stime;
Balbir Singh49048622008-09-05 18:12:23 +02003851}
3852
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003853void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003854{
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003855 struct task_cputime cputime;
3856
3857 thread_group_cputime(p, &cputime);
3858
3859 *ut = cputime.utime;
3860 *st = cputime.stime;
Balbir Singh49048622008-09-05 18:12:23 +02003861}
3862#else
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003863
3864#ifndef nsecs_to_cputime
Hidetoshi Setob7b20df2009-11-26 14:49:27 +09003865# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003866#endif
3867
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003868void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003869{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003870 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
Balbir Singh49048622008-09-05 18:12:23 +02003871
3872 /*
3873 * Use CFS's precise accounting:
3874 */
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003875 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
Balbir Singh49048622008-09-05 18:12:23 +02003876
3877 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003878 u64 temp = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003879
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003880 temp *= utime;
Balbir Singh49048622008-09-05 18:12:23 +02003881 do_div(temp, total);
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003882 utime = (cputime_t)temp;
3883 } else
3884 utime = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003885
3886 /*
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003887 * Compare with previous values, to keep monotonicity:
Balbir Singh49048622008-09-05 18:12:23 +02003888 */
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003889 p->prev_utime = max(p->prev_utime, utime);
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003890 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
Balbir Singh49048622008-09-05 18:12:23 +02003891
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003892 *ut = p->prev_utime;
3893 *st = p->prev_stime;
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003894}
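The scaling above deserves a worked example: utime and stime are sampled at tick granularity, so they are used only as a ratio against the precise sum_exec_runtime, and the results are clamped against the previously reported values so that times reported to userspace never go backwards. A standalone sketch with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t utime = 30, stime = 70;	/* tick-sampled counts */
	uint64_t rtime = 120;			/* precise runtime, same units */
	uint64_t total = utime + stime;

	/* proportional split, as in task_times() */
	uint64_t ut = total ? rtime * utime / total : rtime;

	/* monotonicity clamp against previously reported values */
	uint64_t prev_ut = 40, prev_st = 0;
	if (ut > prev_ut)
		prev_ut = ut;
	if (rtime - prev_ut > prev_st)
		prev_st = rtime - prev_ut;

	printf("ut=%llu st=%llu\n",	/* prints ut=40 st=80 */
	       (unsigned long long)prev_ut, (unsigned long long)prev_st);
	return 0;
}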
Balbir Singh49048622008-09-05 18:12:23 +02003895
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003896/*
3897 * Must be called with siglock held.
3898 */
3899void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3900{
3901 struct signal_struct *sig = p->signal;
3902 struct task_cputime cputime;
3903 cputime_t rtime, utime, total;
3904
3905 thread_group_cputime(p, &cputime);
3906
3907 total = cputime_add(cputime.utime, cputime.stime);
3908 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3909
3910 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003911 u64 temp = rtime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003912
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003913 temp *= cputime.utime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003914 do_div(temp, total);
3915 utime = (cputime_t)temp;
3916 } else
3917 utime = rtime;
3918
3919 sig->prev_utime = max(sig->prev_utime, utime);
3920 sig->prev_stime = max(sig->prev_stime,
3921 cputime_sub(rtime, sig->prev_utime));
3922
3923 *ut = sig->prev_utime;
3924 *st = sig->prev_stime;
Balbir Singh49048622008-09-05 18:12:23 +02003925}
3926#endif
3927
Balbir Singh49048622008-09-05 18:12:23 +02003928/*
Christoph Lameter7835b982006-12-10 02:20:22 -08003929 * This function gets called by the timer code, with HZ frequency.
3930 * We call it with interrupts disabled.
3931 *
3932 * It also gets called by the fork code, when changing the parent's
3933 * timeslices.
3934 */
3935void scheduler_tick(void)
3936{
Christoph Lameter7835b982006-12-10 02:20:22 -08003937 int cpu = smp_processor_id();
3938 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003939 struct task_struct *curr = rq->curr;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003940
3941 sched_clock_tick();
Christoph Lameter7835b982006-12-10 02:20:22 -08003942
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003943 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003944 update_rq_clock(rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003945 update_cpu_load_active(rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01003946 curr->sched_class->task_tick(rq, curr, 0);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003947 raw_spin_unlock(&rq->lock);
Ingo Molnardd41f592007-07-09 18:51:59 +02003948
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003949 perf_event_task_tick();
Peter Zijlstrae220d2d2009-05-23 18:28:55 +02003950
Christoph Lametere418e1c2006-12-10 02:20:23 -08003951#ifdef CONFIG_SMP
Ingo Molnardd41f592007-07-09 18:51:59 +02003952 rq->idle_at_tick = idle_cpu(cpu);
3953 trigger_load_balance(rq, cpu);
Christoph Lametere418e1c2006-12-10 02:20:23 -08003954#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955}
3956
Lai Jiangshan132380a2009-04-02 14:18:25 +08003957notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003958{
3959 if (in_lock_functions(addr)) {
3960 addr = CALLER_ADDR2;
3961 if (in_lock_functions(addr))
3962 addr = CALLER_ADDR3;
3963 }
3964 return addr;
3965}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966
Steven Rostedt7e49fcc2009-01-22 19:01:40 -05003967#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3968 defined(CONFIG_PREEMPT_TRACER))
3969
Srinivasa Ds43627582008-02-23 15:24:04 -08003970void __kprobes add_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003972#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 /*
3974 * Underflow?
3975 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003976 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3977 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003978#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 preempt_count() += val;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003980#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981 /*
3982 * Spinlock count overflowing soon?
3983 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08003984 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3985 PREEMPT_MASK - 10);
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003986#endif
3987 if (preempt_count() == val)
3988 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989}
3990EXPORT_SYMBOL(add_preempt_count);
3991
Srinivasa Ds43627582008-02-23 15:24:04 -08003992void __kprobes sub_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003994#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995 /*
3996 * Underflow?
3997 */
Ingo Molnar01e3eb82009-01-12 13:00:50 +01003998 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003999 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 /*
4001 * Is the spinlock portion underflowing?
4002 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07004003 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4004 !(preempt_count() & PREEMPT_MASK)))
4005 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004006#endif
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07004007
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004008 if (preempt_count() == val)
4009 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010 preempt_count() -= val;
4011}
4012EXPORT_SYMBOL(sub_preempt_count);
4013
4014#endif
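A compact userspace model of the checks these two functions make (simplified: only the preemption-depth byte, no softirq/hardirq fields; assertions stand in for DEBUG_LOCKS_WARN_ON()). PREEMPT_MASK here matches the low-byte layout the checks above assume.

#include <assert.h>
#include <stdio.h>

#define PREEMPT_MASK	0x000000ff	/* low byte: preemption depth */

static int preempt_count;

static void add_preempt_count(int val)
{
	assert(preempt_count >= 0);			/* underflow? */
	preempt_count += val;
	assert((preempt_count & PREEMPT_MASK) <
	       PREEMPT_MASK - 10);			/* overflowing soon? */
}

static void sub_preempt_count(int val)
{
	assert(val <= preempt_count);			/* underflow? */
	preempt_count -= val;
}

int main(void)
{
	add_preempt_count(1);	/* preempt_disable() */
	add_preempt_count(1);	/* nested preempt_disable() */
	sub_preempt_count(1);
	sub_preempt_count(1);	/* preemptible again */
	printf("count=%d\n", preempt_count);
	return 0;
}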
4015
4016/*
Ingo Molnardd41f592007-07-09 18:51:59 +02004017 * Print scheduling while atomic bug:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 */
Ingo Molnardd41f592007-07-09 18:51:59 +02004019static noinline void __schedule_bug(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020{
Satyam Sharma838225b2007-10-24 18:23:50 +02004021 struct pt_regs *regs = get_irq_regs();
4022
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004023 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4024 prev->comm, prev->pid, preempt_count());
Satyam Sharma838225b2007-10-24 18:23:50 +02004025
Ingo Molnardd41f592007-07-09 18:51:59 +02004026 debug_show_held_locks(prev);
Arjan van de Vene21f5b12008-05-23 09:05:58 -07004027 print_modules();
Ingo Molnardd41f592007-07-09 18:51:59 +02004028 if (irqs_disabled())
4029 print_irqtrace_events(prev);
Satyam Sharma838225b2007-10-24 18:23:50 +02004030
4031 if (regs)
4032 show_regs(regs);
4033 else
4034 dump_stack();
Ingo Molnardd41f592007-07-09 18:51:59 +02004035}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036
Ingo Molnardd41f592007-07-09 18:51:59 +02004037/*
4038 * Various schedule()-time debugging checks and statistics:
4039 */
4040static inline void schedule_debug(struct task_struct *prev)
4041{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 /*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004043 * Test if we are atomic. Since do_exit() needs to call into
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044 * schedule() atomically, we ignore that path for now.
4045 * Otherwise, whine if we are scheduling when we should not be.
4046 */
Roel Kluin3f33a7c2008-05-13 23:44:11 +02004047 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
Ingo Molnardd41f592007-07-09 18:51:59 +02004048 __schedule_bug(prev);
4049
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4051
Ingo Molnar2d723762007-10-15 17:00:12 +02004052 schedstat_inc(this_rq(), sched_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02004053#ifdef CONFIG_SCHEDSTATS
4054 if (unlikely(prev->lock_depth >= 0)) {
Yong Zhangfce20972011-01-14 15:57:39 +08004055 schedstat_inc(this_rq(), rq_sched_info.bkl_count);
Ingo Molnar2d723762007-10-15 17:00:12 +02004056 schedstat_inc(prev, sched_info.bkl_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02004057 }
4058#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02004059}
4060
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004061static void put_prev_task(struct rq *rq, struct task_struct *prev)
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004062{
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004063 if (prev->on_rq)
Mike Galbraitha64692a2010-03-11 17:16:20 +01004064 update_rq_clock(rq);
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004065 prev->sched_class->put_prev_task(rq, prev);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004066}
4067
Ingo Molnardd41f592007-07-09 18:51:59 +02004068/*
4069 * Pick up the highest-prio task:
4070 */
4071static inline struct task_struct *
Wang Chenb67802e2009-03-02 13:55:26 +08004072pick_next_task(struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02004073{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02004074 const struct sched_class *class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004075 struct task_struct *p;
4076
4077 /*
4078 * Optimization: we know that if all tasks are in
4079 * the fair class we can call that function directly:
4080 */
4081 if (likely(rq->nr_running == rq->cfs.nr_running)) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004082 p = fair_sched_class.pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004083 if (likely(p))
4084 return p;
4085 }
4086
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004087 for_each_class(class) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004088 p = class->pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004089 if (p)
4090 return p;
Ingo Molnardd41f592007-07-09 18:51:59 +02004091 }
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004092
4093 BUG(); /* the idle class will always have a runnable task */
Ingo Molnardd41f592007-07-09 18:51:59 +02004094}
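The class walk above is easier to see in miniature. A standalone sketch with assumed names (the real list is ordered stop/rt/fair/idle in this kernel, and the idle class never returns NULL):

#include <stdio.h>
#include <stddef.h>

struct task { const char *comm; };

struct sched_class_model {
	const char *name;
	struct task *(*pick_next_task)(void);
};

static struct task *pick_rt(void)   { return NULL; }	/* no RT tasks queued */
static struct task idle = { "swapper" };
static struct task *pick_idle(void) { return &idle; }	/* always succeeds */

static const struct sched_class_model classes[] = {
	{ "rt",   pick_rt   },
	{ "idle", pick_idle },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		struct task *p = classes[i].pick_next_task();
		if (p) {
			printf("picked %s from the %s class\n",
			       p->comm, classes[i].name);
			return 0;
		}
	}
	return 1;	/* unreachable: mirrors the BUG() above */
}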
4095
4096/*
4097 * schedule() is the main scheduler function.
4098 */
Peter Zijlstraff743342009-03-13 12:21:26 +01004099asmlinkage void __sched schedule(void)
Ingo Molnardd41f592007-07-09 18:51:59 +02004100{
4101 struct task_struct *prev, *next;
Harvey Harrison67ca7bd2008-02-15 09:56:36 -08004102 unsigned long *switch_count;
Ingo Molnardd41f592007-07-09 18:51:59 +02004103 struct rq *rq;
Peter Zijlstra31656512008-07-18 18:01:23 +02004104 int cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02004105
Peter Zijlstraff743342009-03-13 12:21:26 +01004106need_resched:
4107 preempt_disable();
Ingo Molnardd41f592007-07-09 18:51:59 +02004108 cpu = smp_processor_id();
4109 rq = cpu_rq(cpu);
Paul E. McKenney25502a62010-04-01 17:37:01 -07004110 rcu_note_context_switch(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02004111 prev = rq->curr;
Ingo Molnardd41f592007-07-09 18:51:59 +02004112
Ingo Molnardd41f592007-07-09 18:51:59 +02004113 schedule_debug(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114
Peter Zijlstra31656512008-07-18 18:01:23 +02004115 if (sched_feat(HRTICK))
Mike Galbraithf333fdc2008-05-12 21:20:55 +02004116 hrtick_clear(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004117
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004118 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004120 switch_count = &prev->nivcsw;
Ingo Molnardd41f592007-07-09 18:51:59 +02004121 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
Tejun Heo21aa9af2010-06-08 21:40:37 +02004122 if (unlikely(signal_pending_state(prev->state, prev))) {
Ingo Molnardd41f592007-07-09 18:51:59 +02004123 prev->state = TASK_RUNNING;
Tejun Heo21aa9af2010-06-08 21:40:37 +02004124 } else {
Peter Zijlstra2acca552011-04-05 17:23:50 +02004125 deactivate_task(rq, prev, DEQUEUE_SLEEP);
4126 prev->on_rq = 0;
4127
Tejun Heo21aa9af2010-06-08 21:40:37 +02004128 /*
Peter Zijlstra2acca552011-04-05 17:23:50 +02004129 * If a worker went to sleep, notify and ask workqueue
4130 * whether it wants to wake up a task to maintain
4131 * concurrency.
Tejun Heo21aa9af2010-06-08 21:40:37 +02004132 */
4133 if (prev->flags & PF_WQ_WORKER) {
4134 struct task_struct *to_wakeup;
4135
4136 to_wakeup = wq_worker_sleeping(prev, cpu);
4137 if (to_wakeup)
4138 try_to_wake_up_local(to_wakeup);
4139 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004140
Linus Torvalds6631e632011-04-13 08:08:20 -07004141 /*
Peter Zijlstra2acca552011-04-05 17:23:50 +02004142 * If we are going to sleep and we have plugged IO
4143 * queued, make sure to submit it to avoid deadlocks.
Linus Torvalds6631e632011-04-13 08:08:20 -07004144 */
4145 if (blk_needs_flush_plug(prev)) {
4146 raw_spin_unlock(&rq->lock);
4147 blk_flush_plug(prev);
4148 raw_spin_lock(&rq->lock);
4149 }
Tejun Heo21aa9af2010-06-08 21:40:37 +02004150 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004151 switch_count = &prev->nvcsw;
4152 }
4153
Gregory Haskins3f029d32009-07-29 11:08:47 -04004154 pre_schedule(rq, prev);
Steven Rostedtf65eda42008-01-25 21:08:07 +01004155
Ingo Molnardd41f592007-07-09 18:51:59 +02004156 if (unlikely(!rq->nr_running))
4157 idle_balance(cpu, rq);
4158
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004159 put_prev_task(rq, prev);
Wang Chenb67802e2009-03-02 13:55:26 +08004160 next = pick_next_task(rq);
Mike Galbraithf26f9af2010-12-08 11:05:42 +01004161 clear_tsk_need_resched(prev);
4162 rq->skip_clock_update = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 if (likely(prev != next)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165 rq->nr_switches++;
4166 rq->curr = next;
4167 ++*switch_count;
4168
Ingo Molnardd41f592007-07-09 18:51:59 +02004169 context_switch(rq, prev, next); /* unlocks the rq */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004170 /*
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004171 * The context switch has flipped the stack from under us
4172 * and restored the local variables which were saved when
4173 * this task called schedule() in the past. prev == current
4174 * is still correct, but it can be moved to another cpu/rq.
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004175 */
4176 cpu = smp_processor_id();
4177 rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004179 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180
Gregory Haskins3f029d32009-07-29 11:08:47 -04004181 post_schedule(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 preempt_enable_no_resched();
Peter Zijlstraff743342009-03-13 12:21:26 +01004184 if (need_resched())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 goto need_resched;
4186}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187EXPORT_SYMBOL(schedule);
4188
Frederic Weisbeckerc08f7822009-12-02 20:49:17 +01004189#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004190
4191static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
4192{
4193 bool ret = false;
4194
4195 rcu_read_lock();
4196 if (lock->owner != owner)
4197 goto fail;
4198
4199 /*
4200 * Ensure we emit the owner->on_cpu dereference _after_ checking that
4201 * lock->owner still matches owner. If that fails, owner might
4202 * point to free()d memory; if it still matches, the rcu_read_lock()
4203 * ensures the memory stays valid.
4204 */
4205 barrier();
4206
4207 ret = owner->on_cpu;
4208fail:
4209 rcu_read_unlock();
4210
4211 return ret;
4212}
4213
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004214/*
4215 * Look out! "owner" is an entirely speculative pointer
4216 * access and not reliable.
4217 */
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004218int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004219{
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004220 if (!sched_feat(OWNER_SPIN))
4221 return 0;
4222
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004223 while (owner_running(lock, owner)) {
4224 if (need_resched())
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004225 return 0;
4226
Gerald Schaefer335d7af2010-11-22 15:47:36 +01004227 arch_mutex_cpu_relax();
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004228 }
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004229
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004230 /*
4231 * If the owner changed to another task there is likely
4232 * heavy contention, stop spinning.
4233 */
4234 if (lock->owner)
4235 return 0;
4236
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004237 return 1;
4238}
4239#endif
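The spin-then-block decision above can be modelled in userspace with C11 atomics. This is a sketch only: the kernel version additionally needs rcu_read_lock(), because the owner's task_struct may be freed while it is being examined, and the barrier between the two loads.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct task_model { atomic_int on_cpu; };	/* models task_struct::on_cpu */

static struct task_model owner_task;
static _Atomic(struct task_model *) lock_owner;	/* models mutex::owner */

static void *owner_thread(void *arg)
{
	usleep(1000);				/* "hold" the lock briefly */
	atomic_store(&lock_owner, NULL);	/* release: clear owner... */
	atomic_store(&owner_task.on_cpu, 0);	/* ...then go off-CPU */
	return NULL;
}

int main(void)
{
	pthread_t t;
	long spins = 0;

	atomic_store(&owner_task.on_cpu, 1);
	atomic_store(&lock_owner, &owner_task);
	pthread_create(&t, NULL, owner_thread, NULL);

	/* owner_running() analogue: spin while the same owner is on a CPU */
	while (atomic_load(&lock_owner) == &owner_task &&
	       atomic_load(&owner_task.on_cpu))
		spins++;

	if (!atomic_load(&lock_owner))
		printf("owner released after %ld spins: retry the lock\n", spins);
	else
		printf("owner changed or slept: stop spinning, block\n");

	pthread_join(t, NULL);
	return 0;
}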
4240
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241#ifdef CONFIG_PREEMPT
4242/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004243 * this is the entry point to schedule() from in-kernel preemption
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004244 * off of preempt_enable. Kernel preemptions off of return-from-interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 * occur in entry code, which calls preempt_schedule_irq() directly.
4246 */
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004247asmlinkage void __sched notrace preempt_schedule(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248{
4249 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004250
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 /*
4252 * If there is a non-zero preempt_count or interrupts are disabled,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004253 * we do not want to preempt the current task. Just return..
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254 */
Nick Pigginbeed33a2006-10-11 01:21:52 -07004255 if (likely(ti->preempt_count || irqs_disabled()))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 return;
4257
Andi Kleen3a5c3592007-10-15 17:00:14 +02004258 do {
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004259 add_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004260 schedule();
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004261 sub_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004262
4263 /*
4264 * Check again in case we missed a preemption opportunity
4265 * between schedule and now.
4266 */
4267 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004268 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270EXPORT_SYMBOL(preempt_schedule);
4271
4272/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004273 * this is the entry point to schedule() from kernel preemption
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274 * off of irq context.
4275 * Note that this is called and returns with irqs disabled. This will
4276 * protect us against recursive calling from irq.
4277 */
4278asmlinkage void __sched preempt_schedule_irq(void)
4279{
4280 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004281
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004282 /* Catch callers which need to be fixed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 BUG_ON(ti->preempt_count || !irqs_disabled());
4284
Andi Kleen3a5c3592007-10-15 17:00:14 +02004285 do {
4286 add_preempt_count(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004287 local_irq_enable();
4288 schedule();
4289 local_irq_disable();
Andi Kleen3a5c3592007-10-15 17:00:14 +02004290 sub_preempt_count(PREEMPT_ACTIVE);
4291
4292 /*
4293 * Check again in case we missed a preemption opportunity
4294 * between schedule and now.
4295 */
4296 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004297 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298}
4299
4300#endif /* CONFIG_PREEMPT */
4301
Peter Zijlstra63859d42009-09-15 19:14:42 +02004302int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004303 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304{
Peter Zijlstra63859d42009-09-15 19:14:42 +02004305 return try_to_wake_up(curr->private, mode, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307EXPORT_SYMBOL(default_wake_function);
4308
4309/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004310 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4311 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312 * number) then we wake all the non-exclusive tasks and one exclusive task.
4313 *
4314 * There are circumstances in which we can try to wake a task which has already
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004315 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4317 */
Johannes Weiner78ddb082009-04-14 16:53:05 +02004318static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
Peter Zijlstra63859d42009-09-15 19:14:42 +02004319 int nr_exclusive, int wake_flags, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320{
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004321 wait_queue_t *curr, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004323 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07004324 unsigned flags = curr->flags;
4325
Peter Zijlstra63859d42009-09-15 19:14:42 +02004326 if (curr->func(curr, mode, wake_flags, key) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07004327 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328 break;
4329 }
4330}
4331
4332/**
4333 * __wake_up - wake up threads blocked on a waitqueue.
4334 * @q: the waitqueue
4335 * @mode: which threads
4336 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Martin Waitz67be2dd2005-05-01 08:59:26 -07004337 * @key: is directly passed to the wakeup function
David Howells50fa6102009-04-28 15:01:38 +01004338 *
4339 * It may be assumed that this function implies a write memory barrier before
4340 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004342void __wake_up(wait_queue_head_t *q, unsigned int mode,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004343 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004344{
4345 unsigned long flags;
4346
4347 spin_lock_irqsave(&q->lock, flags);
4348 __wake_up_common(q, mode, nr_exclusive, 0, key);
4349 spin_unlock_irqrestore(&q->lock, flags);
4350}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351EXPORT_SYMBOL(__wake_up);
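For reference, the usual producer/consumer shape that ends up in __wake_up(); a module-context sketch (the data_ready flag and function names are hypothetical, not from this file):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(data_wq);
static int data_ready;

/* Consumer: sleeps until the condition is true. The condition is
 * re-checked around every wakeup, which is why a wakeup racing with
 * a task that has already started running (see above) is harmless. */
static int consumer(void)
{
	if (wait_event_interruptible(data_wq, data_ready))
		return -ERESTARTSYS;
	return 0;
}

/* Producer: publish the condition, then wake the waiters. */
static void producer(void)
{
	data_ready = 1;
	wake_up(&data_wq);
}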
4352
4353/*
4354 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4355 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004356void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357{
4358 __wake_up_common(q, mode, 1, 0, NULL);
4359}
Michal Nazarewicz22c43c82010-05-05 12:53:11 +02004360EXPORT_SYMBOL_GPL(__wake_up_locked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361
Davide Libenzi4ede8162009-03-31 15:24:20 -07004362void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4363{
4364 __wake_up_common(q, mode, 1, 0, key);
4365}
Trond Myklebustbf294b42011-02-21 11:05:41 -08004366EXPORT_SYMBOL_GPL(__wake_up_locked_key);
Davide Libenzi4ede8162009-03-31 15:24:20 -07004367
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368/**
Davide Libenzi4ede8162009-03-31 15:24:20 -07004369 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004370 * @q: the waitqueue
4371 * @mode: which threads
4372 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Davide Libenzi4ede8162009-03-31 15:24:20 -07004373 * @key: opaque value to be passed to wakeup targets
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374 *
4375 * The sync wakeup differs in that the waker knows that it will schedule
4376 * away soon, so while the target thread will be woken up, it will not
4377 * be migrated to another CPU - ie. the two threads are 'synchronized'
4378 * with each other. This can prevent needless bouncing between CPUs.
4379 *
4380 * On UP it can prevent extra preemption.
David Howells50fa6102009-04-28 15:01:38 +01004381 *
4382 * It may be assumed that this function implies a write memory barrier before
4383 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384 */
Davide Libenzi4ede8162009-03-31 15:24:20 -07004385void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4386 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387{
4388 unsigned long flags;
Peter Zijlstra7d478722009-09-14 19:55:44 +02004389 int wake_flags = WF_SYNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004390
4391 if (unlikely(!q))
4392 return;
4393
4394 if (unlikely(!nr_exclusive))
Peter Zijlstra7d478722009-09-14 19:55:44 +02004395 wake_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396
4397 spin_lock_irqsave(&q->lock, flags);
Peter Zijlstra7d478722009-09-14 19:55:44 +02004398 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399 spin_unlock_irqrestore(&q->lock, flags);
4400}
Davide Libenzi4ede8162009-03-31 15:24:20 -07004401EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4402
4403/*
4404 * __wake_up_sync - see __wake_up_sync_key()
4405 */
4406void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4407{
4408 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4409}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4411
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004412/**
4413 * complete: - signals a single thread waiting on this completion
4414 * @x: holds the state of this particular completion
4415 *
4416 * This will wake up a single thread waiting on this completion. Threads will be
4417 * awakened in the same order in which they were queued.
4418 *
4419 * See also complete_all(), wait_for_completion() and related routines.
David Howells50fa6102009-04-28 15:01:38 +01004420 *
4421 * It may be assumed that this function implies a write memory barrier before
4422 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004423 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004424void complete(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425{
4426 unsigned long flags;
4427
4428 spin_lock_irqsave(&x->wait.lock, flags);
4429 x->done++;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004430 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431 spin_unlock_irqrestore(&x->wait.lock, flags);
4432}
4433EXPORT_SYMBOL(complete);
4434
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004435/**
4436 * complete_all: - signals all threads waiting on this completion
4437 * @x: holds the state of this particular completion
4438 *
4439 * This will wake up all threads waiting on this particular completion event.
David Howells50fa6102009-04-28 15:01:38 +01004440 *
4441 * It may be assumed that this function implies a write memory barrier before
4442 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004443 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004444void complete_all(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445{
4446 unsigned long flags;
4447
4448 spin_lock_irqsave(&x->wait.lock, flags);
4449 x->done += UINT_MAX/2;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004450 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451 spin_unlock_irqrestore(&x->wait.lock, flags);
4452}
4453EXPORT_SYMBOL(complete_all);
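A typical use of the pair above, sketched in module context (hypothetical worker, error handling trimmed): one side calls complete() once an event has happened, the other blocks in wait_for_completion() until it has.

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

static DECLARE_COMPLETION(setup_done);

static int worker_fn(void *data)
{
	/* ... perform one-off setup ... */
	complete(&setup_done);		/* wake exactly one waiter */
	return 0;
}

static int start_and_wait(void)
{
	struct task_struct *t = kthread_run(worker_fn, NULL, "worker");

	if (IS_ERR(t))
		return PTR_ERR(t);
	wait_for_completion(&setup_done);	/* sleep until complete() */
	return 0;
}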
4454
Andi Kleen8cbbe862007-10-15 17:00:14 +02004455static inline long __sched
4456do_wait_for_common(struct completion *x, long timeout, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458 if (!x->done) {
4459 DECLARE_WAITQUEUE(wait, current);
4460
Changli Gaoa93d2f12010-05-07 14:33:26 +08004461 __add_wait_queue_tail_exclusive(&x->wait, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 do {
Oleg Nesterov94d3d822008-08-20 16:54:41 -07004463 if (signal_pending_state(state, current)) {
Oleg Nesterovea71a542008-06-20 18:32:20 +04004464 timeout = -ERESTARTSYS;
4465 break;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004466 }
4467 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004469 timeout = schedule_timeout(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470 spin_lock_irq(&x->wait.lock);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004471 } while (!x->done && timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472 __remove_wait_queue(&x->wait, &wait);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004473 if (!x->done)
4474 return timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 }
4476 x->done--;
Oleg Nesterovea71a542008-06-20 18:32:20 +04004477 return timeout ?: 1;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004478}
4479
4480static long __sched
4481wait_for_common(struct completion *x, long timeout, int state)
4482{
4483 might_sleep();
4484
4485 spin_lock_irq(&x->wait.lock);
4486 timeout = do_wait_for_common(x, timeout, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004488 return timeout;
4489}
4490
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004491/**
4492 * wait_for_completion: - waits for completion of a task
4493 * @x: holds the state of this particular completion
4494 *
4495 * This waits to be signaled for completion of a specific task. It is NOT
4496 * interruptible and there is no timeout.
4497 *
4498 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4499 * and interrupt capability. Also see complete().
4500 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004501void __sched wait_for_completion(struct completion *x)
Andi Kleen8cbbe862007-10-15 17:00:14 +02004502{
4503 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504}
4505EXPORT_SYMBOL(wait_for_completion);
4506
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004507/**
4508 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4509 * @x: holds the state of this particular completion
4510 * @timeout: timeout value in jiffies
4511 *
4512 * This waits for either a completion of a specific task to be signaled or for a
4513 * specified timeout to expire. The timeout is in jiffies. It is not
4514 * interruptible.
4515 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004516unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4518{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004519 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520}
4521EXPORT_SYMBOL(wait_for_completion_timeout);
4522
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004523/**
4524 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4525 * @x: holds the state of this particular completion
4526 *
4527 * This waits for completion of a specific task to be signaled. It is
4528 * interruptible.
4529 */
Andi Kleen8cbbe862007-10-15 17:00:14 +02004530int __sched wait_for_completion_interruptible(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531{
Andi Kleen51e97992007-10-18 21:32:55 +02004532 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4533 if (t == -ERESTARTSYS)
4534 return t;
4535 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536}
4537EXPORT_SYMBOL(wait_for_completion_interruptible);
4538
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004539/**
4540 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4541 * @x: holds the state of this particular completion
4542 * @timeout: timeout value in jiffies
4543 *
4544 * This waits for either a completion of a specific task to be signaled or for a
4545 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4546 */
NeilBrown6bf41232011-01-05 12:50:16 +11004547long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548wait_for_completion_interruptible_timeout(struct completion *x,
4549 unsigned long timeout)
4550{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004551 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552}
4553EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4554
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004555/**
4556 * wait_for_completion_killable: - waits for completion of a task (killable)
4557 * @x: holds the state of this particular completion
4558 *
4559 * This waits to be signaled for completion of a specific task. It can be
4560 * interrupted by a kill signal.
4561 */
Matthew Wilcox009e5772007-12-06 12:29:54 -05004562int __sched wait_for_completion_killable(struct completion *x)
4563{
4564 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4565 if (t == -ERESTARTSYS)
4566 return t;
4567 return 0;
4568}
4569EXPORT_SYMBOL(wait_for_completion_killable);
4570
Dave Chinnerbe4de352008-08-15 00:40:44 -07004571/**
Sage Weil0aa12fb2010-05-29 09:12:30 -07004572 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4573 * @x: holds the state of this particular completion
4574 * @timeout: timeout value in jiffies
4575 *
4576 * This waits for either a completion of a specific task to be
4577 * signaled or for a specified timeout to expire. It can be
4578 * interrupted by a kill signal. The timeout is in jiffies.
4579 */
NeilBrown6bf41232011-01-05 12:50:16 +11004580long __sched
Sage Weil0aa12fb2010-05-29 09:12:30 -07004581wait_for_completion_killable_timeout(struct completion *x,
4582 unsigned long timeout)
4583{
4584 return wait_for_common(x, timeout, TASK_KILLABLE);
4585}
4586EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4587
4588/**
Dave Chinnerbe4de352008-08-15 00:40:44 -07004589 * try_wait_for_completion - try to decrement a completion without blocking
4590 * @x: completion structure
4591 *
4592 * Returns: 0 if a decrement cannot be done without blocking
4593 * 1 if a decrement succeeded.
4594 *
4595 * If a completion is being used as a counting completion,
4596 * attempt to decrement the counter without blocking. This
4597 * enables us to avoid waiting if the resource the completion
4598 * is protecting is not available.
4599 */
4600bool try_wait_for_completion(struct completion *x)
4601{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004602 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004603 int ret = 1;
4604
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004605 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004606 if (!x->done)
4607 ret = 0;
4608 else
4609 x->done--;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004610 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004611 return ret;
4612}
4613EXPORT_SYMBOL(try_wait_for_completion);
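As the comment notes, x->done lets a completion act as a counter. A sketch of that pattern (hypothetical slot pool, module context assumed):

#include <linux/completion.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(free_slots);

static void release_slot(void)
{
	complete(&free_slots);		/* done++: one more slot available */
}

static int grab_slot_nonblocking(void)
{
	/* done-- without sleeping, or fail immediately */
	return try_wait_for_completion(&free_slots) ? 0 : -EBUSY;
}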
4614
4615/**
4616 * completion_done - Test to see if a completion has any waiters
4617 * @x: completion structure
4618 *
4619 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4620 * 1 if there are no waiters.
4621 *
4622 */
4623bool completion_done(struct completion *x)
4624{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004625 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004626 int ret = 1;
4627
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004628 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004629 if (!x->done)
4630 ret = 0;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004631 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004632 return ret;
4633}
4634EXPORT_SYMBOL(completion_done);
4635
Andi Kleen8cbbe862007-10-15 17:00:14 +02004636static long __sched
4637sleep_on_common(wait_queue_head_t *q, int state, long timeout)
Ingo Molnar0fec1712007-07-09 18:52:01 +02004638{
4639 unsigned long flags;
4640 wait_queue_t wait;
4641
4642 init_waitqueue_entry(&wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643
Andi Kleen8cbbe862007-10-15 17:00:14 +02004644 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645
Andi Kleen8cbbe862007-10-15 17:00:14 +02004646 spin_lock_irqsave(&q->lock, flags);
4647 __add_wait_queue(q, &wait);
4648 spin_unlock(&q->lock);
4649 timeout = schedule_timeout(timeout);
4650 spin_lock_irq(&q->lock);
4651 __remove_wait_queue(q, &wait);
4652 spin_unlock_irqrestore(&q->lock, flags);
4653
4654 return timeout;
4655}
4656
4657void __sched interruptible_sleep_on(wait_queue_head_t *q)
4658{
4659 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661EXPORT_SYMBOL(interruptible_sleep_on);
4662
Ingo Molnar0fec1712007-07-09 18:52:01 +02004663long __sched
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004664interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004666 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004668EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4669
Ingo Molnar0fec1712007-07-09 18:52:01 +02004670void __sched sleep_on(wait_queue_head_t *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004672 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674EXPORT_SYMBOL(sleep_on);
4675
Ingo Molnar0fec1712007-07-09 18:52:01 +02004676long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004678 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004680EXPORT_SYMBOL(sleep_on_timeout);
4681
Ingo Molnarb29739f2006-06-27 02:54:51 -07004682#ifdef CONFIG_RT_MUTEXES
4683
4684/*
4685 * rt_mutex_setprio - set the current priority of a task
4686 * @p: task
4687 * @prio: prio value (kernel-internal form)
4688 *
4689 * This function changes the 'effective' priority of a task. It does
4690 * not touch ->normal_prio like __setscheduler().
4691 *
4692 * Used by the rt_mutex code to implement priority inheritance logic.
4693 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004694void rt_mutex_setprio(struct task_struct *p, int prio)
Ingo Molnarb29739f2006-06-27 02:54:51 -07004695{
4696 unsigned long flags;
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004697 int oldprio, on_rq, running;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004698 struct rq *rq;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004699 const struct sched_class *prev_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004700
4701 BUG_ON(prio < 0 || prio > MAX_PRIO);
4702
Peter Zijlstra013fdb82011-04-05 17:23:45 +02004703 lockdep_assert_held(&p->pi_lock);
4704
Ingo Molnarb29739f2006-06-27 02:54:51 -07004705 rq = task_rq_lock(p, &flags);
4706
Steven Rostedta8027072010-09-20 15:13:34 -04004707 trace_sched_pi_setprio(p, prio);
Andrew Mortond5f9f942007-05-08 20:27:06 -07004708 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004709 prev_class = p->sched_class;
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004710 on_rq = p->on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004711 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004712 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004713 dequeue_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004714 if (running)
4715 p->sched_class->put_prev_task(rq, p);
Ingo Molnardd41f592007-07-09 18:51:59 +02004716
4717 if (rt_prio(prio))
4718 p->sched_class = &rt_sched_class;
4719 else
4720 p->sched_class = &fair_sched_class;
4721
Ingo Molnarb29739f2006-06-27 02:54:51 -07004722 p->prio = prio;
4723
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004724 if (running)
4725 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004726 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004727 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004728
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004729 check_class_changed(rq, p, prev_class, oldprio);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004730 task_rq_unlock(rq, &flags);
4731}
4732
4733#endif
4734
Ingo Molnar36c8b582006-07-03 00:25:41 -07004735void set_user_nice(struct task_struct *p, long nice)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736{
Ingo Molnardd41f592007-07-09 18:51:59 +02004737 int old_prio, delta, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004739 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740
4741 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4742 return;
4743 /*
4744 * We have to be careful, if called from sys_setpriority(),
4745 * the task might be in the middle of scheduling on another CPU.
4746 */
4747 rq = task_rq_lock(p, &flags);
4748 /*
4749 * The RT priorities are set via sched_setscheduler(), but we still
4750 * allow the 'normal' nice value to be set - but as expected
4751 * it won't have any effect on scheduling while the task is
Ingo Molnardd41f592007-07-09 18:51:59 +02004752 * SCHED_FIFO/SCHED_RR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753 */
Ingo Molnare05606d2007-07-09 18:51:59 +02004754 if (task_has_rt_policy(p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755 p->static_prio = NICE_TO_PRIO(nice);
4756 goto out_unlock;
4757 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004758 on_rq = p->on_rq;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004759 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004760 dequeue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762 p->static_prio = NICE_TO_PRIO(nice);
Peter Williams2dd73a42006-06-27 02:54:34 -07004763 set_load_weight(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004764 old_prio = p->prio;
4765 p->prio = effective_prio(p);
4766 delta = p->prio - old_prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004767
Ingo Molnardd41f592007-07-09 18:51:59 +02004768 if (on_rq) {
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004769 enqueue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770 /*
Andrew Mortond5f9f942007-05-08 20:27:06 -07004771 * If the task increased its priority or is running and
4772 * lowered its priority, then reschedule its CPU:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773 */
Andrew Mortond5f9f942007-05-08 20:27:06 -07004774 if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775 resched_task(rq->curr);
4776 }
4777out_unlock:
4778 task_rq_unlock(rq, &flags);
4779}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004780EXPORT_SYMBOL(set_user_nice);
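The nice-to-static_prio mapping used above is linear. A standalone sketch with the constants as defined in this kernel (MAX_RT_PRIO == 100, so nice -20..19 maps onto static_prio 100..139, with the default nice 0 at 120):

#include <stdio.h>

#define MAX_RT_PRIO	100
#define NICE_TO_PRIO(n)	(MAX_RT_PRIO + (n) + 20)

int main(void)
{
	int nice;

	for (nice = -20; nice <= 19; nice++)
		printf("nice %3d -> static_prio %d\n", nice, NICE_TO_PRIO(nice));
	return 0;
}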
4781
Matt Mackalle43379f2005-05-01 08:59:00 -07004782/*
4783 * can_nice - check if a task can reduce its nice value
4784 * @p: task
4785 * @nice: nice value
4786 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004787int can_nice(const struct task_struct *p, const int nice)
Matt Mackalle43379f2005-05-01 08:59:00 -07004788{
Matt Mackall024f4742005-08-18 11:24:19 -07004789 /* convert nice value [19,-20] to rlimit style value [1,40] */
4790 int nice_rlim = 20 - nice;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004791
Jiri Slaby78d7d402010-03-05 13:42:54 -08004792 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
Matt Mackalle43379f2005-05-01 08:59:00 -07004793 capable(CAP_SYS_NICE));
4794}
4795
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796#ifdef __ARCH_WANT_SYS_NICE
4797
4798/*
4799 * sys_nice - change the priority of the current process.
4800 * @increment: priority increment
4801 *
4802 * sys_setpriority is a more generic, but much slower function that
4803 * does similar things.
4804 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004805SYSCALL_DEFINE1(nice, int, increment)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004806{
Ingo Molnar48f24c42006-07-03 00:25:40 -07004807 long nice, retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808
4809 /*
4810 * Setpriority might change our priority at the same moment.
4811 * We don't have to worry. Conceptually one call occurs first
4812 * and we have a single winner.
4813 */
Matt Mackalle43379f2005-05-01 08:59:00 -07004814 if (increment < -40)
4815 increment = -40;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 if (increment > 40)
4817 increment = 40;
4818
Américo Wang2b8f8362009-02-16 18:54:21 +08004819 nice = TASK_NICE(current) + increment;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004820 if (nice < -20)
4821 nice = -20;
4822 if (nice > 19)
4823 nice = 19;
4824
Matt Mackalle43379f2005-05-01 08:59:00 -07004825 if (increment < 0 && !can_nice(current, nice))
4826 return -EPERM;
4827
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828 retval = security_task_setnice(current, nice);
4829 if (retval)
4830 return retval;
4831
4832 set_user_nice(current, nice);
4833 return 0;
4834}
4835
4836#endif
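From userspace this syscall is normally reached through nice(2); a minimal caller (with glibc, nice() returns the new value, so errno is needed to tell -1-as-value from failure):

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

int main(void)
{
	int nv;

	errno = 0;
	nv = nice(5);			/* lower our priority by 5 */
	if (nv == -1 && errno)
		perror("nice");
	else
		printf("new nice value: %d\n", nv);
	return 0;
}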
4837
4838/**
4839 * task_prio - return the priority value of a given task.
4840 * @p: the task in question.
4841 *
4842 * This is the priority value as seen by users in /proc.
4843 * RT tasks are offset by -200. Normal tasks are centered
4844 * around 0, value goes from -16 to +15.
4845 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004846int task_prio(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004847{
4848 return p->prio - MAX_RT_PRIO;
4849}
4850
4851/**
4852 * task_nice - return the nice value of a given task.
4853 * @p: the task in question.
4854 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004855int task_nice(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856{
4857 return TASK_NICE(p);
4858}
Pavel Roskin150d8be2008-03-05 16:56:37 -05004859EXPORT_SYMBOL(task_nice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860
4861/**
4862 * idle_cpu - is a given cpu idle currently?
4863 * @cpu: the processor in question.
4864 */
4865int idle_cpu(int cpu)
4866{
4867 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4868}
4869
Linus Torvalds1da177e2005-04-16 15:20:36 -07004870/**
4871 * idle_task - return the idle task for a given cpu.
4872 * @cpu: the processor in question.
4873 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004874struct task_struct *idle_task(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875{
4876 return cpu_rq(cpu)->idle;
4877}
4878
4879/**
4880 * find_process_by_pid - find a process with a matching PID value.
4881 * @pid: the pid in question.
4882 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02004883static struct task_struct *find_process_by_pid(pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884{
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07004885 return pid ? find_task_by_vpid(pid) : current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886}
4887
4888/* Actually do priority change: must hold rq lock. */
Ingo Molnardd41f592007-07-09 18:51:59 +02004889static void
4890__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004891{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892 p->policy = policy;
4893 p->rt_priority = prio;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004894 p->normal_prio = normal_prio(p);
4895 /* we are holding p->pi_lock already */
4896 p->prio = rt_mutex_getprio(p);
Peter Zijlstraffd44db2009-11-10 20:12:01 +01004897 if (rt_prio(p->prio))
4898 p->sched_class = &rt_sched_class;
4899 else
4900 p->sched_class = &fair_sched_class;
Peter Williams2dd73a42006-06-27 02:54:34 -07004901 set_load_weight(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902}
4903
David Howellsc69e8d92008-11-14 10:39:19 +11004904/*
4905 * check the target process has a UID that matches the current process's
4906 */
4907static bool check_same_owner(struct task_struct *p)
4908{
4909 const struct cred *cred = current_cred(), *pcred;
4910 bool match;
4911
4912 rcu_read_lock();
4913 pcred = __task_cred(p);
Serge E. Hallynb0e77592011-03-23 16:43:24 -07004914 if (cred->user->user_ns == pcred->user->user_ns)
4915 match = (cred->euid == pcred->euid ||
4916 cred->euid == pcred->uid);
4917 else
4918 match = false;
David Howellsc69e8d92008-11-14 10:39:19 +11004919 rcu_read_unlock();
4920 return match;
4921}
4922
Rusty Russell961ccdd2008-06-23 13:55:38 +10004923static int __sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07004924 const struct sched_param *param, bool user)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004926 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 unsigned long flags;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004928 const struct sched_class *prev_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004929 struct rq *rq;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004930 int reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931
Steven Rostedt66e53932006-06-27 02:54:44 -07004932 /* may grab non-irq protected spin_locks */
4933 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004934recheck:
4935 /* double check policy once rq lock held */
Lennart Poetteringca94c442009-06-15 17:17:47 +02004936 if (policy < 0) {
4937 reset_on_fork = p->sched_reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 policy = oldpolicy = p->policy;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004939 } else {
4940 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4941 policy &= ~SCHED_RESET_ON_FORK;
4942
4943 if (policy != SCHED_FIFO && policy != SCHED_RR &&
4944 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4945 policy != SCHED_IDLE)
4946 return -EINVAL;
4947 }
4948
Linus Torvalds1da177e2005-04-16 15:20:36 -07004949 /*
4950 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02004951 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4952 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 */
4954 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004955 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04004956 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02004958 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959 return -EINVAL;
4960
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004961 /*
4962 * Allow unprivileged RT tasks to decrease priority:
4963 */
Rusty Russell961ccdd2008-06-23 13:55:38 +10004964 if (user && !capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02004965 if (rt_policy(policy)) {
Oleg Nesterova44702e2010-06-11 01:09:44 +02004966 unsigned long rlim_rtprio =
4967 task_rlimit(p, RLIMIT_RTPRIO);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004968
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004969 /* can't set/change the rt policy */
4970 if (policy != p->policy && !rlim_rtprio)
4971 return -EPERM;
4972
4973 /* can't increase priority */
4974 if (param->sched_priority > p->rt_priority &&
4975 param->sched_priority > rlim_rtprio)
4976 return -EPERM;
4977 }
Darren Hartc02aa732011-02-17 15:37:07 -08004978
Ingo Molnardd41f592007-07-09 18:51:59 +02004979 /*
Darren Hartc02aa732011-02-17 15:37:07 -08004980 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4981 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
Ingo Molnardd41f592007-07-09 18:51:59 +02004982 */
Darren Hartc02aa732011-02-17 15:37:07 -08004983 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4984 if (!can_nice(p, TASK_NICE(p)))
4985 return -EPERM;
4986 }
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004987
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004988 /* can't change other user's priorities */
David Howellsc69e8d92008-11-14 10:39:19 +11004989 if (!check_same_owner(p))
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004990 return -EPERM;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004991
4992 /* Normal users shall not reset the sched_reset_on_fork flag */
4993 if (p->sched_reset_on_fork && !reset_on_fork)
4994 return -EPERM;
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004995 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004996
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07004997 if (user) {
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09004998 retval = security_task_setscheduler(p);
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07004999 if (retval)
5000 return retval;
5001 }
5002
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07005004 * make sure no PI-waiters arrive (or leave) while we are
5005 * changing the priority of the task:
5006 */
Thomas Gleixner1d615482009-11-17 14:54:03 +01005007 raw_spin_lock_irqsave(&p->pi_lock, flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005008 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005009 * To be able to change p->policy safely, the appropriate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 * runqueue lock must be held.
5011 */
Ingo Molnarb29739f2006-06-27 02:54:51 -07005012 rq = __task_rq_lock(p);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005013
Peter Zijlstra34f971f2010-09-22 13:53:15 +02005014 /*
5015 * Changing the policy of the stop threads is a very bad idea
5016 */
5017 if (p == rq->stop) {
5018 __task_rq_unlock(rq);
5019 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5020 return -EINVAL;
5021 }
5022
Dario Faggiolia51e9192011-03-24 14:00:18 +01005023 /*
5024 * If not changing anything there's no need to proceed further:
5025 */
5026 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
5027 param->sched_priority == p->rt_priority))) {
5028
5029 __task_rq_unlock(rq);
5030 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5031 return 0;
5032 }
5033
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005034#ifdef CONFIG_RT_GROUP_SCHED
5035 if (user) {
5036 /*
5037 * Do not allow realtime tasks into groups that have no runtime
5038 * assigned.
5039 */
5040 if (rt_bandwidth_enabled() && rt_policy(policy) &&
Mike Galbraithf4493772011-01-13 04:54:50 +01005041 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5042 !task_group_is_autogroup(task_group(p))) {
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005043 __task_rq_unlock(rq);
5044 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5045 return -EPERM;
5046 }
5047 }
5048#endif
5049
Linus Torvalds1da177e2005-04-16 15:20:36 -07005050 /* recheck policy now with rq lock held */
5051 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5052 policy = oldpolicy = -1;
Ingo Molnarb29739f2006-06-27 02:54:51 -07005053 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01005054 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055 goto recheck;
5056 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02005057 on_rq = p->on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01005058 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005059 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005060 deactivate_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005061 if (running)
5062 p->sched_class->put_prev_task(rq, p);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005063
Lennart Poetteringca94c442009-06-15 17:17:47 +02005064 p->sched_reset_on_fork = reset_on_fork;
5065
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01005067 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02005068 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005069
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005070 if (running)
5071 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005072 if (on_rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02005073 activate_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005074
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005075 check_class_changed(rq, p, prev_class, oldprio);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005076 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01005077 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005078
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07005079 rt_mutex_adjust_pi(p);
5080
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081 return 0;
5082}
Rusty Russell961ccdd2008-06-23 13:55:38 +10005083
5084/**
5085 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5086 * @p: the task in question.
5087 * @policy: new policy.
5088 * @param: structure containing the new RT priority.
5089 *
5090	 * NOTE that the task may already be dead.
5091 */
5092int sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005093 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005094{
5095 return __sched_setscheduler(p, policy, param, true);
5096}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005097EXPORT_SYMBOL_GPL(sched_setscheduler);
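/*
 * Usage sketch (hypothetical helper, guarded out): an in-kernel caller
 * bumping a worker kthread to a fixed RT priority; the priority choice
 * is illustrative only. Kernel code that must succeed regardless of the
 * caller's RLIMIT_RTPRIO should use sched_setscheduler_nocheck() below,
 * which skips the permission checks.
 */
#if 0
static void example_make_worker_rt(struct task_struct *worker)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 10 };

	if (sched_setscheduler(worker, SCHED_FIFO, &param))
		printk(KERN_WARNING "could not switch worker to SCHED_FIFO\n");
}
#endif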
5098
Rusty Russell961ccdd2008-06-23 13:55:38 +10005099/**
5100 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5101 * @p: the task in question.
5102 * @policy: new policy.
5103 * @param: structure containing the new RT priority.
5104 *
5105 * Just like sched_setscheduler, only don't bother checking if the
5106 * current context has permission. For example, this is needed in
5107 * stop_machine(): we create temporary high priority worker threads,
5108 * but our caller might not have that capability.
5109 */
5110int sched_setscheduler_nocheck(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005111 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005112{
5113 return __sched_setscheduler(p, policy, param, false);
5114}
5115
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005116static int
5117do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005118{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119 struct sched_param lparam;
5120 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005121 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122
5123 if (!param || pid < 0)
5124 return -EINVAL;
5125 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5126 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005127
5128 rcu_read_lock();
5129 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005130 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005131 if (p != NULL)
5132 retval = sched_setscheduler(p, policy, &lparam);
5133 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07005134
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135 return retval;
5136}
5137
5138/**
5139 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5140 * @pid: the pid in question.
5141 * @policy: new policy.
5142 * @param: structure containing the new RT priority.
5143 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005144SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5145 struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146{
Jason Baronc21761f2006-01-18 17:43:03 -08005147 /* negative values for policy are not valid */
5148 if (policy < 0)
5149 return -EINVAL;
5150
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151 return do_sched_setscheduler(pid, policy, param);
5152}
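/*
 * Userspace view, as a sketch (hypothetical helper, guarded out since it
 * is not kernel code): the glibc wrapper for this syscall. Making oneself
 * SCHED_FIFO requires CAP_SYS_NICE or a sufficient RLIMIT_RTPRIO, per the
 * checks in __sched_setscheduler() above.
 */
#if 0
#include <sched.h>

static int make_self_fifo(int prio)
{
	struct sched_param sp = { .sched_priority = prio };

	return sched_setscheduler(0, SCHED_FIFO, &sp); /* pid 0 == caller */
}
#endif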
5153
5154/**
5155 * sys_sched_setparam - set/change the RT priority of a thread
5156 * @pid: the pid in question.
5157 * @param: structure containing the new RT priority.
5158 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005159SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160{
5161 return do_sched_setscheduler(pid, -1, param);
5162}
5163
5164/**
5165 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5166 * @pid: the pid in question.
5167 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005168SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005170 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005171 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
5173 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005174 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005175
5176 retval = -ESRCH;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005177 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 p = find_process_by_pid(pid);
5179 if (p) {
5180 retval = security_task_getscheduler(p);
5181 if (!retval)
Lennart Poetteringca94c442009-06-15 17:17:47 +02005182 retval = p->policy
5183 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184 }
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005185 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186 return retval;
5187}
5188
5189/**
Lennart Poetteringca94c442009-06-15 17:17:47 +02005190 * sys_sched_getparam - get the RT priority of a thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191 * @pid: the pid in question.
5192 * @param: structure containing the RT priority.
5193 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005194SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005195{
5196 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005197 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005198 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199
5200 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005201 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005203 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005204 p = find_process_by_pid(pid);
5205 retval = -ESRCH;
5206 if (!p)
5207 goto out_unlock;
5208
5209 retval = security_task_getscheduler(p);
5210 if (retval)
5211 goto out_unlock;
5212
5213 lp.sched_priority = p->rt_priority;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005214 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215
5216 /*
5217 * This one might sleep, we cannot do it with a spinlock held ...
5218 */
5219 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5220
Linus Torvalds1da177e2005-04-16 15:20:36 -07005221 return retval;
5222
5223out_unlock:
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005224 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225 return retval;
5226}
5227
Rusty Russell96f874e2008-11-25 02:35:14 +10305228long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305230 cpumask_var_t cpus_allowed, new_mask;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005231 struct task_struct *p;
5232 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005234 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005235 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236
5237 p = find_process_by_pid(pid);
5238 if (!p) {
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005239 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005240 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241 return -ESRCH;
5242 }
5243
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005244 /* Prevent p going away */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 get_task_struct(p);
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005246 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305248 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5249 retval = -ENOMEM;
5250 goto out_put_task;
5251 }
5252 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5253 retval = -ENOMEM;
5254 goto out_free_cpus_allowed;
5255 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 retval = -EPERM;
Serge E. Hallynb0e77592011-03-23 16:43:24 -07005257 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 goto out_unlock;
5259
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09005260 retval = security_task_setscheduler(p);
David Quigleye7834f82006-06-23 02:03:59 -07005261 if (retval)
5262 goto out_unlock;
5263
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305264 cpuset_cpus_allowed(p, cpus_allowed);
5265 cpumask_and(new_mask, in_mask, cpus_allowed);
Peter Zijlstra49246272010-10-17 21:46:10 +02005266again:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305267 retval = set_cpus_allowed_ptr(p, new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268
Paul Menage8707d8b2007-10-18 23:40:22 -07005269 if (!retval) {
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305270 cpuset_cpus_allowed(p, cpus_allowed);
5271 if (!cpumask_subset(new_mask, cpus_allowed)) {
Paul Menage8707d8b2007-10-18 23:40:22 -07005272 /*
5273 * We must have raced with a concurrent cpuset
5274 * update. Just reset the cpus_allowed to the
5275 * cpuset's cpus_allowed
5276 */
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305277 cpumask_copy(new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07005278 goto again;
5279 }
5280 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005281out_unlock:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305282 free_cpumask_var(new_mask);
5283out_free_cpus_allowed:
5284 free_cpumask_var(cpus_allowed);
5285out_put_task:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005287 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288 return retval;
5289}
5290
5291static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
Rusty Russell96f874e2008-11-25 02:35:14 +10305292 struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005293{
Rusty Russell96f874e2008-11-25 02:35:14 +10305294 if (len < cpumask_size())
5295 cpumask_clear(new_mask);
5296 else if (len > cpumask_size())
5297 len = cpumask_size();
5298
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5300}
5301
5302/**
5303 * sys_sched_setaffinity - set the cpu affinity of a process
5304 * @pid: pid of the process
5305 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5306 * @user_mask_ptr: user-space pointer to the new cpu mask
5307 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005308SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5309 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305311 cpumask_var_t new_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312 int retval;
5313
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305314 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5315 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305317 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5318 if (retval == 0)
5319 retval = sched_setaffinity(pid, new_mask);
5320 free_cpumask_var(new_mask);
5321 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322}
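/*
 * Userspace view, as a sketch (hypothetical helper, guarded out): pinning
 * the calling thread to CPU 0 through the glibc wrapper for this syscall.
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>

static int pin_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	return sched_setaffinity(0, sizeof(set), &set);
}
#endif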
5323
Rusty Russell96f874e2008-11-25 02:35:14 +10305324long sched_getaffinity(pid_t pid, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005325{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005326 struct task_struct *p;
Thomas Gleixner31605682009-12-08 20:24:16 +00005327 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005330 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005331 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005332
5333 retval = -ESRCH;
5334 p = find_process_by_pid(pid);
5335 if (!p)
5336 goto out_unlock;
5337
David Quigleye7834f82006-06-23 02:03:59 -07005338 retval = security_task_getscheduler(p);
5339 if (retval)
5340 goto out_unlock;
5341
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005342 raw_spin_lock_irqsave(&p->pi_lock, flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10305343 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005344 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005345
5346out_unlock:
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005347 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005348 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349
Ulrich Drepper9531b622007-08-09 11:16:46 +02005350 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005351}
5352
5353/**
5354 * sys_sched_getaffinity - get the cpu affinity of a process
5355 * @pid: pid of the process
5356 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5357 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5358 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005359SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5360 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361{
5362 int ret;
Rusty Russellf17c8602008-11-25 02:35:11 +10305363 cpumask_var_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005364
Anton Blanchard84fba5e2010-04-06 17:02:19 +10005365 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005366 return -EINVAL;
5367 if (len & (sizeof(unsigned long)-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368 return -EINVAL;
5369
Rusty Russellf17c8602008-11-25 02:35:11 +10305370 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5371 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005372
Rusty Russellf17c8602008-11-25 02:35:11 +10305373 ret = sched_getaffinity(pid, mask);
5374 if (ret == 0) {
KOSAKI Motohiro8bc037f2010-03-17 09:36:58 +09005375 size_t retlen = min_t(size_t, len, cpumask_size());
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005376
5377 if (copy_to_user(user_mask_ptr, mask, retlen))
Rusty Russellf17c8602008-11-25 02:35:11 +10305378 ret = -EFAULT;
5379 else
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005380 ret = retlen;
Rusty Russellf17c8602008-11-25 02:35:11 +10305381 }
5382 free_cpumask_var(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005383
Rusty Russellf17c8602008-11-25 02:35:11 +10305384 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385}
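/*
 * Note that on success the raw syscall returns retlen, the number of
 * bytes copied into the user mask; the glibc wrapper hides this and
 * returns 0. A userspace sketch (hypothetical helper, guarded out)
 * counting the CPUs the caller may run on:
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>

static int count_allowed_cpus(void)
{
	cpu_set_t set;

	if (sched_getaffinity(0, sizeof(set), &set))
		return -1;
	return CPU_COUNT(&set);
}
#endif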
5386
5387/**
5388 * sys_sched_yield - yield the current processor to other threads.
5389 *
Ingo Molnardd41f592007-07-09 18:51:59 +02005390 * This function yields the current CPU to other tasks. If there are no
5391 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005393SYSCALL_DEFINE0(sched_yield)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005395 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005396
Ingo Molnar2d723762007-10-15 17:00:12 +02005397 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02005398 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005399
5400 /*
5401 * Since we are going to call schedule() anyway, there's
5402 * no need to preempt or enable interrupts:
5403 */
5404 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07005405 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Thomas Gleixner9828ea92009-12-03 20:55:53 +01005406 do_raw_spin_unlock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005407 preempt_enable_no_resched();
5408
5409 schedule();
5410
5411 return 0;
5412}
5413
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005414static inline int should_resched(void)
5415{
5416 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5417}
5418
Andrew Mortone7b38402006-06-30 01:56:00 -07005419static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005420{
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02005421 add_preempt_count(PREEMPT_ACTIVE);
5422 schedule();
5423 sub_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424}
5425
Herbert Xu02b67cc2008-01-25 21:08:28 +01005426int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005427{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005428 if (should_resched()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005429 __cond_resched();
5430 return 1;
5431 }
5432 return 0;
5433}
Herbert Xu02b67cc2008-01-25 21:08:28 +01005434EXPORT_SYMBOL(_cond_resched);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435
5436/*
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005437 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438 * call schedule, and on return reacquire the lock.
5439 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005440 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07005441 * operations here to prevent schedule() from being called twice (once via
5442 * spin_unlock(), once by hand).
5443 */
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005444int __cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005446 int resched = should_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07005447 int ret = 0;
5448
Peter Zijlstraf607c662009-07-20 19:16:29 +02005449 lockdep_assert_held(lock);
5450
Nick Piggin95c354f2008-01-30 13:31:20 +01005451 if (spin_needbreak(lock) || resched) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005452 spin_unlock(lock);
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005453 if (resched)
Nick Piggin95c354f2008-01-30 13:31:20 +01005454 __cond_resched();
5455 else
5456 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07005457 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005459 }
Jan Kara6df3cec2005-06-13 15:52:32 -07005460 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005461}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005462EXPORT_SYMBOL(__cond_resched_lock);
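/*
 * Usage sketch (hypothetical helper, guarded out): a long-running scan
 * under a spinlock calls cond_resched_lock() - the wrapper around the
 * function above - each iteration, so both the lock and the CPU are
 * released whenever somebody is waiting for them. The per-iteration work
 * must tolerate the lock being dropped and retaken between iterations.
 */
#if 0
static void example_scan(spinlock_t *lock)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < 100000; i++) {
		/* ... work that needs the lock ... */
		cond_resched_lock(lock);	/* may briefly drop *lock */
	}
	spin_unlock(lock);
}
#endif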
Linus Torvalds1da177e2005-04-16 15:20:36 -07005463
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005464int __sched __cond_resched_softirq(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465{
5466 BUG_ON(!in_softirq());
5467
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005468 if (should_resched()) {
Thomas Gleixner98d82562007-05-23 13:58:18 -07005469 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470 __cond_resched();
5471 local_bh_disable();
5472 return 1;
5473 }
5474 return 0;
5475}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005476EXPORT_SYMBOL(__cond_resched_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005477
Linus Torvalds1da177e2005-04-16 15:20:36 -07005478/**
5479 * yield - yield the current processor to other threads.
5480 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08005481 * This is a shortcut for kernel-space yielding - it marks the
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482 * thread runnable and calls sys_sched_yield().
5483 */
5484void __sched yield(void)
5485{
5486 set_current_state(TASK_RUNNING);
5487 sys_sched_yield();
5488}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489EXPORT_SYMBOL(yield);
5490
Mike Galbraithd95f4122011-02-01 09:50:51 -05005491/**
5492 * yield_to - yield the current processor to another thread in
5493 * your thread group, or accelerate that thread toward the
5494 * processor it's on.
Randy Dunlap16addf92011-03-18 09:34:53 -07005495 * @p: target task
5496 * @preempt: whether task preemption is allowed or not
Mike Galbraithd95f4122011-02-01 09:50:51 -05005497 *
5498 * It's the caller's job to ensure that the target task struct
5499 * can't go away on us before we can do any checks.
5500 *
5501 * Returns true if we indeed boosted the target task.
5502 */
5503bool __sched yield_to(struct task_struct *p, bool preempt)
5504{
5505 struct task_struct *curr = current;
5506 struct rq *rq, *p_rq;
5507 unsigned long flags;
5508 bool yielded = 0;
5509
5510 local_irq_save(flags);
5511 rq = this_rq();
5512
5513again:
5514 p_rq = task_rq(p);
5515 double_rq_lock(rq, p_rq);
5516 while (task_rq(p) != p_rq) {
5517 double_rq_unlock(rq, p_rq);
5518 goto again;
5519 }
5520
5521 if (!curr->sched_class->yield_to_task)
5522 goto out;
5523
5524 if (curr->sched_class != p->sched_class)
5525 goto out;
5526
5527 if (task_running(p_rq, p) || p->state)
5528 goto out;
5529
5530 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08005531 if (yielded) {
Mike Galbraithd95f4122011-02-01 09:50:51 -05005532 schedstat_inc(rq, yld_count);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08005533 /*
5534 * Make p's CPU reschedule; pick_next_entity takes care of
5535 * fairness.
5536 */
5537 if (preempt && rq != p_rq)
5538 resched_task(p_rq->curr);
5539 }
Mike Galbraithd95f4122011-02-01 09:50:51 -05005540
5541out:
5542 double_rq_unlock(rq, p_rq);
5543 local_irq_restore(flags);
5544
5545 if (yielded)
5546 schedule();
5547
5548 return yielded;
5549}
5550EXPORT_SYMBOL_GPL(yield_to);
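/*
 * Usage sketch (hypothetical helper, guarded out), modelled on the
 * directed-yield case this was added for: a virtualization guest spinning
 * on a lock whose holder vCPU was preempted donates its slice to the
 * holder instead of burning it. The lookup of the holder task is elided;
 * as noted above, the caller must keep the task struct alive.
 */
#if 0
static void example_boost_lock_holder(struct task_struct *holder)
{
	get_task_struct(holder);	/* keep @holder from going away */
	if (!yield_to(holder, true)) {
		/* no boost happened; caller may pick another candidate */
	}
	put_task_struct(holder);
}
#endif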
5551
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005553 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07005554 * that process accounting knows that this is a task in IO wait state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005555 */
5556void __sched io_schedule(void)
5557{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005558 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005559
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005560 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005561 atomic_inc(&rq->nr_iowait);
Jens Axboe73c10102011-03-08 13:19:51 +01005562 blk_flush_plug(current);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005563 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005564 schedule();
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005565 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005567 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005568}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569EXPORT_SYMBOL(io_schedule);
5570
5571long __sched io_schedule_timeout(long timeout)
5572{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005573 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005574 long ret;
5575
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005576 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005577 atomic_inc(&rq->nr_iowait);
Jens Axboe73c10102011-03-08 13:19:51 +01005578 blk_flush_plug(current);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005579 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005580 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005581 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005583 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005584 return ret;
5585}
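/*
 * Usage sketch (hypothetical helper, guarded out): waiting for an
 * interrupt handler to flip a completion flag while accounting the sleep
 * as iowait rather than idle. The waker is expected to call
 * wake_up_process() after setting the flag.
 */
#if 0
static void example_wait_for_io(atomic_t *done)
{
	while (!atomic_read(done)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(done))
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}
#endif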
5586
5587/**
5588 * sys_sched_get_priority_max - return maximum RT priority.
5589 * @policy: scheduling class.
5590 *
5591 * This syscall returns the maximum rt_priority that can be used
5592 * by a given scheduling class.
5593 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005594SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005595{
5596 int ret = -EINVAL;
5597
5598 switch (policy) {
5599 case SCHED_FIFO:
5600 case SCHED_RR:
5601 ret = MAX_USER_RT_PRIO-1;
5602 break;
5603 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005604 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005605 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005606 ret = 0;
5607 break;
5608 }
5609 return ret;
5610}
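/*
 * Example: with the default MAX_USER_RT_PRIO of 100 this returns 99 for
 * SCHED_FIFO and SCHED_RR, and 0 for the fair policies, which have no
 * static RT priority range.
 */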
5611
5612/**
5613 * sys_sched_get_priority_min - return minimum RT priority.
5614 * @policy: scheduling class.
5615 *
5616 * This syscall returns the minimum rt_priority that can be used
5617 * by a given scheduling class.
5618 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005619SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005620{
5621 int ret = -EINVAL;
5622
5623 switch (policy) {
5624 case SCHED_FIFO:
5625 case SCHED_RR:
5626 ret = 1;
5627 break;
5628 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005629 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005630 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005631 ret = 0;
5632 }
5633 return ret;
5634}
5635
5636/**
5637 * sys_sched_rr_get_interval - return the default timeslice of a process.
5638 * @pid: pid of the process.
5639 * @interval: userspace pointer to the timeslice value.
5640 *
5641 * This syscall writes the default timeslice value of a given process
5642 * into the user-space timespec buffer. A value of '0' means infinity.
5643 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01005644SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01005645 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005646{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005647 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005648 unsigned int time_slice;
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005649 unsigned long flags;
5650 struct rq *rq;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005651 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005652 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005653
5654 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005655 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656
5657 retval = -ESRCH;
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005658 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659 p = find_process_by_pid(pid);
5660 if (!p)
5661 goto out_unlock;
5662
5663 retval = security_task_getscheduler(p);
5664 if (retval)
5665 goto out_unlock;
5666
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005667 rq = task_rq_lock(p, &flags);
5668 time_slice = p->sched_class->get_rr_interval(rq, p);
5669 task_rq_unlock(rq, &flags);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005670
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005671 rcu_read_unlock();
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005672 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005674 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005675
Linus Torvalds1da177e2005-04-16 15:20:36 -07005676out_unlock:
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005677 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678 return retval;
5679}
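/*
 * Userspace view, as a sketch (hypothetical helper, guarded out):
 * querying the slice of the calling process. For SCHED_RR tasks this is
 * the RR quantum; for CFS tasks the class reports a slice derived from
 * the task's weight.
 */
#if 0
#include <sched.h>
#include <stdio.h>
#include <time.h>

static void print_rr_interval(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
}
#endif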
5680
Steven Rostedt7c731e02008-05-12 21:20:41 +02005681static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005682
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005683void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005684{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005686 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005687
Linus Torvalds1da177e2005-04-16 15:20:36 -07005688 state = p->state ? __ffs(p->state) + 1 : 0;
Erik Gilling28d06862010-11-19 18:08:51 -08005689 printk(KERN_INFO "%-15.15s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005690 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02005691#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07005692 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005693 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005694 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005695 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005696#else
5697 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005698 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005699 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005700 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005701#endif
5702#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05005703 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005704#endif
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005705 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
David Rientjesaa47b7e2009-05-04 01:38:05 -07005706 task_pid_nr(p), task_pid_nr(p->real_parent),
5707 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005708
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01005709 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005710}
5711
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005712void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005713{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005714 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715
Ingo Molnar4bd77322007-07-11 21:21:47 +02005716#if BITS_PER_LONG == 32
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005717 printk(KERN_INFO
5718 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719#else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005720 printk(KERN_INFO
5721 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005722#endif
5723 read_lock(&tasklist_lock);
5724 do_each_thread(g, p) {
5725 /*
5726 * reset the NMI-timeout, listing all files on a slow
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005727 * console might take a lot of time:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005728 */
5729 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07005730 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005731 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005732 } while_each_thread(g, p);
5733
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07005734 touch_all_softlockup_watchdogs();
5735
Ingo Molnardd41f592007-07-09 18:51:59 +02005736#ifdef CONFIG_SCHED_DEBUG
5737 sysrq_sched_debug_show();
5738#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005739 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005740 /*
5741 * Only show locks if all tasks are dumped:
5742 */
Shmulik Ladkani93335a22009-11-25 15:23:41 +02005743 if (!state_filter)
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005744 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005745}
5746
Ingo Molnar1df21052007-07-09 18:51:58 +02005747void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5748{
Ingo Molnardd41f592007-07-09 18:51:59 +02005749 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02005750}
5751
Ingo Molnarf340c0d2005-06-28 16:40:42 +02005752/**
5753 * init_idle - set up an idle thread for a given CPU
5754 * @idle: task in question
5755 * @cpu: cpu the idle task belongs to
5756 *
5757 * NOTE: this function does not set the idle thread's NEED_RESCHED
5758 * flag, to make booting more robust.
5759 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07005760void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005761{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005762 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005763 unsigned long flags;
5764
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005765 raw_spin_lock_irqsave(&rq->lock, flags);
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01005766
Ingo Molnardd41f592007-07-09 18:51:59 +02005767 __sched_fork(idle);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01005768 idle->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02005769 idle->se.exec_start = sched_clock();
5770
Rusty Russell96f874e2008-11-25 02:35:14 +10305771 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005772 /*
5773 * We're having a chicken and egg problem, even though we are
5774 * holding rq->lock, the cpu isn't yet set to this cpu so the
5775 * lockdep check in task_group() will fail.
5776 *
5777 * Similar case to sched_fork(). / Alternatively we could
5778 * use task_rq_lock() here and obtain the other rq->lock.
5779 *
5780 * Silence PROVE_RCU
5781 */
5782 rcu_read_lock();
Ingo Molnardd41f592007-07-09 18:51:59 +02005783 __set_task_cpu(idle, cpu);
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005784 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005785
Linus Torvalds1da177e2005-04-16 15:20:36 -07005786 rq->curr = rq->idle = idle;
Peter Zijlstra3ca7a442011-04-05 17:23:40 +02005787#if defined(CONFIG_SMP)
5788 idle->on_cpu = 1;
Nick Piggin4866cde2005-06-25 14:57:23 -07005789#endif
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005790 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005791
5792 /* Set the preempt count _outside_ the spinlocks! */
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005793#if defined(CONFIG_PREEMPT)
5794 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5795#else
Al Viroa1261f52005-11-13 16:06:55 -08005796 task_thread_info(idle)->preempt_count = 0;
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005797#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02005798 /*
5799 * The idle tasks have their own, simple scheduling class:
5800 */
5801 idle->sched_class = &idle_sched_class;
Steven Rostedt868baf02011-02-10 21:26:13 -05005802 ftrace_graph_init_idle_task(idle, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005803}
5804
5805/*
5806 * In a system that switches off the HZ timer nohz_cpu_mask
5807 * indicates which cpus entered this state. This is used
5808 * in the rcu update to wait only for active cpus. For systems
5809 * which do not switch off the HZ timer nohz_cpu_mask should
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305810 * always be CPU_BITS_NONE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811 */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305812cpumask_var_t nohz_cpu_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005813
Ingo Molnar19978ca2007-11-09 22:39:38 +01005814/*
5815 * Increase the granularity value when there are more CPUs,
5816 * because with more CPUs the 'effective latency' as visible
5817 * to users decreases. But the relationship is not linear,
5818 * so pick a second-best guess by going with the log2 of the
5819 * number of CPUs.
5820 *
5821 * This idea comes from the SD scheduler of Con Kolivas:
5822 */
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005823static int get_update_sysctl_factor(void)
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005824{
Mike Galbraith4ca3ef72009-12-10 09:25:53 +01005825 unsigned int cpus = min_t(int, num_online_cpus(), 8);
Christian Ehrhardt1983a922009-11-30 12:16:47 +01005826 unsigned int factor;
5827
5828 switch (sysctl_sched_tunable_scaling) {
5829 case SCHED_TUNABLESCALING_NONE:
5830 factor = 1;
5831 break;
5832 case SCHED_TUNABLESCALING_LINEAR:
5833 factor = cpus;
5834 break;
5835 case SCHED_TUNABLESCALING_LOG:
5836 default:
5837 factor = 1 + ilog2(cpus);
5838 break;
5839 }
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005840
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005841 return factor;
5842}
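/*
 * Worked example: under the default SCHED_TUNABLESCALING_LOG policy an
 * 8-CPU (or larger, since cpus is clamped to 8 above) machine gets
 * factor = 1 + ilog2(8) = 4, so e.g. a normalized 0.75ms minimum
 * granularity is scaled to 3ms by update_sysctl() below.
 */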
5843
5844static void update_sysctl(void)
5845{
5846 unsigned int factor = get_update_sysctl_factor();
5847
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005848#define SET_SYSCTL(name) \
5849 (sysctl_##name = (factor) * normalized_sysctl_##name)
5850 SET_SYSCTL(sched_min_granularity);
5851 SET_SYSCTL(sched_latency);
5852 SET_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005853#undef SET_SYSCTL
5854}
5855
Ingo Molnar19978ca2007-11-09 22:39:38 +01005856static inline void sched_init_granularity(void)
5857{
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005858 update_sysctl();
Ingo Molnar19978ca2007-11-09 22:39:38 +01005859}
5860
Linus Torvalds1da177e2005-04-16 15:20:36 -07005861#ifdef CONFIG_SMP
5862/*
5863 * This is how migration works:
5864 *
Tejun Heo969c7922010-05-06 18:49:21 +02005865 * 1) we invoke migration_cpu_stop() on the target CPU using
5866 * stop_one_cpu().
5867 * 2) stopper starts to run (implicitly forcing the migrated thread
5868 * off the CPU)
5869 * 3) it checks whether the migrated task is still in the wrong runqueue.
5870 * 4) if it's in the wrong runqueue then the migration thread removes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005871 * it and puts it into the right queue.
Tejun Heo969c7922010-05-06 18:49:21 +02005872 * 5) stopper completes and stop_one_cpu() returns and the migration
5873 * is done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005874 */
5875
5876/*
5877 * Change a given task's CPU affinity. Migrate the thread to a
5878 * proper CPU and schedule it away if the CPU it's executing on
5879 * is removed from the allowed bitmask.
5880 *
5881 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005882 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07005883 * call is not atomic; no spinlocks may be held.
5884 */
Rusty Russell96f874e2008-11-25 02:35:14 +10305885int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005886{
5887 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005888 struct rq *rq;
Tejun Heo969c7922010-05-06 18:49:21 +02005889 unsigned int dest_cpu;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005890 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005891
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005892 raw_spin_lock_irqsave(&p->pi_lock, flags);
5893 rq = __task_rq_lock(p);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005894
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005895 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005896 ret = -EINVAL;
5897 goto out;
5898 }
5899
David Rientjes9985b0b2008-06-05 12:57:11 -07005900 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
Rusty Russell96f874e2008-11-25 02:35:14 +10305901 !cpumask_equal(&p->cpus_allowed, new_mask))) {
David Rientjes9985b0b2008-06-05 12:57:11 -07005902 ret = -EINVAL;
5903 goto out;
5904 }
5905
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005906 if (p->sched_class->set_cpus_allowed)
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005907 p->sched_class->set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005908 else {
Rusty Russell96f874e2008-11-25 02:35:14 +10305909 cpumask_copy(&p->cpus_allowed, new_mask);
5910 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005911 }
5912
Linus Torvalds1da177e2005-04-16 15:20:36 -07005913 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10305914 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005915 goto out;
5916
Tejun Heo969c7922010-05-06 18:49:21 +02005917 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
Peter Zijlstra7608dec2011-04-05 17:23:46 +02005918 if (need_migrate_task(p)) {
Tejun Heo969c7922010-05-06 18:49:21 +02005919 struct migration_arg arg = { p, dest_cpu };
Linus Torvalds1da177e2005-04-16 15:20:36 -07005920 /* Need help from migration thread: drop lock and wait. */
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005921 __task_rq_unlock(rq);
5922 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005923 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005924 tlb_migrate_finish(p->mm);
5925 return 0;
5926 }
5927out:
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005928 __task_rq_unlock(rq);
5929 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005930
Linus Torvalds1da177e2005-04-16 15:20:36 -07005931 return ret;
5932}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005933EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
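/*
 * Usage sketch (hypothetical helper, guarded out): restricting a task to
 * the CPUs of one NUMA node via the topology helper cpumask_of_node().
 */
#if 0
static int example_bind_to_node(struct task_struct *p, int node)
{
	return set_cpus_allowed_ptr(p, cpumask_of_node(node));
}
#endif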
Linus Torvalds1da177e2005-04-16 15:20:36 -07005934
5935/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005936 * Move (not current) task off this cpu, onto dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07005937 * this because either it can't run here any more (set_cpus_allowed()
5938 * away from this CPU, or CPU going down), or because we're
5939 * attempting to rebalance this task on exec (sched_exec).
5940 *
5941 * So we race with normal scheduler movements, but that's OK, as long
5942 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07005943 *
5944 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07005946static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005947{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005948 struct rq *rq_dest, *rq_src;
Peter Zijlstrae2912002009-12-16 18:04:36 +01005949 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005950
Max Krasnyanskye761b772008-07-15 04:43:49 -07005951 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07005952 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005953
5954 rq_src = cpu_rq(src_cpu);
5955 rq_dest = cpu_rq(dest_cpu);
5956
5957 double_rq_lock(rq_src, rq_dest);
5958 /* Already moved. */
5959 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005960 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005961 /* Affinity changed (again). */
Rusty Russell96f874e2008-11-25 02:35:14 +10305962 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005963 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005964
Peter Zijlstrae2912002009-12-16 18:04:36 +01005965 /*
5966 * If we're not on a rq, the next wake-up will ensure we're
5967 * placed properly.
5968 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02005969 if (p->on_rq) {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005970 deactivate_task(rq_src, p, 0);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005971 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005972 activate_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02005973 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005974 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005975done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07005976 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005977fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005978 double_rq_unlock(rq_src, rq_dest);
Kirill Korotaevefc30812006-06-27 02:54:32 -07005979 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005980}
5981
5982/*
Tejun Heo969c7922010-05-06 18:49:21 +02005983 * migration_cpu_stop - this will be executed by a highprio stopper thread
5984 * and performs thread migration by bumping thread off CPU then
5985 * 'pushing' onto another runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005986 */
Tejun Heo969c7922010-05-06 18:49:21 +02005987static int migration_cpu_stop(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005988{
Tejun Heo969c7922010-05-06 18:49:21 +02005989 struct migration_arg *arg = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005990
Tejun Heo969c7922010-05-06 18:49:21 +02005991 /*
5992 * The original target cpu might have gone down and we might
5993 * be on another cpu but it doesn't matter.
5994 */
5995 local_irq_disable();
5996 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5997 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005998 return 0;
5999}
6000
6001#ifdef CONFIG_HOTPLUG_CPU
Linus Torvalds1da177e2005-04-16 15:20:36 -07006002
Ingo Molnar48f24c42006-07-03 00:25:40 -07006003/*
6004 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07006005 * offline.
6006 */
6007void idle_task_exit(void)
6008{
6009 struct mm_struct *mm = current->active_mm;
6010
6011 BUG_ON(cpu_online(smp_processor_id()));
6012
6013 if (mm != &init_mm)
6014 switch_mm(mm, &init_mm, current);
6015 mmdrop(mm);
6016}
6017
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006018/*
6019 * While a dead CPU has no uninterruptible tasks queued at this point,
6020 * it might still have a nonzero ->nr_uninterruptible counter, because
6021 * for performance reasons the counter is not strictly tracking tasks to
6022 * their home CPUs. So we just add the counter to another CPU's counter,
6023 * to keep the global sum constant after CPU-down:
6024 */
6025static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006026{
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006027 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006028
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006029 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6030 rq_src->nr_uninterruptible = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006031}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006032
6033/*
6034 * remove the tasks which were accounted by rq from calc_load_tasks.
6035 */
6036static void calc_global_load_remove(struct rq *rq)
6037{
6038 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
Thomas Gleixnera468d382009-07-17 14:15:46 +02006039 rq->calc_load_active = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006040}
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006041
6042/*
6043 * Migrate all tasks from the rq, sleeping tasks will be migrated by
6044 * try_to_wake_up()->select_task_rq().
6045 *
6046 * Called with rq->lock held even though we're in stop_machine() and
6047 * there's no concurrency possible, we hold the required locks anyway
6048 * because of lock validation efforts.
6049 */
6050static void migrate_tasks(unsigned int dead_cpu)
6051{
6052 struct rq *rq = cpu_rq(dead_cpu);
6053 struct task_struct *next, *stop = rq->stop;
6054 int dest_cpu;
6055
6056 /*
6057 * Fudge the rq selection such that the below task selection loop
6058 * doesn't get stuck on the currently eligible stop task.
6059 *
6060 * We're currently inside stop_machine() and the rq is either stuck
6061 * in the stop_machine_cpu_stop() loop, or we're executing this code,
6062 * either way we should never end up calling schedule() until we're
6063 * done here.
6064 */
6065 rq->stop = NULL;
6066
6067 for ( ; ; ) {
6068 /*
6069 * There's this thread running, bail when that's the only
6070 * remaining thread.
6071 */
6072 if (rq->nr_running == 1)
6073 break;
6074
6075 next = pick_next_task(rq);
6076 BUG_ON(!next);
6077 next->sched_class->put_prev_task(rq, next);
6078
6079 /* Find suitable destination for @next, with force if needed. */
6080 dest_cpu = select_fallback_rq(dead_cpu, next);
6081 raw_spin_unlock(&rq->lock);
6082
6083 __migrate_task(next, dead_cpu, dest_cpu);
6084
6085 raw_spin_lock(&rq->lock);
6086 }
6087
6088 rq->stop = stop;
6089}
6090
Linus Torvalds1da177e2005-04-16 15:20:36 -07006091#endif /* CONFIG_HOTPLUG_CPU */
6092
Nick Piggine692ab52007-07-26 13:40:43 +02006093#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6094
6095static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006096 {
6097 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006098 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006099 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006100 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006101};
6102
6103static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006104 {
6105 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006106 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006107 .child = sd_ctl_dir,
6108 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006109 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006110};
6111
6112static struct ctl_table *sd_alloc_ctl_entry(int n)
6113{
6114 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02006115 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02006116
Nick Piggine692ab52007-07-26 13:40:43 +02006117 return entry;
6118}
6119
Milton Miller6382bc92007-10-15 17:00:19 +02006120static void sd_free_ctl_entry(struct ctl_table **tablep)
6121{
Milton Millercd790072007-10-17 16:55:11 +02006122 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02006123
Milton Millercd790072007-10-17 16:55:11 +02006124 /*
6125 * In the intermediate directories, both the child directory and
6126 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006127 * will always be set. In the lowest directory the names are
Milton Millercd790072007-10-17 16:55:11 +02006128 * static strings and all have proc handlers.
6129 */
6130 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02006131 if (entry->child)
6132 sd_free_ctl_entry(&entry->child);
Milton Millercd790072007-10-17 16:55:11 +02006133 if (entry->proc_handler == NULL)
6134 kfree(entry->procname);
6135 }
Milton Miller6382bc92007-10-15 17:00:19 +02006136
6137 kfree(*tablep);
6138 *tablep = NULL;
6139}
6140
Nick Piggine692ab52007-07-26 13:40:43 +02006141static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02006142set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02006143 const char *procname, void *data, int maxlen,
6144 mode_t mode, proc_handler *proc_handler)
6145{
Nick Piggine692ab52007-07-26 13:40:43 +02006146 entry->procname = procname;
6147 entry->data = data;
6148 entry->maxlen = maxlen;
6149 entry->mode = mode;
6150 entry->proc_handler = proc_handler;
6151}
6152
6153static struct ctl_table *
6154sd_alloc_ctl_domain_table(struct sched_domain *sd)
6155{
Ingo Molnara5d8c342008-10-09 11:35:51 +02006156 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02006157
Milton Millerad1cdc12007-10-15 17:00:19 +02006158 if (table == NULL)
6159 return NULL;
6160
Alexey Dobriyane0361852007-08-09 11:16:46 +02006161 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006162 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006163 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006164 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006165 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006166 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006167 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006168 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006169 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006170 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006171 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006172 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006173 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006174 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006175 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02006176 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006177 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02006178 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006179 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02006180 &sd->cache_nice_tries,
6181 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006182 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02006183 sizeof(int), 0644, proc_dointvec_minmax);
Ingo Molnara5d8c342008-10-09 11:35:51 +02006184 set_table_entry(&table[11], "name", sd->name,
6185 CORENAME_MAX_SIZE, 0444, proc_dostring);
6186 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02006187
6188 return table;
6189}
6190
Ingo Molnar9a4e7152007-11-28 15:52:56 +01006191static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02006192{
6193 struct ctl_table *entry, *table;
6194 struct sched_domain *sd;
6195 int domain_num = 0, i;
6196 char buf[32];
6197
6198 for_each_domain(cpu, sd)
6199 domain_num++;
6200 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02006201 if (table == NULL)
6202 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02006203
6204 i = 0;
6205 for_each_domain(cpu, sd) {
6206 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006207 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006208 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006209 entry->child = sd_alloc_ctl_domain_table(sd);
6210 entry++;
6211 i++;
6212 }
6213 return table;
6214}
6215
6216static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02006217static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006218{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006219 int i, cpu_num = num_possible_cpus();
Nick Piggine692ab52007-07-26 13:40:43 +02006220 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6221 char buf[32];
6222
Milton Miller73785472007-10-24 18:23:48 +02006223 WARN_ON(sd_ctl_dir[0].child);
6224 sd_ctl_dir[0].child = entry;
6225
Milton Millerad1cdc12007-10-15 17:00:19 +02006226 if (entry == NULL)
6227 return;
6228
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006229 for_each_possible_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02006230 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006231 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006232 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006233 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02006234 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02006235 }
Milton Miller73785472007-10-24 18:23:48 +02006236
6237 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02006238 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6239}
Milton Miller6382bc92007-10-15 17:00:19 +02006240
Milton Miller73785472007-10-24 18:23:48 +02006241/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02006242static void unregister_sched_domain_sysctl(void)
6243{
Milton Miller73785472007-10-24 18:23:48 +02006244 if (sd_sysctl_header)
6245 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02006246 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02006247 if (sd_ctl_dir[0].child)
6248 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02006249}
Nick Piggine692ab52007-07-26 13:40:43 +02006250#else
Milton Miller6382bc92007-10-15 17:00:19 +02006251static void register_sched_domain_sysctl(void)
6252{
6253}
6254static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006255{
6256}
6257#endif
6258
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006259static void set_rq_online(struct rq *rq)
6260{
6261 if (!rq->online) {
6262 const struct sched_class *class;
6263
Rusty Russellc6c49272008-11-25 02:35:05 +10306264 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006265 rq->online = 1;
6266
6267 for_each_class(class) {
6268 if (class->rq_online)
6269 class->rq_online(rq);
6270 }
6271 }
6272}
6273
6274static void set_rq_offline(struct rq *rq)
6275{
6276 if (rq->online) {
6277 const struct sched_class *class;
6278
6279 for_each_class(class) {
6280 if (class->rq_offline)
6281 class->rq_offline(rq);
6282 }
6283
Rusty Russellc6c49272008-11-25 02:35:05 +10306284 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006285 rq->online = 0;
6286 }
6287}
6288
Linus Torvalds1da177e2005-04-16 15:20:36 -07006289/*
6290 * migration_call - callback that gets triggered when a CPU is added or
6291 * torn down. Here we update the runqueue state for the hotplug transition.
6292 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006293static int __cpuinit
6294migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006295{
Ingo Molnar48f24c42006-07-03 00:25:40 -07006296 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006297 unsigned long flags;
Tejun Heo969c7922010-05-06 18:49:21 +02006298 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006299
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006300 switch (action & ~CPU_TASKS_FROZEN) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07006301
Linus Torvalds1da177e2005-04-16 15:20:36 -07006302 case CPU_UP_PREPARE:
Thomas Gleixnera468d382009-07-17 14:15:46 +02006303 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006304 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006305
Linus Torvalds1da177e2005-04-16 15:20:36 -07006306 case CPU_ONLINE:
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006307 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006308 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006309 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306310 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006311
6312 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006313 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006314 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006315 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006316
Linus Torvalds1da177e2005-04-16 15:20:36 -07006317#ifdef CONFIG_HOTPLUG_CPU
Gregory Haskins08f503b2008-03-10 17:59:11 -04006318 case CPU_DYING:
Gregory Haskins57d885f2008-01-25 21:08:18 +01006319 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006320 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006321 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306322 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006323 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006324 }
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006325 migrate_tasks(cpu);
6326 BUG_ON(rq->nr_running != 1); /* the migration thread */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006327 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006328
6329 migrate_nr_uninterruptible(rq);
6330 calc_global_load_remove(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006331 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006332#endif
6333 }
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006334
6335 update_max_interval();
6336
Linus Torvalds1da177e2005-04-16 15:20:36 -07006337 return NOTIFY_OK;
6338}
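/*
 * Illustrative sketch (not part of the original file): the hotplug core
 * invokes this notifier in a fixed order, so for a CPU coming up the
 * actions taken above are roughly:
 *
 *   CPU_UP_PREPARE  -> reset rq->calc_load_update
 *   CPU_ONLINE      -> set_rq_online() under rq->lock
 *
 * and for a CPU going down (CONFIG_HOTPLUG_CPU):
 *
 *   CPU_DYING       -> set_rq_offline(), migrate_tasks(), load folding
 */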
6339
Paul Mackerrasf38b0822009-06-02 21:05:16 +10006340/*
6341 * Register at high priority so that task migration (migrate_all_tasks)
6342 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006343 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006344 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07006345static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006346 .notifier_call = migration_call,
Tejun Heo50a323b2010-06-08 21:40:36 +02006347 .priority = CPU_PRI_MIGRATION,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006348};
6349
Tejun Heo3a101d02010-06-08 21:40:36 +02006350static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6351 unsigned long action, void *hcpu)
6352{
6353 switch (action & ~CPU_TASKS_FROZEN) {
6354 case CPU_ONLINE:
6355 case CPU_DOWN_FAILED:
6356 set_cpu_active((long)hcpu, true);
6357 return NOTIFY_OK;
6358 default:
6359 return NOTIFY_DONE;
6360 }
6361}
6362
6363static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6364 unsigned long action, void *hcpu)
6365{
6366 switch (action & ~CPU_TASKS_FROZEN) {
6367 case CPU_DOWN_PREPARE:
6368 set_cpu_active((long)hcpu, false);
6369 return NOTIFY_OK;
6370 default:
6371 return NOTIFY_DONE;
6372 }
6373}
6374
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006375static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006376{
6377 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07006378 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006379
Tejun Heo3a101d02010-06-08 21:40:36 +02006380 /* Initialize migration for the boot CPU */
Akinobu Mita07dccf32006-09-29 02:00:22 -07006381 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6382 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006383 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6384 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006385
Tejun Heo3a101d02010-06-08 21:40:36 +02006386 /* Register cpu active notifiers */
6387 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6388 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6389
Thomas Gleixnera004cd42009-07-21 09:54:05 +02006390 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006391}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006392early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006393#endif
6394
6395#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07006396
Ingo Molnar3e9830d2007-10-15 17:00:13 +02006397#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006398
Mike Travisf6630112009-11-17 18:22:15 -06006399static __read_mostly int sched_domain_debug_enabled;
6400
6401static int __init sched_domain_debug_setup(char *str)
6402{
6403 sched_domain_debug_enabled = 1;
6404
6405 return 0;
6406}
6407early_param("sched_debug", sched_domain_debug_setup);
6408
Mike Travis7c16ec52008-04-04 18:11:11 -07006409static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10306410 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006411{
6412 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07006413 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006414
Rusty Russell968ea6d2008-12-13 21:55:51 +10306415 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10306416 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006417
6418 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6419
6420 if (!(sd->flags & SD_LOAD_BALANCE)) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006421 printk("does not load-balance\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006422 if (sd->parent)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006423			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6424					" has parent\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006425 return -1;
6426 }
6427
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006428 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006429
Rusty Russell758b2cd2008-11-25 02:35:04 +10306430 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006431 printk(KERN_ERR "ERROR: domain->span does not contain "
6432 "CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006433 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10306434 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006435 printk(KERN_ERR "ERROR: domain->groups does not contain"
6436 " CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006437 }
6438
6439 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6440 do {
6441 if (!group) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006442 printk("\n");
6443 printk(KERN_ERR "ERROR: group is NULL\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006444 break;
6445 }
6446
Peter Zijlstra18a38852009-09-01 10:34:39 +02006447 if (!group->cpu_power) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006448 printk(KERN_CONT "\n");
6449 printk(KERN_ERR "ERROR: domain->cpu_power not "
6450 "set\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006451 break;
6452 }
6453
Rusty Russell758b2cd2008-11-25 02:35:04 +10306454 if (!cpumask_weight(sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006455 printk(KERN_CONT "\n");
6456 printk(KERN_ERR "ERROR: empty group\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006457 break;
6458 }
6459
Rusty Russell758b2cd2008-11-25 02:35:04 +10306460 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006461 printk(KERN_CONT "\n");
6462 printk(KERN_ERR "ERROR: repeated CPUs\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006463 break;
6464 }
6465
Rusty Russell758b2cd2008-11-25 02:35:04 +10306466 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006467
Rusty Russell968ea6d2008-12-13 21:55:51 +10306468 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306469
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006470 printk(KERN_CONT " %s", str);
Peter Zijlstra18a38852009-09-01 10:34:39 +02006471 if (group->cpu_power != SCHED_LOAD_SCALE) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006472 printk(KERN_CONT " (cpu_power = %d)",
6473 group->cpu_power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306474 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006475
6476 group = group->next;
6477 } while (group != sd->groups);
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006478 printk(KERN_CONT "\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006479
Rusty Russell758b2cd2008-11-25 02:35:04 +10306480 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006481 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006482
Rusty Russell758b2cd2008-11-25 02:35:04 +10306483 if (sd->parent &&
6484 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006485 printk(KERN_ERR "ERROR: parent span is not a superset "
6486 "of domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006487 return 0;
6488}
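/*
 * Illustrative sketch (not part of the original file): for a two-thread
 * SMT sibling domain on cpu0, the printks above would emit something like:
 *
 *   CPU0 attaching sched-domain:
 *    domain 0: span 0-1 level SIBLING
 *     groups: 0 1
 *
 * (the "CPU%d attaching" line comes from sched_domain_debug() below).
 */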
6489
Linus Torvalds1da177e2005-04-16 15:20:36 -07006490static void sched_domain_debug(struct sched_domain *sd, int cpu)
6491{
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306492 cpumask_var_t groupmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006493 int level = 0;
6494
Mike Travisf6630112009-11-17 18:22:15 -06006495 if (!sched_domain_debug_enabled)
6496 return;
6497
Nick Piggin41c7ce92005-06-25 14:57:24 -07006498 if (!sd) {
6499 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6500 return;
6501 }
6502
Linus Torvalds1da177e2005-04-16 15:20:36 -07006503 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6504
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306505 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006506 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6507 return;
6508 }
6509
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006510 for (;;) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006511 if (sched_domain_debug_one(sd, cpu, level, groupmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006512 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006513 level++;
6514 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08006515 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006516 break;
6517 }
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306518 free_cpumask_var(groupmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006519}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006520#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006521# define sched_domain_debug(sd, cpu) do { } while (0)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006522#endif /* CONFIG_SCHED_DEBUG */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006523
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006524static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006525{
Rusty Russell758b2cd2008-11-25 02:35:04 +10306526 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006527 return 1;
6528
6529 /* Following flags need at least 2 groups */
6530 if (sd->flags & (SD_LOAD_BALANCE |
6531 SD_BALANCE_NEWIDLE |
6532 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006533 SD_BALANCE_EXEC |
6534 SD_SHARE_CPUPOWER |
6535 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006536 if (sd->groups != sd->groups->next)
6537 return 0;
6538 }
6539
6540 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006541 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006542 return 0;
6543
6544 return 1;
6545}
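/*
 * Illustrative sketch (not part of the original file): a SIBLING domain
 * built on a CPU without hyperthreading spans exactly one CPU, so the
 * cpumask_weight() test above returns 1 and cpu_attach_domain() below
 * will splice that level out of the hierarchy.
 */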
6546
Ingo Molnar48f24c42006-07-03 00:25:40 -07006547static int
6548sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006549{
6550 unsigned long cflags = sd->flags, pflags = parent->flags;
6551
6552 if (sd_degenerate(parent))
6553 return 1;
6554
Rusty Russell758b2cd2008-11-25 02:35:04 +10306555 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006556 return 0;
6557
Suresh Siddha245af2c2005-06-25 14:57:25 -07006558 /* Flags needing groups don't count if only 1 group in parent */
6559 if (parent->groups == parent->groups->next) {
6560 pflags &= ~(SD_LOAD_BALANCE |
6561 SD_BALANCE_NEWIDLE |
6562 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006563 SD_BALANCE_EXEC |
6564 SD_SHARE_CPUPOWER |
6565 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08006566 if (nr_node_ids == 1)
6567 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006568 }
6569 if (~cflags & pflags)
6570 return 0;
6571
6572 return 1;
6573}
6574
Rusty Russellc6c49272008-11-25 02:35:05 +10306575static void free_rootdomain(struct root_domain *rd)
6576{
Peter Zijlstra047106a2009-11-16 10:28:09 +01006577 synchronize_sched();
6578
Rusty Russell68e74562008-11-25 02:35:13 +10306579 cpupri_cleanup(&rd->cpupri);
6580
Rusty Russellc6c49272008-11-25 02:35:05 +10306581 free_cpumask_var(rd->rto_mask);
6582 free_cpumask_var(rd->online);
6583 free_cpumask_var(rd->span);
6584 kfree(rd);
6585}
6586
Gregory Haskins57d885f2008-01-25 21:08:18 +01006587static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6588{
Ingo Molnara0490fa2009-02-12 11:35:40 +01006589 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006590 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006591
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006592 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006593
6594 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01006595 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006596
Rusty Russellc6c49272008-11-25 02:35:05 +10306597 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006598 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006599
Rusty Russellc6c49272008-11-25 02:35:05 +10306600 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01006601
Ingo Molnara0490fa2009-02-12 11:35:40 +01006602 /*
6603	 * If we don't want to free the old_rd yet then
6604 * set old_rd to NULL to skip the freeing later
6605 * in this function:
6606 */
6607 if (!atomic_dec_and_test(&old_rd->refcount))
6608 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006609 }
6610
6611 atomic_inc(&rd->refcount);
6612 rq->rd = rd;
6613
Rusty Russellc6c49272008-11-25 02:35:05 +10306614 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04006615 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006616 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006617
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006618 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01006619
6620 if (old_rd)
6621 free_rootdomain(old_rd);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006622}
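/*
 * Illustrative sketch (not part of the original file): the refcount
 * handoff above means the old root domain is only freed once the last
 * runqueue detaches from it, e.g. with two runqueues attached:
 *
 *   rq0 detaches: old_rd refcount 2 -> 1, old_rd kept
 *   rq1 detaches: old_rd refcount 1 -> 0, old_rd freed via free_rootdomain()
 */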
6623
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006624static int init_rootdomain(struct root_domain *rd)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006625{
6626 memset(rd, 0, sizeof(*rd));
6627
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006628 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
Li Zefan0c910d22009-01-06 17:39:06 +08006629 goto out;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006630 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306631 goto free_span;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006632 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306633 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02006634
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006635 if (cpupri_init(&rd->cpupri) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10306636 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10306637 return 0;
6638
Rusty Russell68e74562008-11-25 02:35:13 +10306639free_rto_mask:
6640 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10306641free_online:
6642 free_cpumask_var(rd->online);
6643free_span:
6644 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08006645out:
Rusty Russellc6c49272008-11-25 02:35:05 +10306646 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006647}
6648
6649static void init_defrootdomain(void)
6650{
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006651 init_rootdomain(&def_root_domain);
Rusty Russellc6c49272008-11-25 02:35:05 +10306652
Gregory Haskins57d885f2008-01-25 21:08:18 +01006653 atomic_set(&def_root_domain.refcount, 1);
6654}
6655
Gregory Haskinsdc938522008-01-25 21:08:26 +01006656static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006657{
6658 struct root_domain *rd;
6659
6660 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6661 if (!rd)
6662 return NULL;
6663
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006664 if (init_rootdomain(rd) != 0) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306665 kfree(rd);
6666 return NULL;
6667 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006668
6669 return rd;
6670}
6671
Linus Torvalds1da177e2005-04-16 15:20:36 -07006672/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01006673 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07006674 * hold the hotplug lock.
6675 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01006676static void
6677cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006679 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006680 struct sched_domain *tmp;
6681
Peter Zijlstra669c55e2010-04-16 14:59:29 +02006682 for (tmp = sd; tmp; tmp = tmp->parent)
6683 tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6684
Suresh Siddha245af2c2005-06-25 14:57:25 -07006685 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08006686 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006687 struct sched_domain *parent = tmp->parent;
6688 if (!parent)
6689 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08006690
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006691 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006692 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006693 if (parent->parent)
6694 parent->parent->child = tmp;
Li Zefanf29c9b12008-11-06 09:45:16 +08006695 } else
6696 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006697 }
6698
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006699 if (sd && sd_degenerate(sd)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006700 sd = sd->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006701 if (sd)
6702 sd->child = NULL;
6703 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006704
6705 sched_domain_debug(sd, cpu);
6706
Gregory Haskins57d885f2008-01-25 21:08:18 +01006707 rq_attach_root(rq, rd);
Nick Piggin674311d2005-06-25 14:57:27 -07006708 rcu_assign_pointer(rq->sd, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006709}
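/*
 * Illustrative sketch (not part of the original file): on a single-socket,
 * non-SMT, non-NUMA machine every level except the physical CPU domain
 * tends to degenerate, so the loop above collapses e.g.
 *
 *   SIBLING -> MC -> CPU   into   MC -> CPU   or just   CPU
 *
 * before the surviving hierarchy is published with rcu_assign_pointer().
 */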
6710
6711/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10306712static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006713
6714/* Setup the mask of cpus configured for isolated domains */
6715static int __init isolated_cpu_setup(char *str)
6716{
Rusty Russellbdddd292009-12-02 14:09:16 +10306717 alloc_bootmem_cpumask_var(&cpu_isolated_map);
Rusty Russell968ea6d2008-12-13 21:55:51 +10306718 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006719 return 1;
6720}
6721
Ingo Molnar8927f492007-10-15 17:00:13 +02006722__setup("isolcpus=", isolated_cpu_setup);
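/*
 * Illustrative usage (not part of the original file): booting with e.g.
 *
 *   isolcpus=2,3
 *
 * parses "2,3" into cpu_isolated_map above, keeping those CPUs out of the
 * sched domains built in this file; tasks then run there only when bound
 * explicitly, e.g. via sched_setaffinity().
 */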
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723
6724/*
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006725 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6726 * to a function which identifies what group (along with sched group) a CPU
Rusty Russell96f874e2008-11-25 02:35:14 +10306727 * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
6728 * (due to the fact that we keep track of groups covered with a struct cpumask).
Linus Torvalds1da177e2005-04-16 15:20:36 -07006729 *
6730 * init_sched_build_groups will build a circular linked list of the groups
6731 * covered by the given span, and will set each group's ->cpumask correctly,
6732 * and ->cpu_power to 0.
6733 */
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006734static void
Rusty Russell96f874e2008-11-25 02:35:14 +10306735init_sched_build_groups(const struct cpumask *span,
6736 const struct cpumask *cpu_map,
6737 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
Mike Travis7c16ec52008-04-04 18:11:11 -07006738 struct sched_group **sg,
Rusty Russell96f874e2008-11-25 02:35:14 +10306739 struct cpumask *tmpmask),
6740 struct cpumask *covered, struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006741{
6742 struct sched_group *first = NULL, *last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006743 int i;
6744
Rusty Russell96f874e2008-11-25 02:35:14 +10306745 cpumask_clear(covered);
Mike Travis7c16ec52008-04-04 18:11:11 -07006746
Rusty Russellabcd0832008-11-25 02:35:02 +10306747 for_each_cpu(i, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006748 struct sched_group *sg;
Mike Travis7c16ec52008-04-04 18:11:11 -07006749 int group = group_fn(i, cpu_map, &sg, tmpmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006750 int j;
6751
Rusty Russell758b2cd2008-11-25 02:35:04 +10306752 if (cpumask_test_cpu(i, covered))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006753 continue;
6754
Rusty Russell758b2cd2008-11-25 02:35:04 +10306755 cpumask_clear(sched_group_cpus(sg));
Peter Zijlstra18a38852009-09-01 10:34:39 +02006756 sg->cpu_power = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006757
Rusty Russellabcd0832008-11-25 02:35:02 +10306758 for_each_cpu(j, span) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006759 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006760 continue;
6761
Rusty Russell96f874e2008-11-25 02:35:14 +10306762 cpumask_set_cpu(j, covered);
Rusty Russell758b2cd2008-11-25 02:35:04 +10306763 cpumask_set_cpu(j, sched_group_cpus(sg));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006764 }
6765 if (!first)
6766 first = sg;
6767 if (last)
6768 last->next = sg;
6769 last = sg;
6770 }
6771 last->next = first;
6772}
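/*
 * Illustrative sketch (compiled out; not part of the original file): the
 * circular list built above is always walked with a do/while that stops
 * when it returns to the first group, mirroring the pattern used by
 * sched_domain_debug_one() and init_sched_groups_power() in this file.
 */
#if 0
static void example_walk_group_ring(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	do {
		/* e.g. inspect sched_group_cpus(sg) or sg->cpu_power */
		sg = sg->next;
	} while (sg != sd->groups);
}
#endif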
6773
John Hawkes9c1cfda2005-09-06 15:18:14 -07006774#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775
John Hawkes9c1cfda2005-09-06 15:18:14 -07006776#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08006777
John Hawkes9c1cfda2005-09-06 15:18:14 -07006778/**
6779 * find_next_best_node - find the next node to include in a sched_domain
6780 * @node: node whose sched_domain we're building
6781 * @used_nodes: nodes already in the sched_domain
6782 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006783 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07006784 * finds the closest node not already in the @used_nodes map.
6785 *
6786 * Should use nodemask_t.
6787 */
Mike Travisc5f59f02008-04-04 18:11:10 -07006788static int find_next_best_node(int node, nodemask_t *used_nodes)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006789{
6790 int i, n, val, min_val, best_node = 0;
6791
6792 min_val = INT_MAX;
6793
Mike Travis076ac2a2008-05-12 21:21:12 +02006794 for (i = 0; i < nr_node_ids; i++) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07006795 /* Start at @node */
Mike Travis076ac2a2008-05-12 21:21:12 +02006796 n = (node + i) % nr_node_ids;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006797
6798 if (!nr_cpus_node(n))
6799 continue;
6800
6801 /* Skip already used nodes */
Mike Travisc5f59f02008-04-04 18:11:10 -07006802 if (node_isset(n, *used_nodes))
John Hawkes9c1cfda2005-09-06 15:18:14 -07006803 continue;
6804
6805 /* Simple min distance search */
6806 val = node_distance(node, n);
6807
6808 if (val < min_val) {
6809 min_val = val;
6810 best_node = n;
6811 }
6812 }
6813
Mike Travisc5f59f02008-04-04 18:11:10 -07006814 node_set(best_node, *used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006815 return best_node;
6816}
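/*
 * Illustrative sketch (not part of the original file; distances invented):
 * on a 4-node machine with node_distance(0, n) = 10, 20, 20, 30 for
 * n = 0..3, and node 0 already marked used by the caller, repeated calls
 * starting from node 0 pick nodes in the order 1, 2, 3, marking each in
 * *used_nodes as it is consumed.
 */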
6817
6818/**
6819 * sched_domain_node_span - get a cpumask for a node's sched_domain
6820 * @node: node whose cpumask we're constructing
Randy Dunlap73486722008-04-22 10:07:22 -07006821 * @span: resulting cpumask
John Hawkes9c1cfda2005-09-06 15:18:14 -07006822 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006823 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07006824 * should be one that prevents unnecessary balancing, but also spreads tasks
6825 * out optimally.
6826 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306827static void sched_domain_node_span(int node, struct cpumask *span)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006828{
Mike Travisc5f59f02008-04-04 18:11:10 -07006829 nodemask_t used_nodes;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006830 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006831
Mike Travis6ca09df2008-12-31 18:08:45 -08006832 cpumask_clear(span);
Mike Travisc5f59f02008-04-04 18:11:10 -07006833 nodes_clear(used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006834
Mike Travis6ca09df2008-12-31 18:08:45 -08006835 cpumask_or(span, span, cpumask_of_node(node));
Mike Travisc5f59f02008-04-04 18:11:10 -07006836 node_set(node, used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006837
6838 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
Mike Travisc5f59f02008-04-04 18:11:10 -07006839 int next_node = find_next_best_node(node, &used_nodes);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006840
Mike Travis6ca09df2008-12-31 18:08:45 -08006841 cpumask_or(span, span, cpumask_of_node(next_node));
John Hawkes9c1cfda2005-09-06 15:18:14 -07006842 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006843}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006844#endif /* CONFIG_NUMA */
John Hawkes9c1cfda2005-09-06 15:18:14 -07006845
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006846int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006847
John Hawkes9c1cfda2005-09-06 15:18:14 -07006848/*
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306849 * The cpus mask in sched_group and sched_domain hangs off the end.
Ingo Molnar4200efd2009-05-19 09:22:19 +02006850 *
6851 * ( See the comments in include/linux/sched.h:struct sched_group
6852 * and struct sched_domain. )
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306853 */
6854struct static_sched_group {
6855 struct sched_group sg;
6856 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
6857};
6858
6859struct static_sched_domain {
6860 struct sched_domain sd;
6861 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
6862};
6863
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006864struct s_data {
6865#ifdef CONFIG_NUMA
6866 int sd_allnodes;
6867 cpumask_var_t domainspan;
6868 cpumask_var_t covered;
6869 cpumask_var_t notcovered;
6870#endif
6871 cpumask_var_t nodemask;
6872 cpumask_var_t this_sibling_map;
6873 cpumask_var_t this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02006874 cpumask_var_t this_book_map;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006875 cpumask_var_t send_covered;
6876 cpumask_var_t tmpmask;
6877 struct sched_group **sched_group_nodes;
6878 struct root_domain *rd;
6879};
6880
Andreas Herrmann2109b992009-08-18 12:53:00 +02006881enum s_alloc {
6882 sa_sched_groups = 0,
6883 sa_rootdomain,
6884 sa_tmpmask,
6885 sa_send_covered,
Heiko Carstens01a08542010-08-31 10:28:16 +02006886 sa_this_book_map,
Andreas Herrmann2109b992009-08-18 12:53:00 +02006887 sa_this_core_map,
6888 sa_this_sibling_map,
6889 sa_nodemask,
6890 sa_sched_group_nodes,
6891#ifdef CONFIG_NUMA
6892 sa_notcovered,
6893 sa_covered,
6894 sa_domainspan,
6895#endif
6896 sa_none,
6897};
6898
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306899/*
Ingo Molnar48f24c42006-07-03 00:25:40 -07006900 * SMT sched-domains:
John Hawkes9c1cfda2005-09-06 15:18:14 -07006901 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006902#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306903static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
Tejun Heo1871e522009-10-29 22:34:13 +09006904static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006905
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006906static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306907cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
6908 struct sched_group **sg, struct cpumask *unused)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006909{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006910 if (sg)
Tejun Heo1871e522009-10-29 22:34:13 +09006911 *sg = &per_cpu(sched_groups, cpu).sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006912 return cpu;
6913}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006914#endif /* CONFIG_SCHED_SMT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006915
Ingo Molnar48f24c42006-07-03 00:25:40 -07006916/*
6917 * multi-core sched-domains:
6918 */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006919#ifdef CONFIG_SCHED_MC
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306920static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
6921static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006922
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006923static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306924cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6925 struct sched_group **sg, struct cpumask *mask)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006926{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006927 int group;
Heiko Carstensf2698932010-08-31 10:28:15 +02006928#ifdef CONFIG_SCHED_SMT
Rusty Russellc69fc562009-03-13 14:49:46 +10306929 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306930 group = cpumask_first(mask);
Heiko Carstensf2698932010-08-31 10:28:15 +02006931#else
6932 group = cpu;
6933#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006934 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306935 *sg = &per_cpu(sched_group_core, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006936 return group;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006937}
Heiko Carstensf2698932010-08-31 10:28:15 +02006938#endif /* CONFIG_SCHED_MC */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006939
Heiko Carstens01a08542010-08-31 10:28:16 +02006940/*
6941 * book sched-domains:
6942 */
6943#ifdef CONFIG_SCHED_BOOK
6944static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
6945static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
6946
Linus Torvalds1da177e2005-04-16 15:20:36 -07006947static int
Heiko Carstens01a08542010-08-31 10:28:16 +02006948cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
6949 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006950{
Heiko Carstens01a08542010-08-31 10:28:16 +02006951 int group = cpu;
6952#ifdef CONFIG_SCHED_MC
6953 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
6954 group = cpumask_first(mask);
6955#elif defined(CONFIG_SCHED_SMT)
6956 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
6957 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006958#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02006959 if (sg)
6960 *sg = &per_cpu(sched_group_book, group).sg;
6961 return group;
6962}
6963#endif /* CONFIG_SCHED_BOOK */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006964
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306965static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
6966static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006967
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006968static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306969cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
6970 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006971{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006972 int group;
Heiko Carstens01a08542010-08-31 10:28:16 +02006973#ifdef CONFIG_SCHED_BOOK
6974 cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
6975 group = cpumask_first(mask);
6976#elif defined(CONFIG_SCHED_MC)
Mike Travis6ca09df2008-12-31 18:08:45 -08006977 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306978 group = cpumask_first(mask);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006979#elif defined(CONFIG_SCHED_SMT)
Rusty Russellc69fc562009-03-13 14:49:46 +10306980 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306981 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006982#else
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006983 group = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006984#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006985 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306986 *sg = &per_cpu(sched_group_phys, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006987 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006988}
6989
6990#ifdef CONFIG_NUMA
John Hawkes9c1cfda2005-09-06 15:18:14 -07006991/*
6992 * The init_sched_build_groups can't handle what we want to do with node
6993 * groups, so roll our own. Now each node has its own list of groups which
6994 * gets dynamically allocated.
6995 */
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006996static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
Mike Travis434d53b2008-04-04 18:11:04 -07006997static struct sched_group ***sched_group_nodes_bycpu;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006998
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006999static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307000static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007001
Rusty Russell96f874e2008-11-25 02:35:14 +10307002static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
7003 struct sched_group **sg,
7004 struct cpumask *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007005{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007006 int group;
7007
Mike Travis6ca09df2008-12-31 18:08:45 -08007008 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10307009 group = cpumask_first(nodemask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007010
7011 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307012 *sg = &per_cpu(sched_group_allnodes, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007013 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007014}
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007015
Siddha, Suresh B08069032006-03-27 01:15:23 -08007016static void init_numa_sched_groups_power(struct sched_group *group_head)
7017{
7018 struct sched_group *sg = group_head;
7019 int j;
7020
7021 if (!sg)
7022 return;
Andi Kleen3a5c3592007-10-15 17:00:14 +02007023 do {
Rusty Russell758b2cd2008-11-25 02:35:04 +10307024 for_each_cpu(j, sched_group_cpus(sg)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02007025 struct sched_domain *sd;
Siddha, Suresh B08069032006-03-27 01:15:23 -08007026
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307027 sd = &per_cpu(phys_domains, j).sd;
Miao Xie13318a72009-04-15 09:59:10 +08007028 if (j != group_first_cpu(sd->groups)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02007029 /*
7030 * Only add "power" once for each
7031 * physical package.
7032 */
7033 continue;
7034 }
7035
Peter Zijlstra18a38852009-09-01 10:34:39 +02007036 sg->cpu_power += sd->groups->cpu_power;
Siddha, Suresh B08069032006-03-27 01:15:23 -08007037 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02007038 sg = sg->next;
7039 } while (sg != group_head);
Siddha, Suresh B08069032006-03-27 01:15:23 -08007040}
Andreas Herrmann0601a882009-08-18 13:01:11 +02007041
7042static int build_numa_sched_groups(struct s_data *d,
7043 const struct cpumask *cpu_map, int num)
7044{
7045 struct sched_domain *sd;
7046 struct sched_group *sg, *prev;
7047 int n, j;
7048
7049 cpumask_clear(d->covered);
7050 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
7051 if (cpumask_empty(d->nodemask)) {
7052 d->sched_group_nodes[num] = NULL;
7053 goto out;
7054 }
7055
7056 sched_domain_node_span(num, d->domainspan);
7057 cpumask_and(d->domainspan, d->domainspan, cpu_map);
7058
7059 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7060 GFP_KERNEL, num);
7061 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007062 printk(KERN_WARNING "Can not alloc domain group for node %d\n",
7063 num);
Andreas Herrmann0601a882009-08-18 13:01:11 +02007064 return -ENOMEM;
7065 }
7066 d->sched_group_nodes[num] = sg;
7067
7068 for_each_cpu(j, d->nodemask) {
7069 sd = &per_cpu(node_domains, j).sd;
7070 sd->groups = sg;
7071 }
7072
Peter Zijlstra18a38852009-09-01 10:34:39 +02007073 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02007074 cpumask_copy(sched_group_cpus(sg), d->nodemask);
7075 sg->next = sg;
7076 cpumask_or(d->covered, d->covered, d->nodemask);
7077
7078 prev = sg;
7079 for (j = 0; j < nr_node_ids; j++) {
7080 n = (num + j) % nr_node_ids;
7081 cpumask_complement(d->notcovered, d->covered);
7082 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
7083 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
7084 if (cpumask_empty(d->tmpmask))
7085 break;
7086 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
7087 if (cpumask_empty(d->tmpmask))
7088 continue;
7089 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7090 GFP_KERNEL, num);
7091 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007092 printk(KERN_WARNING
7093 "Can not alloc domain group for node %d\n", j);
Andreas Herrmann0601a882009-08-18 13:01:11 +02007094 return -ENOMEM;
7095 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02007096 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02007097 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
7098 sg->next = prev->next;
7099 cpumask_or(d->covered, d->covered, d->tmpmask);
7100 prev->next = sg;
7101 prev = sg;
7102 }
7103out:
7104 return 0;
7105}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007106#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007107
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007108#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007109/* Free memory allocated for various sched_group structures */
Rusty Russell96f874e2008-11-25 02:35:14 +10307110static void free_sched_groups(const struct cpumask *cpu_map,
7111 struct cpumask *nodemask)
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007112{
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007113 int cpu, i;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007114
Rusty Russellabcd0832008-11-25 02:35:02 +10307115 for_each_cpu(cpu, cpu_map) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007116 struct sched_group **sched_group_nodes
7117 = sched_group_nodes_bycpu[cpu];
7118
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007119 if (!sched_group_nodes)
7120 continue;
7121
Mike Travis076ac2a2008-05-12 21:21:12 +02007122 for (i = 0; i < nr_node_ids; i++) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007123 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7124
Mike Travis6ca09df2008-12-31 18:08:45 -08007125 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10307126 if (cpumask_empty(nodemask))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007127 continue;
7128
7129 if (sg == NULL)
7130 continue;
7131 sg = sg->next;
7132next_sg:
7133 oldsg = sg;
7134 sg = sg->next;
7135 kfree(oldsg);
7136 if (oldsg != sched_group_nodes[i])
7137 goto next_sg;
7138 }
7139 kfree(sched_group_nodes);
7140 sched_group_nodes_bycpu[cpu] = NULL;
7141 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007142}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007143#else /* !CONFIG_NUMA */
Rusty Russell96f874e2008-11-25 02:35:14 +10307144static void free_sched_groups(const struct cpumask *cpu_map,
7145 struct cpumask *nodemask)
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007146{
7147}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007148#endif /* CONFIG_NUMA */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007149
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007151 * Initialize sched groups cpu_power.
7152 *
7153 * cpu_power indicates the capacity of sched group, which is used while
7154 * distributing the load between different sched groups in a sched domain.
7155 * Typically cpu_power for all the groups in a sched domain will be the same unless
7156 * there are asymmetries in the topology. If there are asymmetries, group
7157 * having more cpu_power will pickup more load compared to the group having
7158 * less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007159 */
7160static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7161{
7162 struct sched_domain *child;
7163 struct sched_group *group;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007164 long power;
7165 int weight;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007166
7167 WARN_ON(!sd || !sd->groups);
7168
Miao Xie13318a72009-04-15 09:59:10 +08007169 if (cpu != group_first_cpu(sd->groups))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007170 return;
7171
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07007172 sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
7173
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007174 child = sd->child;
7175
Peter Zijlstra18a38852009-09-01 10:34:39 +02007176 sd->groups->cpu_power = 0;
Eric Dumazet5517d862007-05-08 00:32:57 -07007177
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007178 if (!child) {
7179 power = SCHED_LOAD_SCALE;
7180 weight = cpumask_weight(sched_domain_span(sd));
7181 /*
7182 * SMT siblings share the power of a single core.
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02007183 * Usually multiple threads get a better yield out of
7184		 * that one core than a single thread would have;
7185		 * reflect that in sd->smt_gain.
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007186 */
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02007187 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
7188 power *= sd->smt_gain;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007189 power /= weight;
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02007190 power >>= SCHED_LOAD_SHIFT;
7191 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02007192 sd->groups->cpu_power += power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007193 return;
7194 }
7195
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007196 /*
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007197 * Add cpu_power of each child group to this groups cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007198 */
7199 group = child->groups;
7200 do {
Peter Zijlstra18a38852009-09-01 10:34:39 +02007201 sd->groups->cpu_power += group->cpu_power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007202 group = group->next;
7203 } while (group != child->groups);
7204}
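/*
 * Illustrative arithmetic (not part of the original file; assumes
 * SCHED_LOAD_SCALE = 1024, SCHED_LOAD_SHIFT = 10 and the default
 * smt_gain of 1178): for a 2-thread SMT domain the leaf case above gives
 *
 *   power = 1024 * 1178 / 2 >> 10 = 589
 *
 * i.e. each 2-thread core is rated at a bit more than half of one full
 * CPU's capacity rather than twice it.
 */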
7205
7206/*
Mike Travis7c16ec52008-04-04 18:11:11 -07007207 * Initializers for schedule domains
7208 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7209 */
7210
Ingo Molnara5d8c342008-10-09 11:35:51 +02007211#ifdef CONFIG_SCHED_DEBUG
7212# define SD_INIT_NAME(sd, type) sd->name = #type
7213#else
7214# define SD_INIT_NAME(sd, type) do { } while (0)
7215#endif
7216
Mike Travis7c16ec52008-04-04 18:11:11 -07007217#define SD_INIT(sd, type) sd_init_##type(sd)
Ingo Molnara5d8c342008-10-09 11:35:51 +02007218
Mike Travis7c16ec52008-04-04 18:11:11 -07007219#define SD_INIT_FUNC(type) \
7220static noinline void sd_init_##type(struct sched_domain *sd) \
7221{ \
7222 memset(sd, 0, sizeof(*sd)); \
7223 *sd = SD_##type##_INIT; \
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007224 sd->level = SD_LV_##type; \
Ingo Molnara5d8c342008-10-09 11:35:51 +02007225 SD_INIT_NAME(sd, type); \
Mike Travis7c16ec52008-04-04 18:11:11 -07007226}
7227
7228SD_INIT_FUNC(CPU)
7229#ifdef CONFIG_NUMA
7230 SD_INIT_FUNC(ALLNODES)
7231 SD_INIT_FUNC(NODE)
7232#endif
7233#ifdef CONFIG_SCHED_SMT
7234 SD_INIT_FUNC(SIBLING)
7235#endif
7236#ifdef CONFIG_SCHED_MC
7237 SD_INIT_FUNC(MC)
7238#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007239#ifdef CONFIG_SCHED_BOOK
7240 SD_INIT_FUNC(BOOK)
7241#endif
Mike Travis7c16ec52008-04-04 18:11:11 -07007242
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007243static int default_relax_domain_level = -1;
7244
7245static int __init setup_relax_domain_level(char *str)
7246{
Li Zefan30e0e172008-05-13 10:27:17 +08007247 unsigned long val;
7248
7249 val = simple_strtoul(str, NULL, 0);
7250 if (val < SD_LV_MAX)
7251 default_relax_domain_level = val;
7252
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007253 return 1;
7254}
7255__setup("relax_domain_level=", setup_relax_domain_level);
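/*
 * Illustrative usage (not part of the original file): booting with e.g.
 *
 *   relax_domain_level=1
 *
 * makes set_domain_attribute() below clear SD_BALANCE_WAKE and
 * SD_BALANCE_NEWIDLE on every domain whose level is above the requested
 * one, confining idle/wakeup balancing to the lowest levels.
 */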
7256
7257static void set_domain_attribute(struct sched_domain *sd,
7258 struct sched_domain_attr *attr)
7259{
7260 int request;
7261
7262 if (!attr || attr->relax_domain_level < 0) {
7263 if (default_relax_domain_level < 0)
7264 return;
7265 else
7266 request = default_relax_domain_level;
7267 } else
7268 request = attr->relax_domain_level;
7269 if (request < sd->level) {
7270 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007271 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007272 } else {
7273 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007274 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007275 }
7276}
7277
Andreas Herrmann2109b992009-08-18 12:53:00 +02007278static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7279 const struct cpumask *cpu_map)
7280{
7281 switch (what) {
7282 case sa_sched_groups:
7283 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
7284 d->sched_group_nodes = NULL;
7285 case sa_rootdomain:
7286 free_rootdomain(d->rd); /* fall through */
7287 case sa_tmpmask:
7288 free_cpumask_var(d->tmpmask); /* fall through */
7289 case sa_send_covered:
7290 free_cpumask_var(d->send_covered); /* fall through */
Heiko Carstens01a08542010-08-31 10:28:16 +02007291 case sa_this_book_map:
7292 free_cpumask_var(d->this_book_map); /* fall through */
Andreas Herrmann2109b992009-08-18 12:53:00 +02007293 case sa_this_core_map:
7294 free_cpumask_var(d->this_core_map); /* fall through */
7295 case sa_this_sibling_map:
7296 free_cpumask_var(d->this_sibling_map); /* fall through */
7297 case sa_nodemask:
7298 free_cpumask_var(d->nodemask); /* fall through */
7299 case sa_sched_group_nodes:
7300#ifdef CONFIG_NUMA
7301 kfree(d->sched_group_nodes); /* fall through */
7302 case sa_notcovered:
7303 free_cpumask_var(d->notcovered); /* fall through */
7304 case sa_covered:
7305 free_cpumask_var(d->covered); /* fall through */
7306 case sa_domainspan:
7307 free_cpumask_var(d->domainspan); /* fall through */
7308#endif
7309 case sa_none:
7310 break;
7311 }
7312}
7313
7314static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7315 const struct cpumask *cpu_map)
7316{
7317#ifdef CONFIG_NUMA
7318 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
7319 return sa_none;
7320 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
7321 return sa_domainspan;
7322 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
7323 return sa_covered;
7324 /* Allocate the per-node list of sched groups */
7325 d->sched_group_nodes = kcalloc(nr_node_ids,
7326 sizeof(struct sched_group *), GFP_KERNEL);
7327 if (!d->sched_group_nodes) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007328 printk(KERN_WARNING "Can not alloc sched group node list\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007329 return sa_notcovered;
7330 }
7331 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
7332#endif
7333 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
7334 return sa_sched_group_nodes;
7335 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
7336 return sa_nodemask;
7337 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
7338 return sa_this_sibling_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007339 if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
Andreas Herrmann2109b992009-08-18 12:53:00 +02007340 return sa_this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007341 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
7342 return sa_this_book_map;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007343 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
7344 return sa_send_covered;
7345 d->rd = alloc_rootdomain();
7346 if (!d->rd) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007347 printk(KERN_WARNING "Cannot alloc root domain\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007348 return sa_tmpmask;
7349 }
7350 return sa_rootdomain;
7351}
7352
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007353static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
7354 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
7355{
7356 struct sched_domain *sd = NULL;
7357#ifdef CONFIG_NUMA
7358 struct sched_domain *parent;
7359
7360 d->sd_allnodes = 0;
7361 if (cpumask_weight(cpu_map) >
7362 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
7363 sd = &per_cpu(allnodes_domains, i).sd;
7364 SD_INIT(sd, ALLNODES);
7365 set_domain_attribute(sd, attr);
7366 cpumask_copy(sched_domain_span(sd), cpu_map);
7367 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
7368 d->sd_allnodes = 1;
7369 }
7370 parent = sd;
7371
7372 sd = &per_cpu(node_domains, i).sd;
7373 SD_INIT(sd, NODE);
7374 set_domain_attribute(sd, attr);
7375 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
7376 sd->parent = parent;
7377 if (parent)
7378 parent->child = sd;
7379 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
7380#endif
7381 return sd;
7382}
7383
Andreas Herrmann87cce662009-08-18 12:54:55 +02007384static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
7385 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7386 struct sched_domain *parent, int i)
7387{
7388 struct sched_domain *sd;
7389 sd = &per_cpu(phys_domains, i).sd;
7390 SD_INIT(sd, CPU);
7391 set_domain_attribute(sd, attr);
7392 cpumask_copy(sched_domain_span(sd), d->nodemask);
7393 sd->parent = parent;
7394 if (parent)
7395 parent->child = sd;
7396 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
7397 return sd;
7398}
7399
Heiko Carstens01a08542010-08-31 10:28:16 +02007400static struct sched_domain *__build_book_sched_domain(struct s_data *d,
7401 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7402 struct sched_domain *parent, int i)
7403{
7404 struct sched_domain *sd = parent;
7405#ifdef CONFIG_SCHED_BOOK
7406 sd = &per_cpu(book_domains, i).sd;
7407 SD_INIT(sd, BOOK);
7408 set_domain_attribute(sd, attr);
7409 cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
7410 sd->parent = parent;
7411 parent->child = sd;
7412 cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
7413#endif
7414 return sd;
7415}
7416
Andreas Herrmann410c4082009-08-18 12:56:14 +02007417static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
7418 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7419 struct sched_domain *parent, int i)
7420{
7421 struct sched_domain *sd = parent;
7422#ifdef CONFIG_SCHED_MC
7423 sd = &per_cpu(core_domains, i).sd;
7424 SD_INIT(sd, MC);
7425 set_domain_attribute(sd, attr);
7426 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
7427 sd->parent = parent;
7428 parent->child = sd;
7429 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
7430#endif
7431 return sd;
7432}
7433
Andreas Herrmannd8173532009-08-18 12:57:03 +02007434static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
7435 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7436 struct sched_domain *parent, int i)
7437{
7438 struct sched_domain *sd = parent;
7439#ifdef CONFIG_SCHED_SMT
7440 sd = &per_cpu(cpu_domains, i).sd;
7441 SD_INIT(sd, SIBLING);
7442 set_domain_attribute(sd, attr);
7443 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
7444 sd->parent = parent;
7445 parent->child = sd;
7446 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
7447#endif
7448 return sd;
7449}
7450
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007451static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
7452 const struct cpumask *cpu_map, int cpu)
7453{
7454 switch (l) {
7455#ifdef CONFIG_SCHED_SMT
7456 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
7457 cpumask_and(d->this_sibling_map, cpu_map,
7458 topology_thread_cpumask(cpu));
7459 if (cpu == cpumask_first(d->this_sibling_map))
7460 init_sched_build_groups(d->this_sibling_map, cpu_map,
7461 &cpu_to_cpu_group,
7462 d->send_covered, d->tmpmask);
7463 break;
7464#endif
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007465#ifdef CONFIG_SCHED_MC
7466 case SD_LV_MC: /* set up multi-core groups */
7467 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
7468 if (cpu == cpumask_first(d->this_core_map))
7469 init_sched_build_groups(d->this_core_map, cpu_map,
7470 &cpu_to_core_group,
7471 d->send_covered, d->tmpmask);
7472 break;
7473#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007474#ifdef CONFIG_SCHED_BOOK
7475 case SD_LV_BOOK: /* set up book groups */
7476 cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
7477 if (cpu == cpumask_first(d->this_book_map))
7478 init_sched_build_groups(d->this_book_map, cpu_map,
7479 &cpu_to_book_group,
7480 d->send_covered, d->tmpmask);
7481 break;
7482#endif
Andreas Herrmann86548092009-08-18 12:59:28 +02007483 case SD_LV_CPU: /* set up physical groups */
7484 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
7485 if (!cpumask_empty(d->nodemask))
7486 init_sched_build_groups(d->nodemask, cpu_map,
7487 &cpu_to_phys_group,
7488 d->send_covered, d->tmpmask);
7489 break;
Andreas Herrmannde616e32009-08-18 13:00:13 +02007490#ifdef CONFIG_NUMA
7491 case SD_LV_ALLNODES:
7492 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
7493 d->send_covered, d->tmpmask);
7494 break;
7495#endif
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007496 default:
7497 break;
7498 }
7499}
7500
Mike Travis7c16ec52008-04-04 18:11:11 -07007501/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007502 * Build sched domains for a given set of cpus and attach the sched domains
 7503 * to the individual cpus.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007504 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307505static int __build_sched_domains(const struct cpumask *cpu_map,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007506 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007507{
Andreas Herrmann2109b992009-08-18 12:53:00 +02007508 enum s_alloc alloc_state = sa_none;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007509 struct s_data d;
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007510 struct sched_domain *sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007511 int i;
John Hawkesd1b55132005-09-06 15:18:14 -07007512#ifdef CONFIG_NUMA
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007513 d.sd_allnodes = 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307514#endif
7515
Andreas Herrmann2109b992009-08-18 12:53:00 +02007516 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7517 if (alloc_state != sa_rootdomain)
7518 goto error;
7519 alloc_state = sa_sched_groups;
Mike Travis7c16ec52008-04-04 18:11:11 -07007520
Linus Torvalds1da177e2005-04-16 15:20:36 -07007521 /*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007522 * Set up domains for cpus specified by the cpu_map.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007523 */
Rusty Russellabcd0832008-11-25 02:35:02 +10307524 for_each_cpu(i, cpu_map) {
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007525 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
7526 cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007527
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007528 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
Andreas Herrmann87cce662009-08-18 12:54:55 +02007529 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007530 sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmann410c4082009-08-18 12:56:14 +02007531 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmannd8173532009-08-18 12:57:03 +02007532 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007533 }
7534
Rusty Russellabcd0832008-11-25 02:35:02 +10307535 for_each_cpu(i, cpu_map) {
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007536 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007537 build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007538 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007539 }
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007540
Linus Torvalds1da177e2005-04-16 15:20:36 -07007541 /* Set up physical groups */
Andreas Herrmann86548092009-08-18 12:59:28 +02007542 for (i = 0; i < nr_node_ids; i++)
7543 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007544
7545#ifdef CONFIG_NUMA
7546 /* Set up node groups */
Andreas Herrmannde616e32009-08-18 13:00:13 +02007547 if (d.sd_allnodes)
7548 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007549
Andreas Herrmann0601a882009-08-18 13:01:11 +02007550 for (i = 0; i < nr_node_ids; i++)
7551 if (build_numa_sched_groups(&d, cpu_map, i))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007552 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007553#endif
7554
7555 /* Calculate CPU power for physical packages and nodes */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007556#ifdef CONFIG_SCHED_SMT
Rusty Russellabcd0832008-11-25 02:35:02 +10307557 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007558 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007559 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007560 }
7561#endif
7562#ifdef CONFIG_SCHED_MC
Rusty Russellabcd0832008-11-25 02:35:02 +10307563 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007564 sd = &per_cpu(core_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007565 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007566 }
7567#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007568#ifdef CONFIG_SCHED_BOOK
7569 for_each_cpu(i, cpu_map) {
7570 sd = &per_cpu(book_domains, i).sd;
7571 init_sched_groups_power(i, sd);
7572 }
7573#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007574
Rusty Russellabcd0832008-11-25 02:35:02 +10307575 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007576 sd = &per_cpu(phys_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007577 init_sched_groups_power(i, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007578 }
7579
John Hawkes9c1cfda2005-09-06 15:18:14 -07007580#ifdef CONFIG_NUMA
Mike Travis076ac2a2008-05-12 21:21:12 +02007581 for (i = 0; i < nr_node_ids; i++)
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007582 init_numa_sched_groups_power(d.sched_group_nodes[i]);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007583
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007584 if (d.sd_allnodes) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007585 struct sched_group *sg;
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007586
Rusty Russell96f874e2008-11-25 02:35:14 +10307587 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007588 d.tmpmask);
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007589 init_numa_sched_groups_power(sg);
7590 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07007591#endif
7592
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593 /* Attach the domains */
Rusty Russellabcd0832008-11-25 02:35:02 +10307594 for_each_cpu(i, cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007595#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307596 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007597#elif defined(CONFIG_SCHED_MC)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307598 sd = &per_cpu(core_domains, i).sd;
Heiko Carstens01a08542010-08-31 10:28:16 +02007599#elif defined(CONFIG_SCHED_BOOK)
7600 sd = &per_cpu(book_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007601#else
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307602 sd = &per_cpu(phys_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007603#endif
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007604 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007605 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007606
Andreas Herrmann2109b992009-08-18 12:53:00 +02007607	d.sched_group_nodes = NULL; /* don't free this; we still need it */
7608 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
7609 return 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307610
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007611error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02007612 __free_domain_allocs(&d, alloc_state, cpu_map);
7613 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007614}
Paul Jackson029190c2007-10-18 23:40:20 -07007615
Rusty Russell96f874e2008-11-25 02:35:14 +10307616static int build_sched_domains(const struct cpumask *cpu_map)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007617{
7618 return __build_sched_domains(cpu_map, NULL);
7619}
7620
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307621static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07007622static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02007623static struct sched_domain_attr *dattr_cur;
 7624	/* attributes of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07007625
7626/*
7627 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10307628 * cpumask) fails, then fall back to a single sched domain,
7629 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07007630 */
Rusty Russell42128232008-11-25 02:35:12 +10307631static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07007632
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007633/*
7634 * arch_update_cpu_topology lets virtualized architectures update the
7635 * cpu core maps. It is supposed to return 1 if the topology changed
7636 * or 0 if it stayed the same.
7637 */
7638int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01007639{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007640 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01007641}
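
/*
 * Illustrative sketch of an architecture override for the weak symbol
 * above (nothing here is quoted from a real arch; the helper name
 * hypervisor_topology_changed() is made up for the example).  A strong
 * definition like this one simply replaces the weak default:
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return hypervisor_topology_changed() ? 1 : 0;
 *	}
 */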
7642
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307643cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7644{
7645 int i;
7646 cpumask_var_t *doms;
7647
7648 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7649 if (!doms)
7650 return NULL;
7651 for (i = 0; i < ndoms; i++) {
7652 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7653 free_sched_domains(doms, i);
7654 return NULL;
7655 }
7656 }
7657 return doms;
7658}
7659
7660void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7661{
7662 unsigned int i;
7663 for (i = 0; i < ndoms; i++)
7664 free_cpumask_var(doms[i]);
7665 kfree(doms);
7666}
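
/*
 * Minimal usage sketch for the two helpers above (the mask contents
 * are arbitrary; freeing is only correct while the set has NOT been
 * handed to partition_sched_domains(), which takes ownership):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpu_active_mask);
 *		cpumask_clear(doms[1]);
 *		...
 *		free_sched_domains(doms, 2);
 *	}
 */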
7667
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007668/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007669 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07007670 * For now this just excludes isolated cpus, but could be used to
7671 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007672 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307673static int arch_init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007674{
Milton Miller73785472007-10-24 18:23:48 +02007675 int err;
7676
Heiko Carstens22e52b02008-03-12 18:31:59 +01007677 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07007678 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307679 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07007680 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307681 doms_cur = &fallback_doms;
7682 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007683 dattr_cur = NULL;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307684 err = build_sched_domains(doms_cur[0]);
Milton Miller6382bc92007-10-15 17:00:19 +02007685 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02007686
7687 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007688}
7689
Rusty Russell96f874e2008-11-25 02:35:14 +10307690static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7691 struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007692{
Mike Travis7c16ec52008-04-04 18:11:11 -07007693 free_sched_groups(cpu_map, tmpmask);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007694}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007695
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007696/*
7697 * Detach sched domains from a group of cpus specified in cpu_map
7698 * These cpus will now be attached to the NULL domain
7699 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307700static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007701{
Rusty Russell96f874e2008-11-25 02:35:14 +10307702	/* Static to save stack; safe because the hotplug lock is held. */
7703 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007704 int i;
7705
Rusty Russellabcd0832008-11-25 02:35:02 +10307706 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007707 cpu_attach_domain(NULL, &def_root_domain, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007708 synchronize_sched();
Rusty Russell96f874e2008-11-25 02:35:14 +10307709 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007710}
7711
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007712/* handle null as "default" */
7713static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7714 struct sched_domain_attr *new, int idx_new)
7715{
7716 struct sched_domain_attr tmp;
7717
7718 /* fast path */
7719 if (!new && !cur)
7720 return 1;
7721
7722 tmp = SD_ATTR_INIT;
7723 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7724 new ? (new + idx_new) : &tmp,
7725 sizeof(struct sched_domain_attr));
7726}
7727
Paul Jackson029190c2007-10-18 23:40:20 -07007728/*
7729 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007730 * cpumasks in the array doms_new[]. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07007731 * doms_new[] to the current sched domain partitioning, doms_cur[].
7732 * It destroys each deleted domain and builds each new domain.
7733 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307734 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007735 * The masks don't intersect (don't overlap). We should set up one
7736 * sched domain for each mask. CPUs not in any of the cpumasks will
7737 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07007738 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7739 * it as it is.
7740 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307741 * The passed in 'doms_new' should be allocated using
7742 * alloc_sched_domains. This routine takes ownership of it and will
7743 * free_sched_domains it when done with it. If the caller failed the
7744 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 7745 * and partition_sched_domains() will fall back to the single partition
 7746 * 'fallback_doms'; this also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07007747 *
Rusty Russell96f874e2008-11-25 02:35:14 +10307748 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08007749 * ndoms_new == 0 is a special case for destroying existing domains,
7750 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007751 *
Paul Jackson029190c2007-10-18 23:40:20 -07007752 * Call with hotplug lock held
7753 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307754void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007755 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07007756{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007757 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007758 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07007759
Heiko Carstens712555e2008-04-28 11:33:07 +02007760 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007761
Milton Miller73785472007-10-24 18:23:48 +02007762 /* always unregister in case we don't destroy any domains */
7763 unregister_sched_domain_sysctl();
7764
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007765 /* Let architecture update cpu core mappings. */
7766 new_topology = arch_update_cpu_topology();
7767
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007768 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07007769
7770 /* Destroy deleted domains */
7771 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007772 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307773 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007774 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007775 goto match1;
7776 }
7777 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307778 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07007779match1:
7780 ;
7781 }
7782
Max Krasnyanskye761b772008-07-15 04:43:49 -07007783 if (doms_new == NULL) {
7784 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307785 doms_new = &fallback_doms;
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007786 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08007787 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007788 }
7789
Paul Jackson029190c2007-10-18 23:40:20 -07007790 /* Build new domains */
7791 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007792 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307793 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007794 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007795 goto match2;
7796 }
7797 /* no match - add a new doms_new */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307798 __build_sched_domains(doms_new[i],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007799 dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07007800match2:
7801 ;
7802 }
7803
7804 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307805 if (doms_cur != &fallback_doms)
7806 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007807 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07007808 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007809 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07007810 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02007811
7812 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007813
Heiko Carstens712555e2008-04-28 11:33:07 +02007814 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07007815}
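
/*
 * Usage sketch (a hand-written illustration, not lifted from the
 * cpuset code): rebuild a single partition spanning the active,
 * non-isolated cpus.  partition_sched_domains() takes ownership of
 * 'doms', so there is no matching free here; doms == NULL with
 * ndoms == 1 would select 'fallback_doms' instead.
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	if (doms)
 *		cpumask_andnot(doms[0], cpu_active_mask, cpu_isolated_map);
 *	get_online_cpus();
 *	partition_sched_domains(1, doms, NULL);
 *	put_online_cpus();
 */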
7816
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007817#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Li Zefanc70f22d2009-01-05 19:07:50 +08007818static void arch_reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007819{
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007820 get_online_cpus();
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007821
7822 /* Destroy domains first to force the rebuild */
7823 partition_sched_domains(0, NULL, NULL);
7824
Max Krasnyanskye761b772008-07-15 04:43:49 -07007825 rebuild_sched_domains();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007826 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007827}
7828
7829static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7830{
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307831 unsigned int level = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007832
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307833 if (sscanf(buf, "%u", &level) != 1)
7834 return -EINVAL;
7835
7836 /*
 7837	 * level is unsigned, so it is always positive; there is no need
 7838	 * to check for level < POWERSAVINGS_BALANCE_NONE, which is 0.
 7839	 * What happens on a 0- or 1-byte write? Do we need to check
 7840	 * count as well?
7841 */
7842
7843 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007844 return -EINVAL;
7845
7846 if (smt)
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307847 sched_smt_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007848 else
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307849 sched_mc_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007850
Li Zefanc70f22d2009-01-05 19:07:50 +08007851 arch_reinit_sched_domains();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007852
Li Zefanc70f22d2009-01-05 19:07:50 +08007853 return count;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007854}
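
/*
 * How the store hook above is reached in practice (sketch; the sysfs
 * path assumes the usual sysdev layout of the cpu class):
 *
 *	# echo 2 > /sys/devices/system/cpu/sched_mc_power_savings
 *
 * lands in sched_mc_power_savings_store() below, which calls
 * sched_power_savings_store("2\n", 2, 0); level parses as 2, the MC
 * balance level is updated, and arch_reinit_sched_domains() rebuilds
 * the domains.
 */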
7855
Adrian Bunk6707de002007-08-12 18:08:19 +02007856#ifdef CONFIG_SCHED_MC
Andi Kleenf718cd42008-07-29 22:33:52 -07007857static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007858 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007859 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007860{
7861 return sprintf(page, "%u\n", sched_mc_power_savings);
7862}
Andi Kleenf718cd42008-07-29 22:33:52 -07007863static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007864 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007865 const char *buf, size_t count)
7866{
7867 return sched_power_savings_store(buf, count, 0);
7868}
Andi Kleenf718cd42008-07-29 22:33:52 -07007869static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7870 sched_mc_power_savings_show,
7871 sched_mc_power_savings_store);
Adrian Bunk6707de002007-08-12 18:08:19 +02007872#endif
7873
7874#ifdef CONFIG_SCHED_SMT
Andi Kleenf718cd42008-07-29 22:33:52 -07007875static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007876 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007877 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007878{
7879 return sprintf(page, "%u\n", sched_smt_power_savings);
7880}
Andi Kleenf718cd42008-07-29 22:33:52 -07007881static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007882 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007883 const char *buf, size_t count)
7884{
7885 return sched_power_savings_store(buf, count, 1);
7886}
Andi Kleenf718cd42008-07-29 22:33:52 -07007887static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7888 sched_smt_power_savings_show,
Adrian Bunk6707de002007-08-12 18:08:19 +02007889 sched_smt_power_savings_store);
7890#endif
7891
Li Zefan39aac642009-01-05 19:18:02 +08007892int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007893{
7894 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007895
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007896#ifdef CONFIG_SCHED_SMT
7897 if (smt_capable())
7898 err = sysfs_create_file(&cls->kset.kobj,
7899 &attr_sched_smt_power_savings.attr);
7900#endif
7901#ifdef CONFIG_SCHED_MC
7902 if (!err && mc_capable())
7903 err = sysfs_create_file(&cls->kset.kobj,
7904 &attr_sched_mc_power_savings.attr);
7905#endif
7906 return err;
7907}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007908#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007909
Linus Torvalds1da177e2005-04-16 15:20:36 -07007910/*
Tejun Heo3a101d02010-06-08 21:40:36 +02007911 * Update cpusets according to cpu_active mask. If cpusets are
7912 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7913 * around partition_sched_domains().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007914 */
Tejun Heo0b2e9182010-06-21 23:53:31 +02007915static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7916 void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007917{
Tejun Heo3a101d02010-06-08 21:40:36 +02007918 switch (action & ~CPU_TASKS_FROZEN) {
Max Krasnyanskye761b772008-07-15 04:43:49 -07007919 case CPU_ONLINE:
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007920 case CPU_DOWN_FAILED:
Tejun Heo3a101d02010-06-08 21:40:36 +02007921 cpuset_update_active_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007922 return NOTIFY_OK;
Max Krasnyanskye761b772008-07-15 04:43:49 -07007923 default:
7924 return NOTIFY_DONE;
7925 }
7926}
Tejun Heo3a101d02010-06-08 21:40:36 +02007927
Tejun Heo0b2e9182010-06-21 23:53:31 +02007928static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7929 void *hcpu)
Tejun Heo3a101d02010-06-08 21:40:36 +02007930{
7931 switch (action & ~CPU_TASKS_FROZEN) {
7932 case CPU_DOWN_PREPARE:
7933 cpuset_update_active_cpus();
7934 return NOTIFY_OK;
7935 default:
7936 return NOTIFY_DONE;
7937 }
7938}
Max Krasnyanskye761b772008-07-15 04:43:49 -07007939
7940static int update_runtime(struct notifier_block *nfb,
7941 unsigned long action, void *hcpu)
7942{
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007943 int cpu = (int)(long)hcpu;
7944
Linus Torvalds1da177e2005-04-16 15:20:36 -07007945 switch (action) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007946 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007947 case CPU_DOWN_PREPARE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007948 disable_runtime(cpu_rq(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007949 return NOTIFY_OK;
7950
Linus Torvalds1da177e2005-04-16 15:20:36 -07007951 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007952 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007954 case CPU_ONLINE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007955 enable_runtime(cpu_rq(cpu));
Max Krasnyanskye761b772008-07-15 04:43:49 -07007956 return NOTIFY_OK;
7957
Linus Torvalds1da177e2005-04-16 15:20:36 -07007958 default:
7959 return NOTIFY_DONE;
7960 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007961}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007962
7963void __init sched_init_smp(void)
7964{
Rusty Russelldcc30a32008-11-25 02:35:12 +10307965 cpumask_var_t non_isolated_cpus;
7966
7967 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08007968 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007969
Mike Travis434d53b2008-04-04 18:11:04 -07007970#if defined(CONFIG_NUMA)
7971 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7972 GFP_KERNEL);
7973 BUG_ON(sched_group_nodes_bycpu == NULL);
7974#endif
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007975 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02007976 mutex_lock(&sched_domains_mutex);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007977 arch_init_sched_domains(cpu_active_mask);
Rusty Russelldcc30a32008-11-25 02:35:12 +10307978 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7979 if (cpumask_empty(non_isolated_cpus))
7980 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02007981 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007982 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007983
Tejun Heo3a101d02010-06-08 21:40:36 +02007984 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7985 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007986
7987 /* RT runtime code needs to handle some hotplug events */
7988 hotcpu_notifier(update_runtime, 0);
7989
Peter Zijlstrab328ca12008-04-29 10:02:46 +02007990 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07007991
7992 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10307993 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07007994 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01007995 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10307996 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10307997
Rusty Russell0e3900e2008-11-25 02:35:13 +10307998 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007999}
8000#else
8001void __init sched_init_smp(void)
8002{
Ingo Molnar19978ca2007-11-09 22:39:38 +01008003 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008004}
8005#endif /* CONFIG_SMP */
8006
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05308007const_debug unsigned int sysctl_timer_migration = 1;
8008
Linus Torvalds1da177e2005-04-16 15:20:36 -07008009int in_sched_functions(unsigned long addr)
8010{
Linus Torvalds1da177e2005-04-16 15:20:36 -07008011 return in_lock_functions(addr) ||
8012 (addr >= (unsigned long)__sched_text_start
8013 && addr < (unsigned long)__sched_text_end);
8014}
8015
Alexey Dobriyana9957442007-10-15 17:00:13 +02008016static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02008017{
8018 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02008019 INIT_LIST_HEAD(&cfs_rq->tasks);
Ingo Molnardd41f592007-07-09 18:51:59 +02008020#ifdef CONFIG_FAIR_GROUP_SCHED
8021 cfs_rq->rq = rq;
Paul Turnerf07333b2011-01-21 20:45:03 -08008022 /* allow initial update_cfs_load() to truncate */
Peter Zijlstra6ea72f12011-01-26 13:36:03 +01008023#ifdef CONFIG_SMP
Paul Turnerf07333b2011-01-21 20:45:03 -08008024 cfs_rq->load_stamp = 1;
Ingo Molnardd41f592007-07-09 18:51:59 +02008025#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02008026#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02008027 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02008028}
8029
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008030static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8031{
8032 struct rt_prio_array *array;
8033 int i;
8034
8035 array = &rt_rq->active;
8036 for (i = 0; i < MAX_RT_PRIO; i++) {
8037 INIT_LIST_HEAD(array->queue + i);
8038 __clear_bit(i, array->bitmap);
8039 }
8040 /* delimiter for bitsearch: */
8041 __set_bit(MAX_RT_PRIO, array->bitmap);
8042
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008043#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -05008044 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins398a1532009-01-14 09:10:04 -05008045#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -05008046 rt_rq->highest_prio.next = MAX_RT_PRIO;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01008047#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008048#endif
8049#ifdef CONFIG_SMP
8050 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008051 rt_rq->overloaded = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008052 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008053#endif
8054
8055 rt_rq->rt_time = 0;
8056 rt_rq->rt_throttled = 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008057 rt_rq->rt_runtime = 0;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008058 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008059
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008060#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01008061 rt_rq->rt_nr_boosted = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008062 rt_rq->rq = rq;
8063#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008064}
8065
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008066#ifdef CONFIG_FAIR_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008067static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008068 struct sched_entity *se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008069 struct sched_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008070{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008071 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008072 tg->cfs_rq[cpu] = cfs_rq;
8073 init_cfs_rq(cfs_rq, rq);
8074 cfs_rq->tg = tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008075
8076 tg->se[cpu] = se;
Yong Zhang07e06b02011-01-07 15:17:36 +08008077 /* se could be NULL for root_task_group */
Dhaval Giani354d60c2008-04-19 19:44:59 +02008078 if (!se)
8079 return;
8080
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008081 if (!parent)
8082 se->cfs_rq = &rq->cfs;
8083 else
8084 se->cfs_rq = parent->my_q;
8085
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008086 se->my_q = cfs_rq;
Paul Turner94371782010-11-15 15:47:10 -08008087 update_load_set(&se->load, 0);
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008088 se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008089}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008090#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008091
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008092#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008093static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008094 struct sched_rt_entity *rt_se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008095 struct sched_rt_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008096{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008097 struct rq *rq = cpu_rq(cpu);
8098
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008099 tg->rt_rq[cpu] = rt_rq;
8100 init_rt_rq(rt_rq, rq);
8101 rt_rq->tg = tg;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008102 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008103
8104 tg->rt_se[cpu] = rt_se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02008105 if (!rt_se)
8106 return;
8107
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008108 if (!parent)
8109 rt_se->rt_rq = &rq->rt;
8110 else
8111 rt_se->rt_rq = parent->my_q;
8112
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008113 rt_se->my_q = rt_rq;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008114 rt_se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008115 INIT_LIST_HEAD(&rt_se->run_list);
8116}
8117#endif
8118
Linus Torvalds1da177e2005-04-16 15:20:36 -07008119void __init sched_init(void)
8120{
Ingo Molnardd41f592007-07-09 18:51:59 +02008121 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07008122 unsigned long alloc_size = 0, ptr;
8123
8124#ifdef CONFIG_FAIR_GROUP_SCHED
8125 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8126#endif
8127#ifdef CONFIG_RT_GROUP_SCHED
8128 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8129#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308130#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10308131 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308132#endif
Mike Travis434d53b2008-04-04 18:11:04 -07008133 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03008134 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07008135
8136#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008137 root_task_group.se = (struct sched_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008138 ptr += nr_cpu_ids * sizeof(void **);
8139
Yong Zhang07e06b02011-01-07 15:17:36 +08008140 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008141 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02008142
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008143#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07008144#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008145 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008146 ptr += nr_cpu_ids * sizeof(void **);
8147
Yong Zhang07e06b02011-01-07 15:17:36 +08008148 root_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02008149 ptr += nr_cpu_ids * sizeof(void **);
8150
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008151#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308152#ifdef CONFIG_CPUMASK_OFFSTACK
8153 for_each_possible_cpu(i) {
8154 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
8155 ptr += cpumask_size();
8156 }
8157#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07008158 }
Ingo Molnardd41f592007-07-09 18:51:59 +02008159
Gregory Haskins57d885f2008-01-25 21:08:18 +01008160#ifdef CONFIG_SMP
8161 init_defrootdomain();
8162#endif
8163
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008164 init_rt_bandwidth(&def_rt_bandwidth,
8165 global_rt_period(), global_rt_runtime());
8166
8167#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008168 init_rt_bandwidth(&root_task_group.rt_bandwidth,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008169 global_rt_period(), global_rt_runtime());
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008170#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008171
Dhaval Giani7c941432010-01-20 13:26:18 +01008172#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008173 list_add(&root_task_group.list, &task_groups);
8174 INIT_LIST_HEAD(&root_task_group.children);
Mike Galbraith5091faa2010-11-30 14:18:03 +01008175 autogroup_init(&init_task);
Dhaval Giani7c941432010-01-20 13:26:18 +01008176#endif /* CONFIG_CGROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008177
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08008178 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07008179 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008180
8181 rq = cpu_rq(i);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008182 raw_spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07008183 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008184 rq->calc_load_active = 0;
8185 rq->calc_load_update = jiffies + LOAD_FREQ;
Ingo Molnardd41f592007-07-09 18:51:59 +02008186 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008187 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008188#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008189 root_task_group.shares = root_task_group_load;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008190 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008191 /*
Yong Zhang07e06b02011-01-07 15:17:36 +08008192 * How much cpu bandwidth does root_task_group get?
Dhaval Giani354d60c2008-04-19 19:44:59 +02008193 *
 8194 * In case of task-groups formed through the cgroup filesystem, it
8195 * gets 100% of the cpu resources in the system. This overall
8196 * system cpu resource is divided among the tasks of
Yong Zhang07e06b02011-01-07 15:17:36 +08008197 * root_task_group and its child task-groups in a fair manner,
Dhaval Giani354d60c2008-04-19 19:44:59 +02008198 * based on each entity's (task or task-group's) weight
8199 * (se->load.weight).
8200 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008201 * In other words, if root_task_group has 10 tasks of weight
Dhaval Giani354d60c2008-04-19 19:44:59 +02008202 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8203 * then A0's share of the cpu resource is:
8204 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02008205 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02008206 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008207 * We achieve this by letting root_task_group's tasks sit
 8208 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
Dhaval Giani354d60c2008-04-19 19:44:59 +02008209 */
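		/*
		 * Worked check of the figure above: the weights sum to
		 * 10*1024 + 1024 + 1024 = 12288, and 1024/12288 = 1/12,
		 * i.e. 8.33%.  In general, an entity of weight w_i gets
		 * w_i / sum(w_j) of the bandwidth at its level.
		 */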
Yong Zhang07e06b02011-01-07 15:17:36 +08008210 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008211#endif /* CONFIG_FAIR_GROUP_SCHED */
8212
8213 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008214#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008215 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Yong Zhang07e06b02011-01-07 15:17:36 +08008216 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008217#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008218
Ingo Molnardd41f592007-07-09 18:51:59 +02008219 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
8220 rq->cpu_load[j] = 0;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07008221
8222 rq->last_load_update_tick = jiffies;
8223
Linus Torvalds1da177e2005-04-16 15:20:36 -07008224#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07008225 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01008226 rq->rd = NULL;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02008227 rq->cpu_power = SCHED_LOAD_SCALE;
Gregory Haskins3f029d32009-07-29 11:08:47 -04008228 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008229 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02008230 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008231 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07008232 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04008233 rq->online = 0;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01008234 rq->idle_stamp = 0;
8235 rq->avg_idle = 2*sysctl_sched_migration_cost;
Gregory Haskinsdc938522008-01-25 21:08:26 +01008236 rq_attach_root(rq, &def_root_domain);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008237#ifdef CONFIG_NO_HZ
8238 rq->nohz_balance_kick = 0;
8239 init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
8240#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008241#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01008242 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008243 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008244 }
8245
Peter Williams2dd73a42006-06-27 02:54:34 -07008246 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008247
Avi Kivitye107be32007-07-26 13:40:43 +02008248#ifdef CONFIG_PREEMPT_NOTIFIERS
8249 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8250#endif
8251
Christoph Lameterc9819f42006-12-10 02:20:25 -08008252#ifdef CONFIG_SMP
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008253 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
Christoph Lameterc9819f42006-12-10 02:20:25 -08008254#endif
8255
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008256#ifdef CONFIG_RT_MUTEXES
Thomas Gleixner1d615482009-11-17 14:54:03 +01008257 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008258#endif
8259
Linus Torvalds1da177e2005-04-16 15:20:36 -07008260 /*
8261 * The boot idle thread does lazy MMU switching as well:
8262 */
8263 atomic_inc(&init_mm.mm_count);
8264 enter_lazy_tlb(&init_mm, current);
8265
8266 /*
8267 * Make us the idle thread. Technically, schedule() should not be
 8268 * called from this thread; however, somewhere below it might be,
8269 * but because we are the idle thread, we just pick up running again
8270 * when this runqueue becomes "idle".
8271 */
8272 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008273
8274 calc_load_update = jiffies + LOAD_FREQ;
8275
Ingo Molnardd41f592007-07-09 18:51:59 +02008276 /*
8277 * During early bootup we pretend to be a normal task:
8278 */
8279 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01008280
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308281 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
Rusty Russell49557e62009-11-02 20:37:20 +10308282 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308283#ifdef CONFIG_SMP
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308284#ifdef CONFIG_NO_HZ
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008285 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8286 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
8287 atomic_set(&nohz.load_balancer, nr_cpu_ids);
8288 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
8289 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308290#endif
Rusty Russellbdddd292009-12-02 14:09:16 +10308291 /* May be allocated at isolcpus cmdline parse time */
8292 if (cpu_isolated_map == NULL)
8293 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308294#endif /* SMP */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308295
Ingo Molnar6892b752008-02-13 14:02:36 +01008296 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008297}
8298
8299#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008300static inline int preempt_count_equals(int preempt_offset)
8301{
Frederic Weisbecker234da7b2009-12-16 20:21:05 +01008302 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008303
Arnd Bergmann4ba82162011-01-25 22:52:22 +01008304 return (nested == preempt_offset);
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008305}
8306
Simon Kagstromd8948372009-12-23 11:08:18 +01008307void __might_sleep(const char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008308{
Ingo Molnar48f24c42006-07-03 00:25:40 -07008309#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07008310 static unsigned long prev_jiffy; /* ratelimiting */
8311
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008312 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8313 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02008314 return;
8315 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8316 return;
8317 prev_jiffy = jiffies;
8318
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01008319 printk(KERN_ERR
8320 "BUG: sleeping function called from invalid context at %s:%d\n",
8321 file, line);
8322 printk(KERN_ERR
8323 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8324 in_atomic(), irqs_disabled(),
8325 current->pid, current->comm);
Ingo Molnaraef745f2008-08-28 11:34:43 +02008326
8327 debug_show_held_locks(current);
8328 if (irqs_disabled())
8329 print_irqtrace_events(current);
8330 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008331#endif
8332}
8333EXPORT_SYMBOL(__might_sleep);
8334#endif
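
/*
 * Sketch of a typical caller of the check above (my_driver_wait() is
 * hypothetical; might_sleep() is the <linux/kernel.h> wrapper that, on
 * CONFIG_DEBUG_SPINLOCK_SLEEP builds, expands to
 * __might_sleep(__FILE__, __LINE__, 0)), so the annotation fires if
 * the function is ever entered from atomic context:
 *
 *	void my_driver_wait(void)
 *	{
 *		might_sleep();
 *		msleep(10);
 *	}
 */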
8335
8336#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008337static void normalize_task(struct rq *rq, struct task_struct *p)
8338{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008339 const struct sched_class *prev_class = p->sched_class;
8340 int old_prio = p->prio;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008341 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02008342
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02008343 on_rq = p->on_rq;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008344 if (on_rq)
8345 deactivate_task(rq, p, 0);
8346 __setscheduler(rq, p, SCHED_NORMAL, 0);
8347 if (on_rq) {
8348 activate_task(rq, p, 0);
8349 resched_task(rq->curr);
8350 }
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008351
8352 check_class_changed(rq, p, prev_class, old_prio);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008353}
8354
Linus Torvalds1da177e2005-04-16 15:20:36 -07008355void normalize_rt_tasks(void)
8356{
Ingo Molnara0f98a12007-06-17 18:37:45 +02008357 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008358 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07008359 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008360
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008361 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008362 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02008363 /*
8364 * Only normalize user tasks:
8365 */
8366 if (!p->mm)
8367 continue;
8368
Ingo Molnardd41f592007-07-09 18:51:59 +02008369 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008370#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03008371 p->se.statistics.wait_start = 0;
8372 p->se.statistics.sleep_start = 0;
8373 p->se.statistics.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008374#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02008375
8376 if (!rt_task(p)) {
8377 /*
8378 * Renice negative nice level userspace
8379 * tasks back to 0:
8380 */
8381 if (TASK_NICE(p) < 0 && p->mm)
8382 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008383 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02008384 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008385
Thomas Gleixner1d615482009-11-17 14:54:03 +01008386 raw_spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07008387 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008388
Ingo Molnar178be792007-10-15 17:00:18 +02008389 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008390
Ingo Molnarb29739f2006-06-27 02:54:51 -07008391 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01008392 raw_spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008393 } while_each_thread(g, p);
8394
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008395 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008396}
8397
8398#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07008399
Jason Wessel67fc4e02010-05-20 21:04:21 -05008400#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008401/*
Jason Wessel67fc4e02010-05-20 21:04:21 -05008402 * These functions are only useful for the IA64 MCA handling, or kdb.
Linus Torvalds1df5c102005-09-12 07:59:21 -07008403 *
8404 * They can only be called when the whole system has been
8405 * stopped - every CPU needs to be quiescent, and no scheduling
8406 * activity can take place. Using them for anything else would
8407 * be a serious bug, and as a result, they aren't even visible
8408 * under any other configuration.
8409 */
8410
8411/**
8412 * curr_task - return the current task for a given cpu.
8413 * @cpu: the processor in question.
8414 *
8415 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8416 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008417struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008418{
8419 return cpu_curr(cpu);
8420}
8421
Jason Wessel67fc4e02010-05-20 21:04:21 -05008422#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8423
8424#ifdef CONFIG_IA64
Linus Torvalds1df5c102005-09-12 07:59:21 -07008425/**
8426 * set_curr_task - set the current task for a given cpu.
8427 * @cpu: the processor in question.
8428 * @p: the task pointer to set.
8429 *
8430 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008431 * are serviced on a separate stack. It allows the architecture to switch the
8432 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07008433 * must be called with all CPUs synchronized and interrupts disabled; the
 8434 * caller must save the original value of the current task (see
8435 * curr_task() above) and restore that value before reenabling interrupts and
8436 * re-starting the system.
8437 *
8438 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8439 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008440void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008441{
8442 cpu_curr(cpu) = p;
8443}
8444
8445#endif
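
/*
 * Sketch of the save/switch/restore protocol described above
 * (illustrative only; 'special' stands for whatever task the MCA or
 * kdb code needs to install, and the whole system is assumed stopped):
 *
 *	struct task_struct *orig = curr_task(cpu);
 *
 *	set_curr_task(cpu, special);
 *	... do the work with the system still stopped ...
 *	set_curr_task(cpu, orig);
 *	... only then re-enable interrupts and restart the system ...
 */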
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008446
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008447#ifdef CONFIG_FAIR_GROUP_SCHED
8448static void free_fair_sched_group(struct task_group *tg)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008449{
8450 int i;
8451
8452 for_each_possible_cpu(i) {
8453 if (tg->cfs_rq)
8454 kfree(tg->cfs_rq[i]);
8455 if (tg->se)
8456 kfree(tg->se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008457 }
8458
8459 kfree(tg->cfs_rq);
8460 kfree(tg->se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008461}
8462
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008463static
8464int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008465{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008466 struct cfs_rq *cfs_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008467 struct sched_entity *se;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008468 int i;
8469
Mike Travis434d53b2008-04-04 18:11:04 -07008470 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008471 if (!tg->cfs_rq)
8472 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008473 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008474 if (!tg->se)
8475 goto err;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008476
8477 tg->shares = NICE_0_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008478
8479 for_each_possible_cpu(i) {
Li Zefaneab17222008-10-29 17:03:22 +08008480 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8481 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008482 if (!cfs_rq)
8483 goto err;
8484
Li Zefaneab17222008-10-29 17:03:22 +08008485 se = kzalloc_node(sizeof(struct sched_entity),
8486 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008487 if (!se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008488 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008489
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008490 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008491 }
8492
8493 return 1;
8494
Peter Zijlstra49246272010-10-17 21:46:10 +02008495err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008496 kfree(cfs_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008497err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008498 return 0;
8499}
8500
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008501static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8502{
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008503 struct rq *rq = cpu_rq(cpu);
8504 unsigned long flags;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008505
8506 /*
 8507 * Only empty task groups can be destroyed, so we can speculatively
8508 * check on_list without danger of it being re-added.
8509 */
8510 if (!tg->cfs_rq[cpu]->on_list)
8511 return;
8512
8513 raw_spin_lock_irqsave(&rq->lock, flags);
Paul Turner822bc182010-11-29 16:55:40 -08008514 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008515 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008516}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008517#else /* !CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008518static inline void free_fair_sched_group(struct task_group *tg)
8519{
8520}
8521
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008522static inline
8523int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008524{
8525 return 1;
8526}
8527
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008528static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8529{
8530}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008531#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008532
8533#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008534static void free_rt_sched_group(struct task_group *tg)
8535{
8536 int i;
8537
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008538 destroy_rt_bandwidth(&tg->rt_bandwidth);
8539
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008540 for_each_possible_cpu(i) {
8541 if (tg->rt_rq)
8542 kfree(tg->rt_rq[i]);
8543 if (tg->rt_se)
8544 kfree(tg->rt_se[i]);
8545 }
8546
8547 kfree(tg->rt_rq);
8548 kfree(tg->rt_se);
8549}
8550
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008551static
8552int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008553{
8554 struct rt_rq *rt_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008555 struct sched_rt_entity *rt_se;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008556 struct rq *rq;
8557 int i;
8558
Mike Travis434d53b2008-04-04 18:11:04 -07008559 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008560 if (!tg->rt_rq)
8561 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008562 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008563 if (!tg->rt_se)
8564 goto err;
8565
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008566 init_rt_bandwidth(&tg->rt_bandwidth,
8567 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008568
8569 for_each_possible_cpu(i) {
8570 rq = cpu_rq(i);
8571
Li Zefaneab17222008-10-29 17:03:22 +08008572 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8573 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008574 if (!rt_rq)
8575 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008576
Li Zefaneab17222008-10-29 17:03:22 +08008577 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8578 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008579 if (!rt_se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008580 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008581
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008582 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008583 }
8584
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008585 return 1;
8586
Peter Zijlstra49246272010-10-17 21:46:10 +02008587err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008588 kfree(rt_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008589err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008590 return 0;
8591}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008592#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008593static inline void free_rt_sched_group(struct task_group *tg)
8594{
8595}
8596
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008597static inline
8598int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008599{
8600 return 1;
8601}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008602#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008603
Dhaval Giani7c941432010-01-20 13:26:18 +01008604#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008605static void free_sched_group(struct task_group *tg)
8606{
8607 free_fair_sched_group(tg);
8608 free_rt_sched_group(tg);
Mike Galbraithe9aa1dd2011-01-05 11:11:25 +01008609 autogroup_free(tg);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008610 kfree(tg);
8611}
8612
8613/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008614struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008615{
8616 struct task_group *tg;
8617 unsigned long flags;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008618
8619 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8620 if (!tg)
8621 return ERR_PTR(-ENOMEM);
8622
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008623 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008624 goto err;
8625
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008626 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008627 goto err;
8628
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008629 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008630 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008631
8632 WARN_ON(!parent); /* root should already exist */
8633
8634 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008635 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242008-08-14 15:56:40 +08008636	list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008637 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008638
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008639 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008640
8641err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008642 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008643 return ERR_PTR(-ENOMEM);
8644}
8645
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008646/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008647static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008648{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008649 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008650 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008651}
8652
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008653/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008654void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008655{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008656 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008657 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008658
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008659 /* end participation in shares distribution */
8660 for_each_possible_cpu(i)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008661 unregister_fair_sched_group(tg, i);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008662
8663 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008664 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008665 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008666 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008667
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008668	/* wait for possible concurrent references to cfs_rqs to complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008669 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008670}
8671
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008672/* Change a task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02008673 * The caller of this function should have put the task in its new group
8674 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8675 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008676 */
8677void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008678{
8679 int on_rq, running;
8680 unsigned long flags;
8681 struct rq *rq;
8682
8683 rq = task_rq_lock(tsk, &flags);
8684
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01008685 running = task_current(rq, tsk);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02008686 on_rq = tsk->on_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008687
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008688 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008689 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008690 if (unlikely(running))
8691 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008692
Peter Zijlstra810b3812008-02-29 15:21:01 -05008693#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008694 if (tsk->sched_class->task_move_group)
8695 tsk->sched_class->task_move_group(tsk, on_rq);
8696 else
Peter Zijlstra810b3812008-02-29 15:21:01 -05008697#endif
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008698 set_task_rq(tsk, task_cpu(tsk));
Peter Zijlstra810b3812008-02-29 15:21:01 -05008699
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008700 if (unlikely(running))
8701 tsk->sched_class->set_curr_task(rq);
8702 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01008703 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008704
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008705 task_rq_unlock(rq, &flags);
8706}
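/*
 * Note how sched_move_task() brackets the actual regrouping with
 * dequeue/put_prev on the way in and set_curr/enqueue on the way out,
 * so the task is never queued with stale group pointers.  The same
 * bracketing, reduced to a userspace sketch (all names hypothetical):
 */
#if 0
#include <stdbool.h>

struct item { bool queued, running; int group; };

static void queue_remove(struct item *it) { it->queued = false; }
static void queue_add(struct item *it) { it->queued = true; }

static void move_item(struct item *it, int new_group)
{
	bool queued = it->queued, running = it->running;

	if (queued)
		queue_remove(it);	/* off the queue while we edit it */
	if (running)
		it->running = false;	/* analogue of put_prev_task() */

	it->group = new_group;		/* the actual regrouping */

	if (running)
		it->running = true;	/* analogue of set_curr_task() */
	if (queued)
		queue_add(it);		/* requeued, new group visible */
}
#endif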
Dhaval Giani7c941432010-01-20 13:26:18 +01008707#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008708
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008709#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008710static DEFINE_MUTEX(shares_mutex);
8711
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008712int sched_group_set_shares(struct task_group *tg, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008713{
8714 int i;
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008715 unsigned long flags;
Ingo Molnarc61935f2008-01-22 11:24:58 +01008716
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008717 /*
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008718 * We can't change the weight of the root cgroup.
8719 */
8720 if (!tg->se[0])
8721 return -EINVAL;
8722
Peter Zijlstra18d95a22008-04-19 19:45:00 +02008723 if (shares < MIN_SHARES)
8724 shares = MIN_SHARES;
Miao Xiecb4ad1f2008-04-28 12:54:56 +08008725 else if (shares > MAX_SHARES)
8726 shares = MAX_SHARES;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008727
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008728 mutex_lock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008729 if (tg->shares == shares)
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008730 goto done;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008731
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008732 tg->shares = shares;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008733 for_each_possible_cpu(i) {
Paul Turner94371782010-11-15 15:47:10 -08008734 struct rq *rq = cpu_rq(i);
8735 struct sched_entity *se;
8736
8737 se = tg->se[i];
8738 /* Propagate contribution to hierarchy */
8739 raw_spin_lock_irqsave(&rq->lock, flags);
8740 for_each_sched_entity(se)
Paul Turner6d5ab292011-01-21 20:45:01 -08008741 update_cfs_shares(group_cfs_rq(se));
Paul Turner94371782010-11-15 15:47:10 -08008742 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008743 }
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008744
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008745done:
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008746 mutex_unlock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008747 return 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008748}
8749
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008750unsigned long sched_group_shares(struct task_group *tg)
8751{
8752 return tg->shares;
8753}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008754#endif
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008755
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008756#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008757/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008758 * Ensure that the real time constraints are schedulable.
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008759 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008760static DEFINE_MUTEX(rt_constraints_mutex);
8761
8762static unsigned long to_ratio(u64 period, u64 runtime)
8763{
8764 if (runtime == RUNTIME_INF)
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008765 return 1ULL << 20;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008766
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008767 return div64_u64(runtime << 20, period);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008768}
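/*
 * to_ratio() expresses runtime/period as a 20-bit fixed-point
 * fraction, so 1ULL << 20 (1048576) stands for 100% of a CPU.  A
 * worked userspace example of the same arithmetic, using the usual
 * 1 s period / 0.95 s runtime defaults (RUNTIME_INF redefined locally):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF ((uint64_t)~0ULL)

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;
	return (runtime << 20) / period;
}

int main(void)
{
	/* 950000000 ns of runtime every 1000000000 ns: */
	printf("%llu\n", (unsigned long long)to_ratio(1000000000ULL, 950000000ULL));
	/* prints 996147, i.e. 0.95 * 2^20 rounded down */
	return 0;
}
#endif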
8769
Dhaval Giani521f1a242008-02-28 15:21:56 +05308770/* Must be called with tasklist_lock held */
8771static inline int tg_has_rt_tasks(struct task_group *tg)
8772{
8773 struct task_struct *g, *p;
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008774
Dhaval Giani521f1a242008-02-28 15:21:56 +05308775 do_each_thread(g, p) {
8776 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8777 return 1;
8778 } while_each_thread(g, p);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008779
Dhaval Giani521f1a242008-02-28 15:21:56 +05308780 return 0;
8781}
8782
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008783struct rt_schedulable_data {
8784 struct task_group *tg;
8785 u64 rt_period;
8786 u64 rt_runtime;
8787};
8788
8789static int tg_schedulable(struct task_group *tg, void *data)
8790{
8791 struct rt_schedulable_data *d = data;
8792 struct task_group *child;
8793 unsigned long total, sum = 0;
8794 u64 period, runtime;
8795
8796 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8797 runtime = tg->rt_bandwidth.rt_runtime;
8798
8799 if (tg == d->tg) {
8800 period = d->rt_period;
8801 runtime = d->rt_runtime;
8802 }
8803
Peter Zijlstra4653f802008-09-23 15:33:44 +02008804 /*
8805 * Cannot have more runtime than the period.
8806 */
8807 if (runtime > period && runtime != RUNTIME_INF)
8808 return -EINVAL;
8809
8810 /*
8811 * Ensure we don't starve existing RT tasks.
8812 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008813 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8814 return -EBUSY;
8815
8816 total = to_ratio(period, runtime);
8817
Peter Zijlstra4653f802008-09-23 15:33:44 +02008818 /*
8819 * Nobody can have more than the global setting allows.
8820 */
8821 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8822 return -EINVAL;
8823
8824 /*
8825 * The sum of our children's runtime should not exceed our own.
8826 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008827 list_for_each_entry_rcu(child, &tg->children, siblings) {
8828 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8829 runtime = child->rt_bandwidth.rt_runtime;
8830
8831 if (child == d->tg) {
8832 period = d->rt_period;
8833 runtime = d->rt_runtime;
8834 }
8835
8836 sum += to_ratio(period, runtime);
8837 }
8838
8839 if (sum > total)
8840 return -EINVAL;
8841
8842 return 0;
8843}
8844
8845static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8846{
8847 struct rt_schedulable_data data = {
8848 .tg = tg,
8849 .rt_period = period,
8850 .rt_runtime = runtime,
8851 };
8852
8853 return walk_tg_tree(tg_schedulable, tg_nop, &data);
8854}
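/*
 * tg_schedulable() enforces two invariants for every group: the group
 * itself must fit under the global ceiling, and its children's ratios
 * must sum to no more than its own.  A worked check, reusing the
 * to_ratio() sketch above (values in nanoseconds, groups hypothetical):
 */
#if 0
static int parent_accepts_children(void)
{
	uint64_t total = to_ratio(1000000000ULL, 500000000ULL);	  /* parent: 524288 */
	uint64_t sum   = to_ratio(1000000000ULL, 200000000ULL) +  /* child A: 209715 */
			 to_ratio(1000000000ULL, 200000000ULL);	  /* child B: 209715 */

	return sum <= total;	/* 419430 <= 524288: the setting is accepted */
}
#endif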
8855
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008856static int tg_set_bandwidth(struct task_group *tg,
8857 u64 rt_period, u64 rt_runtime)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008858{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008859 int i, err = 0;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008860
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008861 mutex_lock(&rt_constraints_mutex);
Dhaval Giani521f1a242008-02-28 15:21:56 +05308862 read_lock(&tasklist_lock);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008863 err = __rt_schedulable(tg, rt_period, rt_runtime);
8864 if (err)
Dhaval Giani521f1a242008-02-28 15:21:56 +05308865 goto unlock;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008866
Thomas Gleixner0986b112009-11-17 15:32:06 +01008867 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008868 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8869 tg->rt_bandwidth.rt_runtime = rt_runtime;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008870
8871 for_each_possible_cpu(i) {
8872 struct rt_rq *rt_rq = tg->rt_rq[i];
8873
Thomas Gleixner0986b112009-11-17 15:32:06 +01008874 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008875 rt_rq->rt_runtime = rt_runtime;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008876 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008877 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008878 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstra49246272010-10-17 21:46:10 +02008879unlock:
Dhaval Giani521f1a242008-02-28 15:21:56 +05308880 read_unlock(&tasklist_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008881 mutex_unlock(&rt_constraints_mutex);
8882
8883 return err;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008884}
8885
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008886int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8887{
8888 u64 rt_runtime, rt_period;
8889
8890 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8891 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8892 if (rt_runtime_us < 0)
8893 rt_runtime = RUNTIME_INF;
8894
8895 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8896}
8897
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008898long sched_group_rt_runtime(struct task_group *tg)
8899{
8900 u64 rt_runtime_us;
8901
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008902 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008903 return -1;
8904
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008905 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008906 do_div(rt_runtime_us, NSEC_PER_USEC);
8907 return rt_runtime_us;
8908}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008909
8910int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8911{
8912 u64 rt_runtime, rt_period;
8913
8914 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8915 rt_runtime = tg->rt_bandwidth.rt_runtime;
8916
Raistlin619b0482008-06-26 18:54:09 +02008917 if (rt_period == 0)
8918 return -EINVAL;
8919
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008920 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8921}
8922
8923long sched_group_rt_period(struct task_group *tg)
8924{
8925 u64 rt_period_us;
8926
8927 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8928 do_div(rt_period_us, NSEC_PER_USEC);
8929 return rt_period_us;
8930}
8931
8932static int sched_rt_global_constraints(void)
8933{
Peter Zijlstra4653f802008-09-23 15:33:44 +02008934 u64 runtime, period;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008935 int ret = 0;
8936
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008937 if (sysctl_sched_rt_period <= 0)
8938 return -EINVAL;
8939
Peter Zijlstra4653f802008-09-23 15:33:44 +02008940 runtime = global_rt_runtime();
8941 period = global_rt_period();
8942
8943 /*
8944 * Sanity check on the sysctl variables.
8945 */
8946 if (runtime > period && runtime != RUNTIME_INF)
8947 return -EINVAL;
Peter Zijlstra10b612f2008-06-19 14:22:27 +02008948
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008949 mutex_lock(&rt_constraints_mutex);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008950 read_lock(&tasklist_lock);
Peter Zijlstra4653f802008-09-23 15:33:44 +02008951 ret = __rt_schedulable(NULL, 0, 0);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008952 read_unlock(&tasklist_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008953 mutex_unlock(&rt_constraints_mutex);
8954
8955 return ret;
8956}
Dhaval Giani54e99122009-02-27 15:13:54 +05308957
8958int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8959{
8960 /* Don't accept realtime tasks when there is no way for them to run */
8961 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8962 return 0;
8963
8964 return 1;
8965}
8966
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008967#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008968static int sched_rt_global_constraints(void)
8969{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008970 unsigned long flags;
8971 int i;
8972
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008973 if (sysctl_sched_rt_period <= 0)
8974 return -EINVAL;
8975
Peter Zijlstra60aa6052009-05-05 17:50:21 +02008976 /*
 8977	 * There are always some RT tasks in the root group
 8978	 * -- migration, kstopmachine etc.
8979 */
8980 if (sysctl_sched_rt_runtime == 0)
8981 return -EBUSY;
8982
Thomas Gleixner0986b112009-11-17 15:32:06 +01008983 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008984 for_each_possible_cpu(i) {
8985 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8986
Thomas Gleixner0986b112009-11-17 15:32:06 +01008987 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008988 rt_rq->rt_runtime = global_rt_runtime();
Thomas Gleixner0986b112009-11-17 15:32:06 +01008989 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008990 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008991 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008992
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008993 return 0;
8994}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008995#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008996
8997int sched_rt_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07008998 void __user *buffer, size_t *lenp,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008999 loff_t *ppos)
9000{
9001 int ret;
9002 int old_period, old_runtime;
9003 static DEFINE_MUTEX(mutex);
9004
9005 mutex_lock(&mutex);
9006 old_period = sysctl_sched_rt_period;
9007 old_runtime = sysctl_sched_rt_runtime;
9008
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07009009 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009010
9011 if (!ret && write) {
9012 ret = sched_rt_global_constraints();
9013 if (ret) {
9014 sysctl_sched_rt_period = old_period;
9015 sysctl_sched_rt_runtime = old_runtime;
9016 } else {
9017 def_rt_bandwidth.rt_runtime = global_rt_runtime();
9018 def_rt_bandwidth.rt_period =
9019 ns_to_ktime(global_rt_period());
9020 }
9021 }
9022 mutex_unlock(&mutex);
9023
9024 return ret;
9025}
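/*
 * sched_rt_handler() backs /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us: a write is first stored by
 * proc_dointvec(), then validated by sched_rt_global_constraints()
 * and rolled back (the write fails) if it would leave the system
 * unschedulable.  A userspace sketch of driving it (error handling
 * kept minimal):
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* Leave 5% of every second to non-RT tasks: 950000 of 1000000 us. */
	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "w");

	if (f) {
		fprintf(f, "950000\n");
		fclose(f);
	}
	return 0;
}
#endif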
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009026
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009027#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009028
9029/* return corresponding task_group object of a cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +02009030static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009031{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009032 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
9033 struct task_group, css);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009034}
9035
9036static struct cgroup_subsys_state *
Paul Menage2b01dfe2007-10-24 18:23:50 +02009037cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009038{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009039 struct task_group *tg, *parent;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009040
Paul Menage2b01dfe2007-10-24 18:23:50 +02009041 if (!cgrp->parent) {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009042 /* This is early initialization for the top cgroup */
Yong Zhang07e06b02011-01-07 15:17:36 +08009043 return &root_task_group.css;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009044 }
9045
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009046 parent = cgroup_tg(cgrp->parent);
9047 tg = sched_create_group(parent);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009048 if (IS_ERR(tg))
9049 return ERR_PTR(-ENOMEM);
9050
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009051 return &tg->css;
9052}
9053
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009054static void
9055cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009056{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009057 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009058
9059 sched_destroy_group(tg);
9060}
9061
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009062static int
Ben Blumbe367d02009-09-23 15:56:31 -07009063cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009064{
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009065#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Giani54e99122009-02-27 15:13:54 +05309066 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009067 return -EINVAL;
9068#else
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009069 /* We don't support RT-tasks being in separate groups */
9070 if (tsk->sched_class != &fair_sched_class)
9071 return -EINVAL;
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009072#endif
Ben Blumbe367d02009-09-23 15:56:31 -07009073 return 0;
9074}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009075
Ben Blumbe367d02009-09-23 15:56:31 -07009076static int
9077cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9078 struct task_struct *tsk, bool threadgroup)
9079{
9080 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
9081 if (retval)
9082 return retval;
9083 if (threadgroup) {
9084 struct task_struct *c;
9085 rcu_read_lock();
9086 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
9087 retval = cpu_cgroup_can_attach_task(cgrp, c);
9088 if (retval) {
9089 rcu_read_unlock();
9090 return retval;
9091 }
9092 }
9093 rcu_read_unlock();
9094 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009095 return 0;
9096}
9097
9098static void
Paul Menage2b01dfe2007-10-24 18:23:50 +02009099cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Ben Blumbe367d02009-09-23 15:56:31 -07009100 struct cgroup *old_cont, struct task_struct *tsk,
9101 bool threadgroup)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009102{
9103 sched_move_task(tsk);
Ben Blumbe367d02009-09-23 15:56:31 -07009104 if (threadgroup) {
9105 struct task_struct *c;
9106 rcu_read_lock();
9107 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
9108 sched_move_task(c);
9109 }
9110 rcu_read_unlock();
9111 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009112}
9113
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009114static void
Peter Zijlstrad41d5a02011-02-07 17:02:20 +01009115cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
9116 struct cgroup *old_cgrp, struct task_struct *task)
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009117{
9118 /*
9119 * cgroup_exit() is called in the copy_process() failure path.
 9120	 * Ignore this case since the task hasn't run yet; this avoids
9121 * trying to poke a half freed task state from generic code.
9122 */
9123 if (!(task->flags & PF_EXITING))
9124 return;
9125
9126 sched_move_task(task);
9127}
9128
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009129#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagef4c753b2008-04-29 00:59:56 -07009130static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
Paul Menage2b01dfe2007-10-24 18:23:50 +02009131 u64 shareval)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009132{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009133 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009134}
9135
Paul Menagef4c753b2008-04-29 00:59:56 -07009136static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009137{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009138 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009139
9140 return (u64) tg->shares;
9141}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009142#endif /* CONFIG_FAIR_GROUP_SCHED */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009143
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009144#ifdef CONFIG_RT_GROUP_SCHED
Mirco Tischler0c708142008-05-14 16:05:46 -07009145static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
Paul Menage06ecb272008-04-29 01:00:06 -07009146 s64 val)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009147{
Paul Menage06ecb272008-04-29 01:00:06 -07009148 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009149}
9150
Paul Menage06ecb272008-04-29 01:00:06 -07009151static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009152{
Paul Menage06ecb272008-04-29 01:00:06 -07009153 return sched_group_rt_runtime(cgroup_tg(cgrp));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009154}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009155
9156static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
9157 u64 rt_period_us)
9158{
9159 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
9160}
9161
9162static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
9163{
9164 return sched_group_rt_period(cgroup_tg(cgrp));
9165}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009166#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009167
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009168static struct cftype cpu_files[] = {
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009169#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009170 {
9171 .name = "shares",
Paul Menagef4c753b2008-04-29 00:59:56 -07009172 .read_u64 = cpu_shares_read_u64,
9173 .write_u64 = cpu_shares_write_u64,
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009174 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009175#endif
9176#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009177 {
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01009178 .name = "rt_runtime_us",
Paul Menage06ecb272008-04-29 01:00:06 -07009179 .read_s64 = cpu_rt_runtime_read,
9180 .write_s64 = cpu_rt_runtime_write,
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009181 },
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009182 {
9183 .name = "rt_period_us",
Paul Menagef4c753b2008-04-29 00:59:56 -07009184 .read_u64 = cpu_rt_period_read_uint,
9185 .write_u64 = cpu_rt_period_write_uint,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009186 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009187#endif
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009188};
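/*
 * The cftype table above is what appears as cpu.shares,
 * cpu.rt_runtime_us and cpu.rt_period_us inside a mounted cpu cgroup.
 * A userspace sketch, assuming the conventional /sys/fs/cgroup/cpu
 * mount point and a hypothetical group called "mygroup":
 */
#if 0
#include <stdio.h>

#define GRP "/sys/fs/cgroup/cpu/mygroup/"

static int set_val(const char *file, unsigned long long val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), GRP "%s", file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%llu\n", val);
	return fclose(f);
}

int main(void)
{
	set_val("cpu.shares", 2048);		/* twice the NICE_0_LOAD default */
	set_val("cpu.rt_runtime_us", 500000);	/* 0.5 s of RT time per default 1 s period */
	return 0;
}
#endif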
9189
9190static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
9191{
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009192 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009193}
9194
9195struct cgroup_subsys cpu_cgroup_subsys = {
Ingo Molnar38605ca2007-10-29 21:18:11 +01009196 .name = "cpu",
9197 .create = cpu_cgroup_create,
9198 .destroy = cpu_cgroup_destroy,
9199 .can_attach = cpu_cgroup_can_attach,
9200 .attach = cpu_cgroup_attach,
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009201 .exit = cpu_cgroup_exit,
Ingo Molnar38605ca2007-10-29 21:18:11 +01009202 .populate = cpu_cgroup_populate,
9203 .subsys_id = cpu_cgroup_subsys_id,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009204 .early_init = 1,
9205};
9206
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009207#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009208
9209#ifdef CONFIG_CGROUP_CPUACCT
9210
9211/*
9212 * CPU accounting code for task groups.
9213 *
9214 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
9215 * (balbir@in.ibm.com).
9216 */
9217
Bharata B Rao934352f2008-11-10 20:41:13 +05309218/* track cpu usage of a group of tasks and its child groups */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009219struct cpuacct {
9220 struct cgroup_subsys_state css;
9221 /* cpuusage holds pointer to a u64-type object on every cpu */
Tejun Heo43cf38e2010-02-02 14:38:57 +09009222 u64 __percpu *cpuusage;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309223 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
Bharata B Rao934352f2008-11-10 20:41:13 +05309224 struct cpuacct *parent;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009225};
9226
9227struct cgroup_subsys cpuacct_subsys;
9228
9229/* return cpu accounting group corresponding to this container */
Dhaval Giani32cd7562008-02-29 10:02:43 +05309230static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009231{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309232 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009233 struct cpuacct, css);
9234}
9235
9236/* return cpu accounting group to which this task belongs */
9237static inline struct cpuacct *task_ca(struct task_struct *tsk)
9238{
9239 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
9240 struct cpuacct, css);
9241}
9242
9243/* create a new cpu accounting group */
9244static struct cgroup_subsys_state *cpuacct_create(
Dhaval Giani32cd7562008-02-29 10:02:43 +05309245 struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009246{
9247 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309248 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009249
9250 if (!ca)
Bharata B Raoef12fef2009-03-31 10:02:22 +05309251 goto out;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009252
9253 ca->cpuusage = alloc_percpu(u64);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309254 if (!ca->cpuusage)
9255 goto out_free_ca;
9256
9257 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9258 if (percpu_counter_init(&ca->cpustat[i], 0))
9259 goto out_free_counters;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009260
Bharata B Rao934352f2008-11-10 20:41:13 +05309261 if (cgrp->parent)
9262 ca->parent = cgroup_ca(cgrp->parent);
9263
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009264 return &ca->css;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309265
9266out_free_counters:
9267 while (--i >= 0)
9268 percpu_counter_destroy(&ca->cpustat[i]);
9269 free_percpu(ca->cpuusage);
9270out_free_ca:
9271 kfree(ca);
9272out:
9273 return ERR_PTR(-ENOMEM);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009274}
9275
9276/* destroy an existing cpu accounting group */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009277static void
Dhaval Giani32cd7562008-02-29 10:02:43 +05309278cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009279{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309280 struct cpuacct *ca = cgroup_ca(cgrp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309281 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009282
Bharata B Raoef12fef2009-03-31 10:02:22 +05309283 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9284 percpu_counter_destroy(&ca->cpustat[i]);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009285 free_percpu(ca->cpuusage);
9286 kfree(ca);
9287}
9288
Ken Chen720f5492008-12-15 22:02:01 -08009289static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9290{
Rusty Russellb36128c2009-02-20 16:29:08 +09009291 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08009292 u64 data;
9293
9294#ifndef CONFIG_64BIT
9295 /*
9296 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
9297 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009298 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009299 data = *cpuusage;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009300 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009301#else
9302 data = *cpuusage;
9303#endif
9304
9305 return data;
9306}
9307
9308static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9309{
Rusty Russellb36128c2009-02-20 16:29:08 +09009310 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08009311
9312#ifndef CONFIG_64BIT
9313 /*
9314 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
9315 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009316 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009317 *cpuusage = val;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009318 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009319#else
9320 *cpuusage = val;
9321#endif
9322}
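/*
 * On 32-bit machines a u64 load is two separate word accesses, so an
 * unlocked reader can pair the low half of an old value with the high
 * half of a new one; that is what the rq->lock above prevents.  A
 * sketch of what such a torn read looks like (values chosen so both
 * halves change in the same update):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t before = 0x00000000ffffffffULL;	/* counter pre-update  */
	uint64_t after  = 0x0000000100000000ULL;	/* counter post-update */

	/* Reader catches the old low word but the new high word: */
	uint64_t torn = (after & 0xffffffff00000000ULL) | (before & 0xffffffffULL);

	printf("0x%016llx\n", (unsigned long long)torn);
	/* prints 0x00000001ffffffff - neither the old nor the new value */
	return 0;
}
#endif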
9323
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009324/* return total cpu usage (in nanoseconds) of a group */
Dhaval Giani32cd7562008-02-29 10:02:43 +05309325static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009326{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309327 struct cpuacct *ca = cgroup_ca(cgrp);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009328 u64 totalcpuusage = 0;
9329 int i;
9330
Ken Chen720f5492008-12-15 22:02:01 -08009331 for_each_present_cpu(i)
9332 totalcpuusage += cpuacct_cpuusage_read(ca, i);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009333
9334 return totalcpuusage;
9335}
9336
Dhaval Giani0297b802008-02-29 10:02:44 +05309337static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
9338 u64 reset)
9339{
9340 struct cpuacct *ca = cgroup_ca(cgrp);
9341 int err = 0;
9342 int i;
9343
9344 if (reset) {
9345 err = -EINVAL;
9346 goto out;
9347 }
9348
Ken Chen720f5492008-12-15 22:02:01 -08009349 for_each_present_cpu(i)
9350 cpuacct_cpuusage_write(ca, i, 0);
Dhaval Giani0297b802008-02-29 10:02:44 +05309351
Dhaval Giani0297b802008-02-29 10:02:44 +05309352out:
9353 return err;
9354}
9355
Ken Chene9515c32008-12-15 22:04:15 -08009356static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
9357 struct seq_file *m)
9358{
9359 struct cpuacct *ca = cgroup_ca(cgroup);
9360 u64 percpu;
9361 int i;
9362
9363 for_each_present_cpu(i) {
9364 percpu = cpuacct_cpuusage_read(ca, i);
9365 seq_printf(m, "%llu ", (unsigned long long) percpu);
9366 }
9367 seq_printf(m, "\n");
9368 return 0;
9369}
9370
Bharata B Raoef12fef2009-03-31 10:02:22 +05309371static const char *cpuacct_stat_desc[] = {
9372 [CPUACCT_STAT_USER] = "user",
9373 [CPUACCT_STAT_SYSTEM] = "system",
9374};
9375
9376static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9377 struct cgroup_map_cb *cb)
9378{
9379 struct cpuacct *ca = cgroup_ca(cgrp);
9380 int i;
9381
9382 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
9383 s64 val = percpu_counter_read(&ca->cpustat[i]);
9384 val = cputime64_to_clock_t(val);
9385 cb->fill(cb, cpuacct_stat_desc[i], val);
9386 }
9387 return 0;
9388}
9389
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009390static struct cftype files[] = {
9391 {
9392 .name = "usage",
Paul Menagef4c753b2008-04-29 00:59:56 -07009393 .read_u64 = cpuusage_read,
9394 .write_u64 = cpuusage_write,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009395 },
Ken Chene9515c32008-12-15 22:04:15 -08009396 {
9397 .name = "usage_percpu",
9398 .read_seq_string = cpuacct_percpu_seq_read,
9399 },
Bharata B Raoef12fef2009-03-31 10:02:22 +05309400 {
9401 .name = "stat",
9402 .read_map = cpuacct_stats_show,
9403 },
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009404};
9405
Dhaval Giani32cd7562008-02-29 10:02:43 +05309406static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009407{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309408 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009409}
9410
9411/*
9412 * charge this task's execution time to its accounting group.
9413 *
9414 * called with rq->lock held.
9415 */
9416static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9417{
9418 struct cpuacct *ca;
Bharata B Rao934352f2008-11-10 20:41:13 +05309419 int cpu;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009420
Li Zefanc40c6f82009-02-26 15:40:15 +08009421 if (unlikely(!cpuacct_subsys.active))
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009422 return;
9423
Bharata B Rao934352f2008-11-10 20:41:13 +05309424 cpu = task_cpu(tsk);
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309425
9426 rcu_read_lock();
9427
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009428 ca = task_ca(tsk);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009429
Bharata B Rao934352f2008-11-10 20:41:13 +05309430 for (; ca; ca = ca->parent) {
Rusty Russellb36128c2009-02-20 16:29:08 +09009431 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009432 *cpuusage += cputime;
9433 }
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309434
9435 rcu_read_unlock();
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009436}
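/*
 * cpuacct_charge() adds each slice of execution time to the task's
 * group and to every ancestor, so a parent's usage always includes
 * its children's.  The walk, reduced to a sketch (types hypothetical):
 */
#if 0
struct acct_group {
	unsigned long long usage;
	struct acct_group *parent;	/* NULL at the root */
};

static void charge(struct acct_group *grp, unsigned long long delta)
{
	for (; grp; grp = grp->parent)
		grp->usage += delta;	/* every ancestor is charged */
}
#endif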
9437
Bharata B Raoef12fef2009-03-31 10:02:22 +05309438/*
Anton Blanchardfa535a72010-02-02 14:46:13 -08009439 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
9440 * in cputime_t units. As a result, cpuacct_update_stats calls
9441 * percpu_counter_add with values large enough to always overflow the
9442 * per cpu batch limit causing bad SMP scalability.
9443 *
9444 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9445 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9446 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
9447 */
9448#ifdef CONFIG_SMP
9449#define CPUACCT_BATCH \
9450 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9451#else
9452#define CPUACCT_BATCH 0
9453#endif
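/*
 * The scaling above keeps the fold threshold worth roughly the same
 * wall-clock time whether a jiffy counts as 1 or as ~10^6 cputime
 * units.  Worked with assumed values (a batch of 64 and a
 * nanosecond-granularity jiffy at HZ=1000; the real numbers depend on
 * the architecture and nr_cpu_ids):
 */
#if 0
#include <stdio.h>
#include <limits.h>

int main(void)
{
	long batch = 64;		/* assumed percpu_counter_batch */
	long one_jiffy = 1000000;	/* assumed cputime_one_jiffy */
	long scaled = batch * one_jiffy;

	printf("%ld\n", scaled < INT_MAX ? scaled : INT_MAX);
	/* prints 64000000: counters still fold about every 64 jiffies */
	return 0;
}
#endif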
9454
9455/*
Bharata B Raoef12fef2009-03-31 10:02:22 +05309456 * Charge the system/user time to the task's accounting group.
9457 */
9458static void cpuacct_update_stats(struct task_struct *tsk,
9459 enum cpuacct_stat_index idx, cputime_t val)
9460{
9461 struct cpuacct *ca;
Anton Blanchardfa535a72010-02-02 14:46:13 -08009462 int batch = CPUACCT_BATCH;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309463
9464 if (unlikely(!cpuacct_subsys.active))
9465 return;
9466
9467 rcu_read_lock();
9468 ca = task_ca(tsk);
9469
9470 do {
Anton Blanchardfa535a72010-02-02 14:46:13 -08009471 __percpu_counter_add(&ca->cpustat[idx], val, batch);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309472 ca = ca->parent;
9473 } while (ca);
9474 rcu_read_unlock();
9475}
9476
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009477struct cgroup_subsys cpuacct_subsys = {
9478 .name = "cpuacct",
9479 .create = cpuacct_create,
9480 .destroy = cpuacct_destroy,
9481 .populate = cpuacct_populate,
9482 .subsys_id = cpuacct_subsys_id,
9483};
9484#endif /* CONFIG_CGROUP_CPUACCT */
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009485