/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>

#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
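/*
 * Worked example (illustrative, assuming the stock MAX_RT_PRIO of 100):
 * NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139,
 * so the whole nice range maps onto static priorities 100..139.
 */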

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
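/*
 * Illustrative values (assuming MAX_RT_PRIO == 100 and MAX_PRIO == 140):
 * a nice-0 task has static_prio 120, so TASK_USER_PRIO() yields 20, and
 * MAX_USER_PRIO evaluates to 40.
 */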

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
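/*
 * Example (illustrative): with HZ == 1000 a jiffy is 1,000,000 ns, so
 * NS_TO_JIFFIES(5000000) == 5.
 */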

#define NICE_0_LOAD             SCHED_LOAD_SCALE
#define NICE_0_SHIFT            SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE           (100 * HZ / 1000)
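/*
 * Example (illustrative): with HZ == 1000 this is 100 jiffies; with
 * HZ == 250 it is 25 jiffies - both correspond to the same 100 ms.
 */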

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF     ((u64)~0ULL)

static inline int rt_policy(int policy)
{
        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
                return 1;
        return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_RT_PRIO];
};
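/*
 * A minimal dequeue sketch (illustrative only, not the actual rt class
 * code): the lowest set bit marks the highest-priority non-empty queue,
 * so picking the next task is O(1):
 *
 *      idx = sched_find_first_bit(array->bitmap);
 *      next = list_first_entry(&array->queue[idx],
 *                              struct sched_rt_entity, run_list);
 */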

struct rt_bandwidth {
        /* nests inside the rq lock: */
        raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}
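/*
 * Typical usage (a sketch; sched_init() sets up the default bandwidth
 * pool along these lines):
 *
 *      init_rt_bandwidth(&def_rt_bandwidth,
 *                      global_rt_period(), global_rt_runtime());
 */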

static inline int rt_bandwidth_enabled(void)
{
        return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        ktime_t now;

        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;

                if (hrtimer_active(&rt_b->rt_period_timer))
                        break;

                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

                soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
                hard = hrtimer_get_expires(&rt_b->rt_period_timer);
                delta = ktime_to_ns(ktime_sub(hard, soft));
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
        struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* schedulable entities of this group on each cpu */
        struct sched_entity **se;
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
        unsigned long shares;

        atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity **rt_se;
        struct rt_rq **rt_rq;

        struct rt_bandwidth rt_bandwidth;
#endif

        struct rcu_head rcu;
        struct list_head list;

        struct task_group *parent;
        struct list_head siblings;
        struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;
#endif
};

/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

# define ROOT_TASK_GROUP_LOAD   NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES      2
#define MAX_SHARES      (1UL << 18)
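/*
 * Illustrative range: MAX_SHARES is 262144, so a group's shares can be
 * tuned between 2 and 262144, with the nice-0 default of 1024 sitting
 * well inside those bounds.
 */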

static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif

/* Default task group.
 *      Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;

#endif  /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
        struct load_weight load;
        unsigned long nr_running;

        u64 exec_clock;
        u64 min_vruntime;

        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;

        struct list_head tasks;
        struct list_head *balance_iterator;

        /*
         * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
        struct sched_entity *curr, *next, *last, *skip;

        unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */

        /*
         * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
         * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
         * (like users, containers etc.)
         *
         * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
         * list is used during load balance.
         */
        int on_list;
        struct list_head leaf_cfs_rq_list;
        struct task_group *tg;  /* group that "owns" this runqueue */

#ifdef CONFIG_SMP
        /*
         * the part of load.weight contributed by tasks
         */
        unsigned long task_weight;

        /*
         *   h_load = weight * f(tg)
         *
         * Where f(tg) is the recursive weight fraction assigned to
         * this group.
         */
        unsigned long h_load;

        /*
         * Maintaining per-cpu shares distribution for group scheduling
         *
         * load_stamp is the last time we updated the load average
         * load_last is the last time we updated the load average and saw load
         * load_unacc_exec_time is currently unaccounted execution time
         */
        u64 load_avg;
        u64 load_period;
        u64 load_stamp, load_last, load_unacc_exec_time;

        unsigned long load_contribution;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
        struct rt_prio_array active;
        unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        struct {
                int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
                int next; /* next highest */
#endif
        } highest_prio;
#endif
#ifdef CONFIG_SMP
        unsigned long rt_nr_migratory;
        unsigned long rt_nr_total;
        int overloaded;
        struct plist_head pushable_tasks;
#endif
        int rt_throttled;
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
        raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;

        struct rq *rq;
        struct list_head leaf_rt_rq_list;
        struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
        atomic_t refcount;
        cpumask_var_t span;
        cpumask_var_t online;

        /*
         * The "RT overload" flag: it gets set if a CPU has more than
         * one runnable RT task.
         */
        cpumask_var_t rto_mask;
        atomic_t rto_count;
        struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: in those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
        /* runqueue lock: */
        raw_spinlock_t lock;

        /*
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned long nr_running;
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
        unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
        u64 nohz_stamp;
        unsigned char nohz_balance_kick;
#endif
        unsigned int skip_clock_update;

        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;

        struct cfs_rq cfs;
        struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* list of leaf cfs_rq on this cpu: */
        struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        struct list_head leaf_rt_rq_list;
#endif

        /*
         * This is part of a global counter where only the total sum
         * over all CPUs matters. A task can increase this counter on
         * one CPU and if it got migrated afterwards it may decrease
         * it on another CPU. Always updated under the runqueue lock:
         */
        unsigned long nr_uninterruptible;

        struct task_struct *curr, *idle, *stop;
        unsigned long next_balance;
        struct mm_struct *prev_mm;

        u64 clock;
        u64 clock_task;

        atomic_t nr_iowait;

#ifdef CONFIG_SMP
        struct root_domain *rd;
        struct sched_domain *sd;

        unsigned long cpu_power;

        unsigned char idle_at_tick;
        /* For active balancing */
        int post_schedule;
        int active_balance;
        int push_cpu;
        struct cpu_stop_work active_balance_work;
        /* cpu of this runqueue: */
        int cpu;
        int online;

        unsigned long avg_load_per_task;

        u64 rt_avg;
        u64 age_stamp;
        u64 idle_stamp;
        u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        u64 prev_irq_time;
#endif

        /* calc_load related fields */
        unsigned long calc_load_update;
        long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
        int hrtick_csd_pending;
        struct call_single_data hrtick_csd;
#endif
        struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;
        unsigned long long rq_cpu_time;
        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

        /* sys_sched_yield() stats */
        unsigned int yld_count;

        /* schedule() stats */
        unsigned int sched_switch;
        unsigned int sched_count;
        unsigned int sched_goidle;

        /* try_to_wake_up() stats */
        unsigned int ttwu_count;
        unsigned int ttwu_local;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);


static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;
#else
        return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
        rcu_dereference_check((p), \
                              rcu_read_lock_sched_held() || \
                              lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
#define this_rq()               (&__get_cpu_var(runqueues))
#define task_rq(p)              cpu_rq(task_cpu(p))
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
#define raw_rq()                (&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification
 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
 * holds that lock for each task it moves into the cgroup. Therefore
 * by holding that lock, we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
        struct task_group *tg;
        struct cgroup_subsys_state *css;

        if (p->flags & PF_EXITING)
                return &root_task_group;

        css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
                        lockdep_is_held(&task_rq(p)->lock));
        tg = container_of(css, struct task_group, css);

        return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
        p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
        p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
        return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static void update_rq_clock_task(struct rq *rq, s64 delta);

static void update_rq_clock(struct rq *rq)
{
        s64 delta;

        if (rq->skip_clock_update)
                return;

        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 * @cpu: the processor in question.
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
        return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)       \
        __SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
        0;

#undef SCHED_FEAT
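/*
 * Illustrative expansion (assuming sched_features.h contains a line such
 * as SCHED_FEAT(HRTICK, 0)): the first include above generates the enum
 * entry __SCHED_FEAT_HRTICK, and the second OR's
 * (1UL << __SCHED_FEAT_HRTICK) * 0 into the default value of
 * sysctl_sched_features.
 */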

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)       \
        #name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
        NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; sched_feat_names[i]; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int neg = 0;
        int i;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        for (i = 0; sched_feat_names[i]; i++) {
                if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg)
                                sysctl_sched_features &= ~(1UL << i);
                        else
                                sysctl_sched_features |= (1UL << i);
                        break;
                }
        }

        if (!sched_feat_names[i])
                return -EINVAL;

        *ppos += cnt;

        return cnt;
}
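/*
 * Usage sketch (illustrative): with debugfs mounted, a feature bit can
 * be toggled from userspace at runtime, e.g.:
 *
 *      echo NO_HRTICK > /sys/kernel/debug/sched_features
 *      cat /sys/kernel/debug/sched_features
 */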

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
        return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
        if (sysctl_sched_rt_runtime < 0)
                return RUNTIME_INF;

        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
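/*
 * Worked example with the defaults above (illustrative): the runtime is
 * 950000 us of every 1000000 us period, i.e. realtime tasks may consume
 * at most 95% of cpu time, leaving 5% for SCHED_OTHER tasks. Setting
 * sysctl_sched_rt_runtime to -1 yields RUNTIME_INF and disables the
 * throttling.
 */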

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)      do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)       do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
        return rq->curr == p;
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
        return task_current(rq, p);
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
#endif
        /*
         * If we are tracking spinlock dependencies then we have to
         * fix up the runqueue lock - which gets 'carried over' from
         * prev into current:
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

        raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
        return p->oncpu;
#else
        return task_current(rq, p);
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
        /*
         * We can optimise this out completely for !SMP, because the
         * SMP rebalancing from interrupt is the only thing that cares
         * here.
         */
        next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        raw_spin_unlock_irq(&rq->lock);
#else
        raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
        /*
         * After ->oncpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
         */
        smp_wmb();
        prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * Check whether the task is waking; we use this to synchronize ->cpus_allowed
 * against ttwu().
 */
static inline int task_is_waking(struct task_struct *p)
{
        return unlikely(p->state == TASK_WAKING);
}

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
        }
}
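/*
 * Note (illustrative): the retry loop above re-checks task_rq(p) after
 * taking the lock because the task may have been migrated to another
 * CPU between the lookup and the lock acquisition; only when the two
 * agree is the runqueue safely pinned.
 */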

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                local_irq_save(*flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
}

static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        raw_spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
{
        raw_spin_unlock_irqrestore(&rq->lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        raw_spin_lock(&rq->lock);

        return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for
 * a reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
        if (!sched_feat(HRTICK))
                return 0;
        if (!cpu_active(cpu_of(rq)))
                return 0;
        return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static void hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);

        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        raw_spin_unlock(&rq->lock);

        return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;

        raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
        raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

        hrtimer_set_expires(timer, time);

        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
                __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                hrtick_clear(cpu_rq(cpu));
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
        hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
        rq->hrtick_csd_pending = 0;

        rq->hrtick_csd.flags = 0;
        rq->hrtick_csd.func = __hrtick_start;
        rq->hrtick_csd.info = rq;
#endif

        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
        int cpu;

        assert_raw_spin_locked(&task_rq(p)->lock);

        if (test_tsk_need_resched(p))
                return;

        set_tsk_need_resched(p);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}

static void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
        int cpu = smp_processor_id();
        int i;
        struct sched_domain *sd;

        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd))
                        if (!idle_cpu(i))
                                return i;
        }
        return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (cpu == smp_processor_id())
                return;

        /*
         * This is safe, as this function is called with the timer
         * wheel base lock of (cpu) held. When the CPU is on the way
         * to idle and has not yet set rq->curr to idle then it will
         * be serialized on the timer wheel base lock and take the new
         * timer into account automatically.
         */
        if (rq->curr != rq->idle)
                return;

        /*
         * We can set TIF_RESCHED on the idle task of the other CPU
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
        set_tsk_need_resched(rq->idle);

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(rq->idle))
                smp_send_reschedule(cpu);
}

#endif /* CONFIG_NO_HZ */

static u64 sched_avg_period(void)
{
        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

static void sched_avg_update(struct rq *rq)
{
        s64 period = sched_avg_period();

        while ((s64)(rq->clock - rq->age_stamp) > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
                 * See __iter_div_u64_rem() for another example of this.
                 */
                asm("" : "+rm" (rq->age_stamp));
                rq->age_stamp += period;
                rq->rt_avg /= 2;
        }
}
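/*
 * Worked example (illustrative): with the default sysctl_sched_time_avg
 * of 1000 ms, sched_avg_period() is 500,000,000 ns, so rq->rt_avg is
 * halved for every half-second of elapsed rq clock time.
 */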

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
        rq->rt_avg += rt_delta;
        sched_avg_update(rq);
}

#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
        assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}

static void sched_avg_update(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001292
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001293#if BITS_PER_LONG == 32
1294# define WMULT_CONST (~0UL)
1295#else
1296# define WMULT_CONST (1UL << 32)
1297#endif
1298
1299#define WMULT_SHIFT 32
1300
Ingo Molnar194081e2007-08-09 11:16:51 +02001301/*
1302 * Shift right and round:
1303 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001304#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
Ingo Molnar194081e2007-08-09 11:16:51 +02001305
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001306/*
1307 * delta *= weight / lw
1308 */
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001309static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001310calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1311 struct load_weight *lw)
1312{
1313 u64 tmp;
1314
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001315 if (!lw->inv_weight) {
1316 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1317 lw->inv_weight = 1;
1318 else
1319 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1320 / (lw->weight+1);
1321 }
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001322
1323 tmp = (u64)delta_exec * weight;
1324 /*
1325 * Check whether we'd overflow the 64-bit multiplication:
1326 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001327 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001328 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001329 WMULT_SHIFT/2);
1330 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001331 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001332
Ingo Molnarecf691d2007-08-02 17:41:40 +02001333 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001334}
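
/*
 * Worked example for calc_delta_mine(): delta_exec = 1000000 (1ms in
 * ns), weight = 1024 (nice 0), lw->weight = 2048. Then inv_weight is
 * roughly 2^32/2048 = 2097152, tmp = 1024000000 stays below
 * WMULT_CONST, and SRR(tmp * inv_weight, 32) ~= 500000 -- that is,
 * delta * 1024/2048, a half share, computed without a 64-bit division.
 */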
1335
Ingo Molnar10919852007-10-15 17:00:04 +02001336static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001337{
1338 lw->weight += inc;
Ingo Molnare89996a2008-03-14 23:48:28 +01001339 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001340}
1341
Ingo Molnar10919852007-10-15 17:00:04 +02001342static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001343{
1344 lw->weight -= dec;
Ingo Molnare89996a2008-03-14 23:48:28 +01001345 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001346}
1347
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001348static inline void update_load_set(struct load_weight *lw, unsigned long w)
1349{
1350 lw->weight = w;
1351 lw->inv_weight = 0;
1352}
1353
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001355 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1356 * of tasks with abnormal "nice" values across CPUs, the contribution that
1357 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001358 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001359 * scaled version of the new time slice allocation that they receive on time
1360 * slice expiry etc.
1361 */
1362
Peter Zijlstracce7ade2009-01-15 14:53:37 +01001363#define WEIGHT_IDLEPRIO 3
1364#define WMULT_IDLEPRIO 1431655765
Ingo Molnardd41f592007-07-09 18:51:59 +02001365
1366/*
1367 * Nice levels are multiplicative, with a gentle 10% change for every
1368 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1369 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1370 * that remained on nice 0.
1371 *
1372 * The "10% effect" is relative and cumulative: from _any_ nice level,
1373 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001374 * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25:
1375 * if a task goes up by ~10% and another task goes down by ~10% then
1376 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001377 */
1378static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001379 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1380 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1381 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1382 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1383 /* 0 */ 1024, 820, 655, 526, 423,
1384 /* 5 */ 335, 272, 215, 172, 137,
1385 /* 10 */ 110, 87, 70, 56, 45,
1386 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001387};
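
/*
 * Worked example: two CPU-bound tasks sharing one CPU, one at nice 0
 * (weight 1024) and one at nice 1 (weight 820), split CPU time as
 * 1024/1844 ~= 55.5% vs 820/1844 ~= 44.5% -- roughly the documented
 * 10% gap, coming from the ~1.25 ratio between consecutive levels.
 */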
1388
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001389/*
1390 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1391 *
1392 * In cases where the weight does not change often, we can use the
1393 * precalculated inverse to speed up arithmetic by turning divisions
1394 * into multiplications:
1395 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001396static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001397 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1398 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1399 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1400 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1401 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1402 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1403 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1404 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001405};
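
/*
 * E.g. for nice 0: prio_to_wmult[20] == 4194304 == 2^32/1024, so
 * "x / 1024" becomes the cheaper "(x * 4194304) >> 32" in
 * calc_delta_mine() above.
 */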
Peter Williams2dd73a42006-06-27 02:54:34 -07001406
Bharata B Raoef12fef2009-03-31 10:02:22 +05301407/* Time spent by the tasks of the cpu accounting group executing in ... */
1408enum cpuacct_stat_index {
1409 CPUACCT_STAT_USER, /* ... user mode */
1410 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1411
1412 CPUACCT_STAT_NSTATS,
1413};
1414
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001415#ifdef CONFIG_CGROUP_CPUACCT
1416static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
Bharata B Raoef12fef2009-03-31 10:02:22 +05301417static void cpuacct_update_stats(struct task_struct *tsk,
1418 enum cpuacct_stat_index idx, cputime_t val);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001419#else
1420static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
Bharata B Raoef12fef2009-03-31 10:02:22 +05301421static inline void cpuacct_update_stats(struct task_struct *tsk,
1422 enum cpuacct_stat_index idx, cputime_t val) {}
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001423#endif
1424
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001425static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1426{
1427 update_load_add(&rq->load, load);
1428}
1429
1430static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1431{
1432 update_load_sub(&rq->load, load);
1433}
1434
Ingo Molnar7940ca32008-08-19 13:40:47 +02001435#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
Peter Zijlstraeb755802008-08-19 12:33:05 +02001436typedef int (*tg_visitor)(struct task_group *, void *);
1437
1438/*
1439 * Iterate the full tree, calling @down when first entering a node and @up when
1440 * leaving it for the final time.
1441 */
1442static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1443{
1444 struct task_group *parent, *child;
1445 int ret;
1446
1447 rcu_read_lock();
1448 parent = &root_task_group;
1449down:
1450 ret = (*down)(parent, data);
1451 if (ret)
1452 goto out_unlock;
1453 list_for_each_entry_rcu(child, &parent->children, siblings) {
1454 parent = child;
1455 goto down;
1456
1457up:
1458 continue;
1459 }
1460 ret = (*up)(parent, data);
1461 if (ret)
1462 goto out_unlock;
1463
1464 child = parent;
1465 parent = parent->parent;
1466 if (parent)
1467 goto up;
1468out_unlock:
1469 rcu_read_unlock();
1470
1471 return ret;
1472}
1473
1474static int tg_nop(struct task_group *tg, void *data)
1475{
1476 return 0;
1477}
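
/*
 * Usage sketch (hypothetical visitor, not part of this file): count
 * the task groups in the hierarchy. A nonzero return from a visitor
 * aborts the walk, so unconditionally returning 0 visits every group.
 */
#if 0
static int tg_count_one(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;
}

static int count_task_groups(void)
{
	int count = 0;

	walk_tg_tree(tg_count_one, tg_nop, &count);
	return count;
}
#endif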
1478#endif
1479
Gregory Haskinse7693a32008-01-25 21:08:09 +01001480#ifdef CONFIG_SMP
Peter Zijlstraf5f08f32009-09-10 13:35:28 +02001481/* Used instead of source_load when we know the type == 0 */
1482static unsigned long weighted_cpuload(const int cpu)
1483{
1484 return cpu_rq(cpu)->load.weight;
1485}
1486
1487/*
1488 * Return a low guess at the load of a migration-source cpu weighted
1489 * according to the scheduling class and "nice" value.
1490 *
1491 * We want to under-estimate the load of migration sources, to
1492 * balance conservatively.
1493 */
1494static unsigned long source_load(int cpu, int type)
1495{
1496 struct rq *rq = cpu_rq(cpu);
1497 unsigned long total = weighted_cpuload(cpu);
1498
1499 if (type == 0 || !sched_feat(LB_BIAS))
1500 return total;
1501
1502 return min(rq->cpu_load[type-1], total);
1503}
1504
1505/*
1506 * Return a high guess at the load of a migration-target cpu weighted
1507 * according to the scheduling class and "nice" value.
1508 */
1509static unsigned long target_load(int cpu, int type)
1510{
1511 struct rq *rq = cpu_rq(cpu);
1512 unsigned long total = weighted_cpuload(cpu);
1513
1514 if (type == 0 || !sched_feat(LB_BIAS))
1515 return total;
1516
1517 return max(rq->cpu_load[type-1], total);
1518}
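
/*
 * Example: with rq->cpu_load[type-1] == 900 and an instantaneous
 * weighted_cpuload() of 1200, source_load() returns 900 (the low,
 * conservative guess) while target_load() returns 1200 (the high
 * guess); with type == 0 both return 1200.
 */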
1519
Peter Zijlstraae154be2009-09-10 14:40:57 +02001520static unsigned long power_of(int cpu)
1521{
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02001522 return cpu_rq(cpu)->cpu_power;
Peter Zijlstraae154be2009-09-10 14:40:57 +02001523}
1524
Gregory Haskinse7693a32008-01-25 21:08:09 +01001525static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001526
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001527static unsigned long cpu_avg_load_per_task(int cpu)
1528{
1529 struct rq *rq = cpu_rq(cpu);
Ingo Molnaraf6d5962008-11-29 20:45:15 +01001530 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001531
Steven Rostedt4cd42622008-11-26 21:04:24 -05001532 if (nr_running)
1533 rq->avg_load_per_task = rq->load.weight / nr_running;
Balbir Singha2d47772008-11-12 16:19:00 +05301534 else
1535 rq->avg_load_per_task = 0;
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001536
1537 return rq->avg_load_per_task;
1538}
1539
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001540#ifdef CONFIG_FAIR_GROUP_SCHED
1541
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001542/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001543 * Compute the cpu's hierarchical load factor for each task group.
1544 * This needs to be done in a top-down fashion because the load of a child
1545 * group is a fraction of its parent's load.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001546 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001547static int tg_load_down(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001548{
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001549 unsigned long load;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001550 long cpu = (long)data;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001551
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001552 if (!tg->parent) {
1553 load = cpu_rq(cpu)->load.weight;
1554 } else {
1555 load = tg->parent->cfs_rq[cpu]->h_load;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001556 load *= tg->se[cpu]->load.weight;
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001557 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1558 }
1559
1560 tg->cfs_rq[cpu]->h_load = load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001561
Peter Zijlstraeb755802008-08-19 12:33:05 +02001562 return 0;
Peter Zijlstra4d8d5952008-06-27 13:41:19 +02001563}
1564
Peter Zijlstraeb755802008-08-19 12:33:05 +02001565static void update_h_load(long cpu)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001566{
Peter Zijlstraeb755802008-08-19 12:33:05 +02001567 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001568}
1569
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001570#endif
1571
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001572#ifdef CONFIG_PREEMPT
1573
Peter Zijlstrab78bb862009-09-15 14:23:18 +02001574static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1575
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001576/*
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001577 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1578 * way at the expense of forcing extra atomic operations in all
1579 * invocations. This ensures that the double_lock is acquired using the
1580 * same underlying policy as the spinlock_t on this architecture, which
1581 * reduces latency compared to the unfair variant below. However, it
1582 * also adds more overhead and therefore may reduce throughput.
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001583 */
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001584static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1585 __releases(this_rq->lock)
1586 __acquires(busiest->lock)
1587 __acquires(this_rq->lock)
1588{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001589 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001590 double_rq_lock(this_rq, busiest);
1591
1592 return 1;
1593}
1594
1595#else
1596/*
1597 * Unfair double_lock_balance: Optimizes throughput at the expense of
1598 * latency by eliminating extra atomic operations when the locks are
1599 * already in proper order on entry. This favors lower cpu-ids and will
1600 * grant the double lock to lower cpus over higher ids under contention,
1601 * regardless of entry order into the function.
1602 */
1603static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001604 __releases(this_rq->lock)
1605 __acquires(busiest->lock)
1606 __acquires(this_rq->lock)
1607{
1608 int ret = 0;
1609
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001610 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001611 if (busiest < this_rq) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001612 raw_spin_unlock(&this_rq->lock);
1613 raw_spin_lock(&busiest->lock);
1614 raw_spin_lock_nested(&this_rq->lock,
1615 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001616 ret = 1;
1617 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001618 raw_spin_lock_nested(&busiest->lock,
1619 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001620 }
1621 return ret;
1622}
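
/*
 * The pointer comparison above imposes a global acquisition order on
 * runqueue locks: contended paths always end up taking the lower-
 * addressed lock first. Two CPUs that each hold one rq->lock and want
 * the other's therefore cannot deadlock -- the one holding the
 * higher-addressed lock releases it and re-acquires both in order.
 */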
1623
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001624#endif /* CONFIG_PREEMPT */
1625
1626/*
1627 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1628 */
1629static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1630{
1631 if (unlikely(!irqs_disabled())) {
1632 /* printk() doesn't work well under rq->lock */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001633 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001634 BUG_ON(1);
1635 }
1636
1637 return _double_lock_balance(this_rq, busiest);
1638}
1639
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001640static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(busiest->lock)
1642{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001643 raw_spin_unlock(&busiest->lock);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001644 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1645}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001646
1647/*
1648 * double_rq_lock - safely lock two runqueues
1649 *
1650 * Note this does not disable interrupts like task_rq_lock,
1651 * you need to do so manually before calling.
1652 */
1653static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1654 __acquires(rq1->lock)
1655 __acquires(rq2->lock)
1656{
1657 BUG_ON(!irqs_disabled());
1658 if (rq1 == rq2) {
1659 raw_spin_lock(&rq1->lock);
1660 __acquire(rq2->lock); /* Fake it out ;) */
1661 } else {
1662 if (rq1 < rq2) {
1663 raw_spin_lock(&rq1->lock);
1664 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1665 } else {
1666 raw_spin_lock(&rq2->lock);
1667 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1668 }
1669 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001670}
1671
1672/*
1673 * double_rq_unlock - safely unlock two runqueues
1674 *
1675 * Note this does not restore interrupts like task_rq_unlock,
1676 * you need to do so manually after calling.
1677 */
1678static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1679 __releases(rq1->lock)
1680 __releases(rq2->lock)
1681{
1682 raw_spin_unlock(&rq1->lock);
1683 if (rq1 != rq2)
1684 raw_spin_unlock(&rq2->lock);
1685 else
1686 __release(rq2->lock);
1687}
1688
Mike Galbraithd95f4122011-02-01 09:50:51 -05001689#else /* CONFIG_SMP */
1690
1691/*
1692 * double_rq_lock - safely lock two runqueues
1693 *
1694 * Note this does not disable interrupts like task_rq_lock,
1695 * you need to do so manually before calling.
1696 */
1697static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1698 __acquires(rq1->lock)
1699 __acquires(rq2->lock)
1700{
1701 BUG_ON(!irqs_disabled());
1702 BUG_ON(rq1 != rq2);
1703 raw_spin_lock(&rq1->lock);
1704 __acquire(rq2->lock); /* Fake it out ;) */
1705}
1706
1707/*
1708 * double_rq_unlock - safely unlock two runqueues
1709 *
1710 * Note this does not restore interrupts like task_rq_unlock,
1711 * you need to do so manually after calling.
1712 */
1713static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1714 __releases(rq1->lock)
1715 __releases(rq2->lock)
1716{
1717 BUG_ON(rq1 != rq2);
1718 raw_spin_unlock(&rq1->lock);
1719 __release(rq2->lock);
1720}
1721
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001722#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001723
Peter Zijlstra74f51872010-04-22 21:50:19 +02001724static void calc_load_account_idle(struct rq *this_rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01001725static void update_sysctl(void);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01001726static int get_update_sysctl_factor(void);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07001727static void update_cpu_load(struct rq *this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001728
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001729static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1730{
1731 set_task_rq(p, cpu);
1732#ifdef CONFIG_SMP
1733 /*
1734 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1735 * successfully executed on another CPU. We must ensure that updates of
1736 * per-task data have been completed by this moment.
1737 */
1738 smp_wmb();
1739 task_thread_info(p)->cpu = cpu;
1740#endif
1741}
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001742
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001743static const struct sched_class rt_sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02001744
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001745#define sched_class_highest (&stop_sched_class)
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001746#define for_each_class(class) \
1747 for (class = sched_class_highest; class; class = class->next)
Ingo Molnardd41f592007-07-09 18:51:59 +02001748
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001749#include "sched_stats.h"
1750
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001751static void inc_nr_running(struct rq *rq)
Ingo Molnar6363ca52008-05-29 11:28:57 +02001752{
1753 rq->nr_running++;
Ingo Molnar6363ca52008-05-29 11:28:57 +02001754}
1755
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001756static void dec_nr_running(struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001757{
1758 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001759}
1760
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001761static void set_load_weight(struct task_struct *p)
1762{
Ingo Molnardd41f592007-07-09 18:51:59 +02001763 /*
1764 * SCHED_IDLE tasks get minimal weight:
1765 */
1766 if (p->policy == SCHED_IDLE) {
1767 p->se.load.weight = WEIGHT_IDLEPRIO;
1768 p->se.load.inv_weight = WMULT_IDLEPRIO;
1769 return;
1770 }
1771
1772 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1773 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001774}
1775
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001776static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001777{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001778 update_rq_clock(rq);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001779 sched_info_queued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001780 p->sched_class->enqueue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001781 p->se.on_rq = 1;
1782}
1783
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001784static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnardd41f592007-07-09 18:51:59 +02001785{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001786 update_rq_clock(rq);
Ankita Garg46ac22b2008-07-01 14:30:06 +05301787 sched_info_dequeued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001788 p->sched_class->dequeue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001789 p->se.on_rq = 0;
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001790}
1791
1792/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001793 * activate_task - move a task to the runqueue.
1794 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001795static void activate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001796{
1797 if (task_contributes_to_load(p))
1798 rq->nr_uninterruptible--;
1799
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001800 enqueue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001801 inc_nr_running(rq);
1802}
1803
1804/*
1805 * deactivate_task - remove a task from the runqueue.
1806 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001807static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001808{
1809 if (task_contributes_to_load(p))
1810 rq->nr_uninterruptible++;
1811
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001812 dequeue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001813 dec_nr_running(rq);
1814}
1815
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001816#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1817
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001818/*
1819 * There are no locks covering percpu hardirq/softirq time.
1820 * They are only modified in account_system_vtime, on the corresponding CPU
1821 * with interrupts disabled. So, writes are safe.
1822 * They are read and saved off onto struct rq in update_rq_clock().
1823 * This may result in another CPU reading this CPU's irq time and can
1824 * race with irq/account_system_vtime on this CPU. We would either get the old
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001825 * or the new value, with a side effect of accounting a slice of irq time to the
1826 * wrong task when an irq is in progress while we read rq->clock. That is a
1827 * worthy compromise in place of having locks on each irq in account_system_time.
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001828 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001829static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1830static DEFINE_PER_CPU(u64, cpu_softirq_time);
1831
1832static DEFINE_PER_CPU(u64, irq_start_time);
1833static int sched_clock_irqtime;
1834
1835void enable_sched_clock_irqtime(void)
1836{
1837 sched_clock_irqtime = 1;
1838}
1839
1840void disable_sched_clock_irqtime(void)
1841{
1842 sched_clock_irqtime = 0;
1843}
1844
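
/*
 * On 32-bit, a u64 load takes two machine-word accesses, so a reader
 * could otherwise see half of an old value and half of a new one. The
 * writer brackets each update with sequence increments; readers retry
 * until they observe an even, unchanged sequence across the read:
 */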
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001845#ifndef CONFIG_64BIT
1846static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1847
1848static inline void irq_time_write_begin(void)
1849{
1850 __this_cpu_inc(irq_time_seq.sequence);
1851 smp_wmb();
1852}
1853
1854static inline void irq_time_write_end(void)
1855{
1856 smp_wmb();
1857 __this_cpu_inc(irq_time_seq.sequence);
1858}
1859
1860static inline u64 irq_time_read(int cpu)
1861{
1862 u64 irq_time;
1863 unsigned seq;
1864
1865 do {
1866 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1867 irq_time = per_cpu(cpu_softirq_time, cpu) +
1868 per_cpu(cpu_hardirq_time, cpu);
1869 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1870
1871 return irq_time;
1872}
1873#else /* CONFIG_64BIT */
1874static inline void irq_time_write_begin(void)
1875{
1876}
1877
1878static inline void irq_time_write_end(void)
1879{
1880}
1881
1882static inline u64 irq_time_read(int cpu)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001883{
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001884 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1885}
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001886#endif /* CONFIG_64BIT */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001887
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001888/*
1889 * Called before incrementing preempt_count on {soft,}irq_enter
1890 * and before decrementing preempt_count on {soft,}irq_exit.
1891 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001892void account_system_vtime(struct task_struct *curr)
1893{
1894 unsigned long flags;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001895 s64 delta;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001896 int cpu;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001897
1898 if (!sched_clock_irqtime)
1899 return;
1900
1901 local_irq_save(flags);
1902
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001903 cpu = smp_processor_id();
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001904 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1905 __this_cpu_add(irq_start_time, delta);
1906
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001907 irq_time_write_begin();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001908 /*
1909 * We do not account for softirq time from ksoftirqd here.
1910 * We want to continue accounting softirq time to the ksoftirqd thread
1911 * in that case, so as not to confuse the scheduler with a special task
1912 * that does not consume any time, but still wants to run.
1913 */
1914 if (hardirq_count())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001915 __this_cpu_add(cpu_hardirq_time, delta);
Venkatesh Pallipadi4dd53d82010-12-21 17:09:00 -08001916 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001917 __this_cpu_add(cpu_softirq_time, delta);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001918
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001919 irq_time_write_end();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001920 local_irq_restore(flags);
1921}
Ingo Molnarb7dadc32010-10-18 20:00:37 +02001922EXPORT_SYMBOL_GPL(account_system_vtime);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001923
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001924static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001925{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001926 s64 irq_delta;
1927
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001928 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001929
1930 /*
1931 * Since irq_time is only updated on {soft,}irq_exit, we might run into
1932 * this case when a previous update_rq_clock() happened inside a
1933 * {soft,}irq region.
1934 *
1935 * When this happens, we stop ->clock_task and only update the
1936 * prev_irq_time stamp to account for the part that fit, so that a next
1937 * update will consume the rest. This ensures ->clock_task is
1938 * monotonic.
1939 *
1940 * It does however cause some slight mis-attribution of {soft,}irq
1941 * time; a more accurate solution would be to update the irq_time using
1942 * the current rq->clock timestamp, except that would require using
1943 * atomic ops.
1944 */
1945 if (irq_delta > delta)
1946 irq_delta = delta;
1947
1948 rq->prev_irq_time += irq_delta;
1949 delta -= irq_delta;
1950 rq->clock_task += delta;
1951
1952 if (irq_delta && sched_feat(NONIRQ_POWER))
1953 sched_rt_avg_update(rq, irq_delta);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001954}
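
/*
 * Example: if delta is 3ms of rq->clock progress during which irq_time
 * grew by 1ms, clock_task advances by 2ms. Had irq_time grown by 4ms
 * (the update landing inside an irq region), irq_delta would be clamped
 * to 3ms, clock_task would not advance at all, and the excess 1ms would
 * be consumed by the next update -- keeping clock_task monotonic.
 */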
1955
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08001956static int irqtime_account_hi_update(void)
1957{
1958 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1959 unsigned long flags;
1960 u64 latest_ns;
1961 int ret = 0;
1962
1963 local_irq_save(flags);
1964 latest_ns = this_cpu_read(cpu_hardirq_time);
1965 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
1966 ret = 1;
1967 local_irq_restore(flags);
1968 return ret;
1969}
1970
1971static int irqtime_account_si_update(void)
1972{
1973 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1974 unsigned long flags;
1975 u64 latest_ns;
1976 int ret = 0;
1977
1978 local_irq_save(flags);
1979 latest_ns = this_cpu_read(cpu_softirq_time);
1980 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
1981 ret = 1;
1982 local_irq_restore(flags);
1983 return ret;
1984}
1985
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001986#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001987
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08001988#define sched_clock_irqtime (0)
1989
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001990static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001991{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001992 rq->clock_task += delta;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001993}
1994
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001995#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001996
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001997#include "sched_idletask.c"
1998#include "sched_fair.c"
1999#include "sched_rt.c"
Mike Galbraith5091faa2010-11-30 14:18:03 +01002000#include "sched_autogroup.c"
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002001#include "sched_stoptask.c"
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002002#ifdef CONFIG_SCHED_DEBUG
2003# include "sched_debug.c"
2004#endif
2005
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002006void sched_set_stop_task(int cpu, struct task_struct *stop)
2007{
2008 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2009 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2010
2011 if (stop) {
2012 /*
2013 * Make it appear like a SCHED_FIFO task, it's something
2014 * userspace knows about and won't get confused about.
2015 *
2016 * Also, it will make PI more or less work without too
2017 * much confusion -- but then, stop work should not
2018 * rely on PI working anyway.
2019 */
2020 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2021
2022 stop->sched_class = &stop_sched_class;
2023 }
2024
2025 cpu_rq(cpu)->stop = stop;
2026
2027 if (old_stop) {
2028 /*
2029 * Reset it back to a normal scheduling class so that
2030 * it can die in pieces.
2031 */
2032 old_stop->sched_class = &rt_sched_class;
2033 }
2034}
2035
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002036/*
Ingo Molnardd41f592007-07-09 18:51:59 +02002037 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02002038 */
Ingo Molnar14531182007-07-09 18:51:59 +02002039static inline int __normal_prio(struct task_struct *p)
2040{
Ingo Molnardd41f592007-07-09 18:51:59 +02002041 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02002042}
2043
2044/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07002045 * Calculate the expected normal priority: i.e. priority
2046 * without taking RT-inheritance into account. Might be
2047 * boosted by interactivity modifiers. Changes upon fork,
2048 * setprio syscalls, and whenever the interactivity
2049 * estimator recalculates.
2050 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002051static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002052{
2053 int prio;
2054
Ingo Molnare05606d2007-07-09 18:51:59 +02002055 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07002056 prio = MAX_RT_PRIO-1 - p->rt_priority;
2057 else
2058 prio = __normal_prio(p);
2059 return prio;
2060}
2061
2062/*
2063 * Calculate the current priority, i.e. the priority
2064 * taken into account by the scheduler. This value might
2065 * be boosted by RT tasks, or might be boosted by
2066 * interactivity modifiers. Will be RT if the task got
2067 * RT-boosted. If not then it returns p->normal_prio.
2068 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002069static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002070{
2071 p->normal_prio = normal_prio(p);
2072 /*
2073 * If we are RT tasks or we were boosted to RT priority,
2074 * keep the priority unchanged. Otherwise, update priority
2075 * to the normal priority:
2076 */
2077 if (!rt_prio(p->prio))
2078 return p->normal_prio;
2079 return p->prio;
2080}
2081
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082/**
2083 * task_curr - is this task currently executing on a CPU?
2084 * @p: the task in question.
2085 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002086inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087{
2088 return cpu_curr(task_cpu(p)) == p;
2089}
2090
Steven Rostedtcb469842008-01-25 21:08:22 +01002091static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2092 const struct sched_class *prev_class,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002093 int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01002094{
2095 if (prev_class != p->sched_class) {
2096 if (prev_class->switched_from)
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002097 prev_class->switched_from(rq, p);
2098 p->sched_class->switched_to(rq, p);
2099 } else if (oldprio != p->prio)
2100 p->sched_class->prio_changed(rq, p, oldprio);
Steven Rostedtcb469842008-01-25 21:08:22 +01002101}
2102
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002103static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2104{
2105 const struct sched_class *class;
2106
2107 if (p->sched_class == rq->curr->sched_class) {
2108 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2109 } else {
2110 for_each_class(class) {
2111 if (class == rq->curr->sched_class)
2112 break;
2113 if (class == p->sched_class) {
2114 resched_task(rq->curr);
2115 break;
2116 }
2117 }
2118 }
2119
2120 /*
2121 * A queue event has occurred, and we're going to schedule. In
2122 * this case, we can save a useless back to back clock update.
2123 */
Mike Galbraithf26f9af2010-12-08 11:05:42 +01002124 if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002125 rq->skip_clock_update = 1;
2126}
2127
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128#ifdef CONFIG_SMP
Ingo Molnarcc367732007-10-15 17:00:18 +02002129/*
2130 * Is this task likely cache-hot:
2131 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002132static int
Ingo Molnarcc367732007-10-15 17:00:18 +02002133task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2134{
2135 s64 delta;
2136
Peter Zijlstrae6c8fba2009-12-16 18:04:33 +01002137 if (p->sched_class != &fair_sched_class)
2138 return 0;
2139
Nikhil Raoef8002f2010-10-13 12:09:35 -07002140 if (unlikely(p->policy == SCHED_IDLE))
2141 return 0;
2142
Ingo Molnarf540a602008-03-15 17:10:34 +01002143 /*
2144 * Buddy candidates are cache hot:
2145 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002146 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
Peter Zijlstra47932412008-11-04 21:25:09 +01002147 (&p->se == cfs_rq_of(&p->se)->next ||
2148 &p->se == cfs_rq_of(&p->se)->last))
Ingo Molnarf540a602008-03-15 17:10:34 +01002149 return 1;
2150
Ingo Molnar6bc16652007-10-15 17:00:18 +02002151 if (sysctl_sched_migration_cost == -1)
2152 return 1;
2153 if (sysctl_sched_migration_cost == 0)
2154 return 0;
2155
Ingo Molnarcc367732007-10-15 17:00:18 +02002156 delta = now - p->se.exec_start;
2157
2158 return delta < (s64)sysctl_sched_migration_cost;
2159}
2160
Ingo Molnardd41f592007-07-09 18:51:59 +02002161void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02002162{
Peter Zijlstrae2912002009-12-16 18:04:36 +01002163#ifdef CONFIG_SCHED_DEBUG
2164 /*
2165 * We should never call set_task_cpu() on a blocked task,
2166 * ttwu() will sort out the placement.
2167 */
Peter Zijlstra077614e2009-12-17 13:16:31 +01002168 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2169 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
Peter Zijlstrae2912002009-12-16 18:04:36 +01002170#endif
2171
Mathieu Desnoyersde1d7282009-05-05 16:49:59 +08002172 trace_sched_migrate_task(p, new_cpu);
Peter Zijlstracbc34ed2008-12-10 08:08:22 +01002173
Peter Zijlstra0c697742009-12-22 15:43:19 +01002174 if (task_cpu(p) != new_cpu) {
2175 p->se.nr_migrations++;
2176 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2177 }
Ingo Molnardd41f592007-07-09 18:51:59 +02002178
2179 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02002180}
2181
Tejun Heo969c7922010-05-06 18:49:21 +02002182struct migration_arg {
Ingo Molnar36c8b582006-07-03 00:25:41 -07002183 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 int dest_cpu;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002185};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186
Tejun Heo969c7922010-05-06 18:49:21 +02002187static int migration_cpu_stop(void *data);
2188
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189/*
2190 * The task's runqueue lock must be held.
2191 * Returns true if you have to wait for the migration thread.
2192 */
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05302193static bool migrate_task(struct task_struct *p, struct rq *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 /*
2196 * If the task is not on a runqueue (and not running), then
Peter Zijlstrae2912002009-12-16 18:04:36 +01002197 * the next wake-up will properly place the task.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 */
Tejun Heo969c7922010-05-06 18:49:21 +02002199 return p->se.on_rq || task_running(rq, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200}
2201
2202/*
2203 * wait_task_inactive - wait for a thread to unschedule.
2204 *
Roland McGrath85ba2d82008-07-25 19:45:58 -07002205 * If @match_state is nonzero, it's the @p->state value just checked and
2206 * not expected to change. If it changes, i.e. @p might have woken up,
2207 * then return zero. When we succeed in waiting for @p to be off its CPU,
2208 * we return a positive number (its total switch count). If a second call
2209 * a short while later returns the same number, the caller can be sure that
2210 * @p has remained unscheduled the whole time.
2211 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 * The caller must ensure that the task *will* unschedule sometime soon,
2213 * else this function might spin for a *long* time. This function can't
2214 * be called with interrupts off, or it may introduce deadlock with
2215 * smp_call_function() if an IPI is sent by the same process we are
2216 * waiting to become inactive.
2217 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002218unsigned long wait_task_inactive(struct task_struct *p, long match_state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219{
2220 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002221 int running, on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002222 unsigned long ncsw;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002223 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Andi Kleen3a5c3592007-10-15 17:00:14 +02002225 for (;;) {
2226 /*
2227 * We do the initial early heuristics without holding
2228 * any task-queue locks at all. We'll only try to get
2229 * the runqueue lock when things look like they will
2230 * work out!
2231 */
2232 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002233
Andi Kleen3a5c3592007-10-15 17:00:14 +02002234 /*
2235 * If the task is actively running on another CPU
2236 * still, just relax and busy-wait without holding
2237 * any locks.
2238 *
2239 * NOTE! Since we don't hold any locks, it's not
2240 * even sure that "rq" stays as the right runqueue!
2241 * But we don't care, since "task_running()" will
2242 * return false if the runqueue has changed and p
2243 * is actually now running somewhere else!
2244 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002245 while (task_running(rq, p)) {
2246 if (match_state && unlikely(p->state != match_state))
2247 return 0;
Andi Kleen3a5c3592007-10-15 17:00:14 +02002248 cpu_relax();
Roland McGrath85ba2d82008-07-25 19:45:58 -07002249 }
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002250
Andi Kleen3a5c3592007-10-15 17:00:14 +02002251 /*
2252 * Ok, time to look more closely! We need the rq
2253 * lock now, to be *sure*. If we're wrong, we'll
2254 * just go back and repeat.
2255 */
2256 rq = task_rq_lock(p, &flags);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002257 trace_sched_wait_task(p);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002258 running = task_running(rq, p);
2259 on_rq = p->se.on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002260 ncsw = 0;
Oleg Nesterovf31e11d2008-08-20 16:54:44 -07002261 if (!match_state || p->state == match_state)
Oleg Nesterov93dcf552008-08-20 16:54:44 -07002262 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
Andi Kleen3a5c3592007-10-15 17:00:14 +02002263 task_rq_unlock(rq, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002264
Andi Kleen3a5c3592007-10-15 17:00:14 +02002265 /*
Roland McGrath85ba2d82008-07-25 19:45:58 -07002266 * If it changed from the expected state, bail out now.
2267 */
2268 if (unlikely(!ncsw))
2269 break;
2270
2271 /*
Andi Kleen3a5c3592007-10-15 17:00:14 +02002272 * Was it really running after all now that we
2273 * checked with the proper locks actually held?
2274 *
2275 * Oops. Go back and try again..
2276 */
2277 if (unlikely(running)) {
2278 cpu_relax();
2279 continue;
2280 }
2281
2282 /*
2283 * It's not enough that it's not actively running,
2284 * it must be off the runqueue _entirely_, and not
2285 * preempted!
2286 *
Luis Henriques80dd99b2009-03-16 19:58:09 +00002287 * So if it was still runnable (but just not actively
Andi Kleen3a5c3592007-10-15 17:00:14 +02002288 * running right now), it's preempted, and we should
2289 * yield - it could be a while.
2290 */
2291 if (unlikely(on_rq)) {
2292 schedule_timeout_uninterruptible(1);
2293 continue;
2294 }
2295
2296 /*
2297 * Ahh, all good. It wasn't running, and it wasn't
2298 * runnable, which means that it will never become
2299 * running in the future either. We're all done!
2300 */
2301 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 }
Roland McGrath85ba2d82008-07-25 19:45:58 -07002303
2304 return ncsw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305}
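
/*
 * Usage sketch (hypothetical caller, not part of this file): the switch
 * count returned above acts as a cookie; probing twice with the same
 * match state and comparing cookies tells the caller whether @p stayed
 * unscheduled in between.
 */
#if 0
static bool stayed_off_cpu_sketch(struct task_struct *p, long state)
{
	unsigned long ncsw = wait_task_inactive(p, state);

	if (!ncsw)
		return false;	/* state changed; p may have woken up */

	/* ... operate on p while it should remain unscheduled ... */

	return wait_task_inactive(p, state) == ncsw;
}
#endif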
2306
2307/***
2308 * kick_process - kick a running thread to enter/exit the kernel
2309 * @p: the to-be-kicked thread
2310 *
2311 * Cause a process which is running on another CPU to enter
2312 * kernel mode, without any delay (e.g. to get signals handled).
2313 *
2314 * NOTE: this function doesn't have to take the runqueue lock,
2315 * because all it wants to ensure is that the remote task enters
2316 * the kernel. If the IPI races and the task has been migrated
2317 * to another CPU then no harm is done and the purpose has been
2318 * achieved as well.
2319 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002320void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321{
2322 int cpu;
2323
2324 preempt_disable();
2325 cpu = task_cpu(p);
2326 if ((cpu != smp_processor_id()) && task_curr(p))
2327 smp_send_reschedule(cpu);
2328 preempt_enable();
2329}
Rusty Russellb43e3522009-06-12 22:27:00 -06002330EXPORT_SYMBOL_GPL(kick_process);
Nick Piggin476d1392005-06-25 14:57:29 -07002331#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Thomas Gleixner0793a612008-12-04 20:12:29 +01002333/**
2334 * task_oncpu_function_call - call a function on the cpu on which a task runs
2335 * @p: the task to evaluate
2336 * @func: the function to be called
2337 * @info: the function call argument
2338 *
2339 * Calls the function @func when the task is currently running. This might
2340 * be on the current CPU, in which case the function is called directly.
2341 */
2342void task_oncpu_function_call(struct task_struct *p,
2343 void (*func) (void *info), void *info)
2344{
2345 int cpu;
2346
2347 preempt_disable();
2348 cpu = task_cpu(p);
2349 if (task_curr(p))
2350 smp_call_function_single(cpu, func, info, 1);
2351 preempt_enable();
2352}
2353
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002354#ifdef CONFIG_SMP
Oleg Nesterov30da6882010-03-15 10:10:19 +01002355/*
2356 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
2357 */
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002358static int select_fallback_rq(int cpu, struct task_struct *p)
2359{
2360 int dest_cpu;
2361 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2362
2363 /* Look for allowed, online CPU in same node. */
2364 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2365 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2366 return dest_cpu;
2367
2368 /* Any allowed, online CPU? */
2369 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2370 if (dest_cpu < nr_cpu_ids)
2371 return dest_cpu;
2372
2373 /* No more Mr. Nice Guy. */
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01002374 dest_cpu = cpuset_cpus_allowed_fallback(p);
2375 /*
2376 * Don't tell them about moving exiting tasks or
2377 * kernel threads (both mm NULL), since they never
2378 * leave kernel.
2379 */
2380 if (p->mm && printk_ratelimit()) {
2381 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2382 task_pid_nr(p), p->comm, cpu);
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002383 }
2384
2385 return dest_cpu;
2386}
2387
Peter Zijlstrae2912002009-12-16 18:04:36 +01002388/*
Oleg Nesterov30da6882010-03-15 10:10:19 +01002389 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
Peter Zijlstrae2912002009-12-16 18:04:36 +01002390 */
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002391static inline
Peter Zijlstra0017d732010-03-24 18:34:10 +01002392int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002393{
Peter Zijlstra0017d732010-03-24 18:34:10 +01002394 int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002395
2396 /*
2397 * In order not to call set_task_cpu() on a blocking task, we need
2398 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2399 * cpu.
2400 *
2401 * Since this is common to all placement strategies, this lives here.
2402 *
2403 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2404 * not worry about this generic constraint ]
2405 */
2406 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
Peter Zijlstra70f11202009-12-20 17:36:27 +01002407 !cpu_online(cpu)))
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002408 cpu = select_fallback_rq(task_cpu(p), p);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002409
2410 return cpu;
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002411}
Mike Galbraith09a40af2010-04-15 07:29:59 +02002412
2413static void update_avg(u64 *avg, u64 sample)
2414{
2415 s64 diff = sample - *avg;
2416 *avg += diff >> 3;
2417}
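
/*
 * update_avg() is an exponential moving average with a 1/8 weight per
 * new sample: avg += (sample - avg) / 8. E.g. avg == 800 and
 * sample == 1600 yields avg == 900; eight identical samples pull the
 * average about two thirds of the way toward the new value.
 */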
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002418#endif
2419
Tejun Heo9ed38112009-12-03 15:08:03 +09002420static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
2421 bool is_sync, bool is_migrate, bool is_local,
2422 unsigned long en_flags)
2423{
2424 schedstat_inc(p, se.statistics.nr_wakeups);
2425 if (is_sync)
2426 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2427 if (is_migrate)
2428 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2429 if (is_local)
2430 schedstat_inc(p, se.statistics.nr_wakeups_local);
2431 else
2432 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2433
2434 activate_task(rq, p, en_flags);
2435}
2436
2437static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
2438 int wake_flags, bool success)
2439{
2440 trace_sched_wakeup(p, success);
2441 check_preempt_curr(rq, p, wake_flags);
2442
2443 p->state = TASK_RUNNING;
2444#ifdef CONFIG_SMP
2445 if (p->sched_class->task_woken)
2446 p->sched_class->task_woken(rq, p);
2447
2448 if (unlikely(rq->idle_stamp)) {
2449 u64 delta = rq->clock - rq->idle_stamp;
2450 u64 max = 2*sysctl_sched_migration_cost;
2451
2452 if (delta > max)
2453 rq->avg_idle = max;
2454 else
2455 update_avg(&rq->avg_idle, delta);
2456 rq->idle_stamp = 0;
2457 }
2458#endif
Tejun Heo21aa9af2010-06-08 21:40:37 +02002459 /* if a worker is waking up, notify workqueue */
2460 if ((p->flags & PF_WQ_WORKER) && success)
2461 wq_worker_waking_up(p, cpu_of(rq));
Tejun Heo9ed38112009-12-03 15:08:03 +09002462}
2463
2464/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 * try_to_wake_up - wake up a thread
Tejun Heo9ed38112009-12-03 15:08:03 +09002466 * @p: the thread to be awakened
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 * @state: the mask of task states that can be woken
Tejun Heo9ed38112009-12-03 15:08:03 +09002468 * @wake_flags: wake modifier flags (WF_*)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 *
2470 * Put it on the run-queue if it's not already there. The "current"
2471 * thread is always on the run-queue (except when the actual
2472 * re-schedule is in progress), and as such you're allowed to do
2473 * the simpler "current->state = TASK_RUNNING" to mark yourself
2474 * runnable without the overhead of this.
2475 *
Tejun Heo9ed38112009-12-03 15:08:03 +09002476 * Returns %true if @p was woken up, %false if it was already running
2477 * or @state didn't match @p's state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 */
Peter Zijlstra7d478722009-09-14 19:55:44 +02002479static int try_to_wake_up(struct task_struct *p, unsigned int state,
2480 int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481{
Ingo Molnarcc367732007-10-15 17:00:18 +02002482 int cpu, orig_cpu, this_cpu, success = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 unsigned long flags;
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002484 unsigned long en_flags = ENQUEUE_WAKEUP;
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002485 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002487 this_cpu = get_cpu();
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02002488
Linus Torvalds04e2f172008-02-23 18:05:03 -08002489 smp_wmb();
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002490 rq = task_rq_lock(p, &flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002491 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 goto out;
2493
Ingo Molnardd41f592007-07-09 18:51:59 +02002494 if (p->se.on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 goto out_running;
2496
2497 cpu = task_cpu(p);
Ingo Molnarcc367732007-10-15 17:00:18 +02002498 orig_cpu = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499
2500#ifdef CONFIG_SMP
2501 if (unlikely(task_running(rq, p)))
2502 goto out_activate;
2503
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002504 /*
2505 * In order to handle concurrent wakeups and release the rq->lock,
2506 * we put the task in TASK_WAKING state.
Ingo Molnareb24073b2009-09-16 21:09:13 +02002507 *
2508 * First fix up the nr_uninterruptible count:
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002509 */
Peter Zijlstracc87f762010-03-26 12:22:14 +01002510 if (task_contributes_to_load(p)) {
2511 if (likely(cpu_online(orig_cpu)))
2512 rq->nr_uninterruptible--;
2513 else
2514 this_rq()->nr_uninterruptible--;
2515 }
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002516 p->state = TASK_WAKING;
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002517
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002518 if (p->sched_class->task_waking) {
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002519 p->sched_class->task_waking(rq, p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002520 en_flags |= ENQUEUE_WAKING;
Peter Zijlstra0970d292010-02-15 14:45:54 +01002521 }
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002522
Peter Zijlstra0017d732010-03-24 18:34:10 +01002523 cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2524 if (cpu != orig_cpu)
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002525 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002526 __task_rq_unlock(rq);
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002527
Peter Zijlstra0970d292010-02-15 14:45:54 +01002528 rq = cpu_rq(cpu);
2529 raw_spin_lock(&rq->lock);
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002530
Peter Zijlstra0970d292010-02-15 14:45:54 +01002531 /*
2532 * We migrated the task without holding either rq->lock; however,
2533 * since the task is not on the task list itself, nobody else
2534 * will try to migrate the task, hence the rq should match the
2535 * cpu we just moved it to.
2536 */
2537 WARN_ON(task_cpu(p) != cpu);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002538 WARN_ON(p->state != TASK_WAKING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539
Gregory Haskinse7693a32008-01-25 21:08:09 +01002540#ifdef CONFIG_SCHEDSTATS
2541 schedstat_inc(rq, ttwu_count);
2542 if (cpu == this_cpu)
2543 schedstat_inc(rq, ttwu_local);
2544 else {
2545 struct sched_domain *sd;
2546 for_each_domain(this_cpu, sd) {
Rusty Russell758b2cd2008-11-25 02:35:04 +10302547 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Gregory Haskinse7693a32008-01-25 21:08:09 +01002548 schedstat_inc(sd, ttwu_wake_remote);
2549 break;
2550 }
2551 }
2552 }
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002553#endif /* CONFIG_SCHEDSTATS */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002554
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555out_activate:
2556#endif /* CONFIG_SMP */
Tejun Heo9ed38112009-12-03 15:08:03 +09002557 ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
2558 cpu == this_cpu, en_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 success = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560out_running:
Tejun Heo9ed38112009-12-03 15:08:03 +09002561 ttwu_post_activation(p, rq, wake_flags, success);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562out:
2563 task_rq_unlock(rq, &flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002564 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565
2566 return success;
2567}
2568
David Howells50fa6102009-04-28 15:01:38 +01002569/**
Tejun Heo21aa9af2010-06-08 21:40:37 +02002570 * try_to_wake_up_local - try to wake up a local task with rq lock held
2571 * @p: the thread to be awakened
2572 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04002573 * Put @p on the run-queue if it's not already there. The caller must
Tejun Heo21aa9af2010-06-08 21:40:37 +02002574 * ensure that this_rq() is locked, @p is bound to this_rq() and is not
2575 * the current task. this_rq() stays locked over the invocation.
2576 */
2577static void try_to_wake_up_local(struct task_struct *p)
2578{
2579 struct rq *rq = task_rq(p);
2580 bool success = false;
2581
2582 BUG_ON(rq != this_rq());
2583 BUG_ON(p == current);
2584 lockdep_assert_held(&rq->lock);
2585
2586 if (!(p->state & TASK_NORMAL))
2587 return;
2588
2589 if (!p->se.on_rq) {
2590 if (likely(!task_running(rq, p))) {
2591 schedstat_inc(rq, ttwu_count);
2592 schedstat_inc(rq, ttwu_local);
2593 }
2594 ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
2595 success = true;
2596 }
2597 ttwu_post_activation(p, rq, 0, success);
2598}
2599
2600/**
David Howells50fa6102009-04-28 15:01:38 +01002601 * wake_up_process - Wake up a specific process
2602 * @p: The process to be woken up.
2603 *
2604 * Attempt to wake up the nominated process and move it to the set of runnable
2605 * processes. Returns 1 if the process was woken up, 0 if it was already
2606 * running.
2607 *
2608 * It may be assumed that this function implies a write memory barrier before
2609 * changing the task state if and only if any tasks are woken up.
2610 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002611int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05002613 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615EXPORT_SYMBOL(wake_up_process);
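/*
 * Editorial usage sketch (not part of this file): the canonical
 * sleep/wakeup pairing that wake_up_process() is designed against.
 * The names "sleeper" and "event_pending" are hypothetical.
 *
 *	sleeping side:
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		while (!event_pending) {
 *			schedule();
 *			set_current_state(TASK_INTERRUPTIBLE);
 *		}
 *		__set_current_state(TASK_RUNNING);
 *
 *	waking side:
 *		event_pending = 1;
 *		wake_up_process(sleeper);
 *
 * A wakeup arriving between set_current_state() and schedule() simply
 * flips the sleeper back to TASK_RUNNING, so schedule() returns and the
 * condition is re-checked; no wakeup can be lost.
 */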
2616
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002617int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618{
2619 return try_to_wake_up(p, state, 0);
2620}
2621
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622/*
2623 * Perform scheduler related setup for a newly forked process p.
2624 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02002625 *
2626 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002628static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629{
Ingo Molnardd41f592007-07-09 18:51:59 +02002630 p->se.exec_start = 0;
2631 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02002632 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002633 p->se.nr_migrations = 0;
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002634 p->se.vruntime = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002635
2636#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03002637 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002638#endif
Nick Piggin476d1392005-06-25 14:57:29 -07002639
Peter Zijlstrafa717062008-01-25 21:08:27 +01002640 INIT_LIST_HEAD(&p->rt.run_list);
Ingo Molnardd41f592007-07-09 18:51:59 +02002641 p->se.on_rq = 0;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02002642 INIT_LIST_HEAD(&p->se.group_node);
Nick Piggin476d1392005-06-25 14:57:29 -07002643
Avi Kivitye107be32007-07-26 13:40:43 +02002644#ifdef CONFIG_PREEMPT_NOTIFIERS
2645 INIT_HLIST_HEAD(&p->preempt_notifiers);
2646#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002647}
2648
2649/*
2650 * fork()/clone()-time setup:
2651 */
2652void sched_fork(struct task_struct *p, int clone_flags)
2653{
2654 int cpu = get_cpu();
2655
2656 __sched_fork(p);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002657 /*
Peter Zijlstra0017d732010-03-24 18:34:10 +01002658 * We mark the process as running here. This guarantees that
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002659 * nobody will actually run it, and a signal or other external
2660 * event cannot wake it up and insert it on the runqueue either.
2661 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002662 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02002663
Ingo Molnarb29739f2006-06-27 02:54:51 -07002664 /*
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002665 * Revert to default priority/policy on fork if requested.
2666 */
2667 if (unlikely(p->sched_reset_on_fork)) {
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002668 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002669 p->policy = SCHED_NORMAL;
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002670 p->normal_prio = p->static_prio;
2671 }
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002672
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002673 if (PRIO_TO_NICE(p->static_prio) < 0) {
2674 p->static_prio = NICE_TO_PRIO(0);
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002675 p->normal_prio = p->static_prio;
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002676 set_load_weight(p);
2677 }
2678
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002679 /*
2680 * We don't need the reset flag anymore after the fork. It has
2681 * fulfilled its duty:
2682 */
2683 p->sched_reset_on_fork = 0;
2684 }
Lennart Poetteringca94c442009-06-15 17:17:47 +02002685
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002686 /*
2687 * Make sure we do not leak PI boosting priority to the child.
2688 */
2689 p->prio = current->normal_prio;
2690
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02002691 if (!rt_prio(p->prio))
2692 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07002693
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002694 if (p->sched_class->task_fork)
2695 p->sched_class->task_fork(p);
2696
Peter Zijlstra86951592010-06-22 11:44:53 +02002697 /*
2698 * The child is not yet in the pid-hash so no cgroup attach races,
2699	 * and the cgroup is pinned to this child because cgroup_fork()
2700	 * is run before sched_fork().
2701 *
2702 * Silence PROVE_RCU.
2703 */
2704 rcu_read_lock();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002705 set_task_cpu(p, cpu);
Peter Zijlstra86951592010-06-22 11:44:53 +02002706 rcu_read_unlock();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002707
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002708#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02002709 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002710 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711#endif
Chen, Kenneth Wd6077cb2006-02-14 13:53:10 -08002712#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
Nick Piggin4866cde2005-06-25 14:57:23 -07002713 p->oncpu = 0;
2714#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715#ifdef CONFIG_PREEMPT
Nick Piggin4866cde2005-06-25 14:57:23 -07002716 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08002717 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718#endif
Dario Faggioli806c09a2010-11-30 19:51:33 +01002719#ifdef CONFIG_SMP
Gregory Haskins917b6272008-12-29 09:39:53 -05002720 plist_node_init(&p->pushable_tasks, MAX_PRIO);
Dario Faggioli806c09a2010-11-30 19:51:33 +01002721#endif
Gregory Haskins917b6272008-12-29 09:39:53 -05002722
Nick Piggin476d1392005-06-25 14:57:29 -07002723 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724}
2725
2726/*
2727 * wake_up_new_task - wake up a newly created task for the first time.
2728 *
2729 * This function will do some initial scheduler statistics housekeeping
2730 * that must be done for every newly created context, then puts the task
2731 * on the runqueue and wakes it.
2732 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002733void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734{
2735 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002736 struct rq *rq;
Andrew Mortonc8906922010-03-11 14:08:43 -08002737 int cpu __maybe_unused = get_cpu();
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002738
2739#ifdef CONFIG_SMP
Peter Zijlstra0017d732010-03-24 18:34:10 +01002740 rq = task_rq_lock(p, &flags);
2741 p->state = TASK_WAKING;
2742
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002743 /*
2744 * Fork balancing, do it here and not earlier because:
2745 * - cpus_allowed can change in the fork path
2746 * - any previously selected cpu might disappear through hotplug
2747 *
Peter Zijlstra0017d732010-03-24 18:34:10 +01002748 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2749 * without people poking at ->cpus_allowed.
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002750 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002751 cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002752 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002753
2754 p->state = TASK_RUNNING;
2755 task_rq_unlock(rq, &flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002756#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757
Peter Zijlstra0017d732010-03-24 18:34:10 +01002758 rq = task_rq_lock(p, &flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002759 activate_task(rq, p, 0);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002760 trace_sched_wakeup_new(p, 1);
Peter Zijlstraa7558e02009-09-14 20:02:34 +02002761 check_preempt_curr(rq, p, WF_FORK);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002762#ifdef CONFIG_SMP
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002763 if (p->sched_class->task_woken)
2764 p->sched_class->task_woken(rq, p);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002765#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002766 task_rq_unlock(rq, &flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002767 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768}
2769
Avi Kivitye107be32007-07-26 13:40:43 +02002770#ifdef CONFIG_PREEMPT_NOTIFIERS
2771
2772/**
Luis Henriques80dd99b2009-03-16 19:58:09 +00002773 * preempt_notifier_register - tell me when current is being preempted & rescheduled
Randy Dunlap421cee22007-07-31 00:37:50 -07002774 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02002775 */
2776void preempt_notifier_register(struct preempt_notifier *notifier)
2777{
2778 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2779}
2780EXPORT_SYMBOL_GPL(preempt_notifier_register);
2781
2782/**
2783 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07002784 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02002785 *
2786 * This is safe to call from within a preemption notifier.
2787 */
2788void preempt_notifier_unregister(struct preempt_notifier *notifier)
2789{
2790 hlist_del(&notifier->link);
2791}
2792EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
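/*
 * Editorial usage sketch, assuming the preempt_notifier_init() helper
 * and struct preempt_notifier_ops from <linux/preempt.h>; the "my_*"
 * names are hypothetical.  Users such as KVM hook these callbacks to
 * save and restore per-task hardware state across preemption:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		... current was just scheduled back in on @cpu ...
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		... current is being switched out in favour of @next ...
 *	}
 *
 *	static struct preempt_notifier_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	struct preempt_notifier pn;
 *
 *	preempt_notifier_init(&pn, &my_ops);
 *	preempt_notifier_register(&pn);		(affects current only)
 *	...
 *	preempt_notifier_unregister(&pn);
 */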
2793
2794static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2795{
2796 struct preempt_notifier *notifier;
2797 struct hlist_node *node;
2798
2799 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2800 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2801}
2802
2803static void
2804fire_sched_out_preempt_notifiers(struct task_struct *curr,
2805 struct task_struct *next)
2806{
2807 struct preempt_notifier *notifier;
2808 struct hlist_node *node;
2809
2810 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2811 notifier->ops->sched_out(notifier, next);
2812}
2813
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002814#else /* !CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002815
2816static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2817{
2818}
2819
2820static void
2821fire_sched_out_preempt_notifiers(struct task_struct *curr,
2822 struct task_struct *next)
2823{
2824}
2825
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002826#endif /* CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002827
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828/**
Nick Piggin4866cde2005-06-25 14:57:23 -07002829 * prepare_task_switch - prepare to switch tasks
2830 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07002831 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07002832 * @next: the task we are going to switch to.
2833 *
2834 * This is called with the rq lock held and interrupts off. It must
2835 * be paired with a subsequent finish_task_switch after the context
2836 * switch.
2837 *
2838 * prepare_task_switch sets up locking and calls architecture specific
2839 * hooks.
2840 */
Avi Kivitye107be32007-07-26 13:40:43 +02002841static inline void
2842prepare_task_switch(struct rq *rq, struct task_struct *prev,
2843 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07002844{
Avi Kivitye107be32007-07-26 13:40:43 +02002845 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002846 prepare_lock_switch(rq, next);
2847 prepare_arch_switch(next);
2848}
2849
2850/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04002852 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853 * @prev: the thread we just switched away from.
2854 *
Nick Piggin4866cde2005-06-25 14:57:23 -07002855 * finish_task_switch must be called after the context switch, paired
2856 * with a prepare_task_switch call before the context switch.
2857 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2858 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 *
2860 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002861 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 * with the lock held can cause deadlocks; see schedule() for
2863 * details.)
2864 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02002865static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 __releases(rq->lock)
2867{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002869 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870
2871 rq->prev_mm = NULL;
2872
2873 /*
2874	 * A task struct has one reference for its use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002875 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002876 * schedule one last time. The schedule call will never return, and
2877 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002878 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 * still held, otherwise prev could be scheduled on another cpu, die
2880 * there before we look at prev->state, and then the reference would
2881 * be dropped twice.
2882 * Manfred Spraul <manfred@colorfullife.com>
2883 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002884 prev_state = prev->state;
Nick Piggin4866cde2005-06-25 14:57:23 -07002885 finish_arch_switch(prev);
Jamie Iles8381f652010-01-08 15:27:33 +00002886#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2887 local_irq_disable();
2888#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Peter Zijlstra49f47432009-12-27 11:51:52 +01002889 perf_event_task_sched_in(current);
Jamie Iles8381f652010-01-08 15:27:33 +00002890#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2891 local_irq_enable();
2892#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Nick Piggin4866cde2005-06-25 14:57:23 -07002893 finish_lock_switch(rq, prev);
Steven Rostedte8fa1362008-01-25 21:08:05 +01002894
Avi Kivitye107be32007-07-26 13:40:43 +02002895 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 if (mm)
2897 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002898 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08002899 /*
2900 * Remove function-return probe instances associated with this
2901 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02002902 */
bibo maoc6fd91f2006-03-26 01:38:20 -08002903 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08002905 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906}
2907
Gregory Haskins3f029d32009-07-29 11:08:47 -04002908#ifdef CONFIG_SMP
2909
2910/* assumes rq->lock is held */
2911static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2912{
2913 if (prev->sched_class->pre_schedule)
2914 prev->sched_class->pre_schedule(rq, prev);
2915}
2916
2917/* rq->lock is NOT held, but preemption is disabled */
2918static inline void post_schedule(struct rq *rq)
2919{
2920 if (rq->post_schedule) {
2921 unsigned long flags;
2922
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002923 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002924 if (rq->curr->sched_class->post_schedule)
2925 rq->curr->sched_class->post_schedule(rq);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002926 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002927
2928 rq->post_schedule = 0;
2929 }
2930}
2931
2932#else
2933
2934static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2935{
2936}
2937
2938static inline void post_schedule(struct rq *rq)
2939{
2940}
2941
2942#endif
2943
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944/**
2945 * schedule_tail - first thing a freshly forked thread must call.
2946 * @prev: the thread we just switched away from.
2947 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002948asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 __releases(rq->lock)
2950{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002951 struct rq *rq = this_rq();
2952
Nick Piggin4866cde2005-06-25 14:57:23 -07002953 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002954
Gregory Haskins3f029d32009-07-29 11:08:47 -04002955 /*
2956 * FIXME: do we need to worry about rq being invalidated by the
2957 * task_switch?
2958 */
2959 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002960
Nick Piggin4866cde2005-06-25 14:57:23 -07002961#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2962 /* In this case, finish_task_switch does not reenable preemption */
2963 preempt_enable();
2964#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002966 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967}
2968
2969/*
2970 * context_switch - switch to the new MM and the new
2971 * thread's register state.
2972 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002973static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07002974context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07002975 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976{
Ingo Molnardd41f592007-07-09 18:51:59 +02002977 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
Avi Kivitye107be32007-07-26 13:40:43 +02002979 prepare_task_switch(rq, prev, next);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002980 trace_sched_switch(prev, next);
Ingo Molnardd41f592007-07-09 18:51:59 +02002981 mm = next->mm;
2982 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01002983 /*
2984 * For paravirt, this is coupled with an exit in switch_to to
2985 * combine the page table reload and the switch backend into
2986 * one hypercall.
2987 */
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -08002988 arch_start_context_switch(prev);
Zachary Amsden9226d122007-02-13 13:26:21 +01002989
Heiko Carstens31915ab2010-09-16 14:42:25 +02002990 if (!mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991 next->active_mm = oldmm;
2992 atomic_inc(&oldmm->mm_count);
2993 enter_lazy_tlb(oldmm, next);
2994 } else
2995 switch_mm(oldmm, mm, next);
2996
Heiko Carstens31915ab2010-09-16 14:42:25 +02002997 if (!prev->mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 rq->prev_mm = oldmm;
3000 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07003001 /*
3002	 * The runqueue lock will be released by the next
3003 * task (which is an invalid locking op but in the case
3004 * of the scheduler it's an obvious special-case), so we
3005 * do an early lockdep release here:
3006 */
3007#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07003008 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07003009#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010
3011 /* Here we just switch the register state and the stack. */
3012 switch_to(prev, next, prev);
3013
Ingo Molnardd41f592007-07-09 18:51:59 +02003014 barrier();
3015 /*
3016 * this_rq must be evaluated again because prev may have moved
3017 * CPUs since it called schedule(), thus the 'rq' on its stack
3018 * frame will be invalid.
3019 */
3020 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021}
3022
3023/*
3024 * nr_running, nr_uninterruptible and nr_context_switches:
3025 *
3026 * externally visible scheduler statistics: current number of runnable
3027 * threads, current number of uninterruptible-sleeping threads, total
3028 * number of context switches performed since bootup.
3029 */
3030unsigned long nr_running(void)
3031{
3032 unsigned long i, sum = 0;
3033
3034 for_each_online_cpu(i)
3035 sum += cpu_rq(i)->nr_running;
3036
3037 return sum;
3038}
3039
3040unsigned long nr_uninterruptible(void)
3041{
3042 unsigned long i, sum = 0;
3043
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003044 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 sum += cpu_rq(i)->nr_uninterruptible;
3046
3047 /*
3048 * Since we read the counters lockless, it might be slightly
3049 * inaccurate. Do not allow it to go below zero though:
3050 */
3051 if (unlikely((long)sum < 0))
3052 sum = 0;
3053
3054 return sum;
3055}
3056
3057unsigned long long nr_context_switches(void)
3058{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07003059 int i;
3060 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003062 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 sum += cpu_rq(i)->nr_switches;
3064
3065 return sum;
3066}
3067
3068unsigned long nr_iowait(void)
3069{
3070 unsigned long i, sum = 0;
3071
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003072 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073 sum += atomic_read(&cpu_rq(i)->nr_iowait);
3074
3075 return sum;
3076}
3077
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003078unsigned long nr_iowait_cpu(int cpu)
Arjan van de Ven69d25872009-09-21 17:04:08 -07003079{
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003080 struct rq *this = cpu_rq(cpu);
Arjan van de Ven69d25872009-09-21 17:04:08 -07003081 return atomic_read(&this->nr_iowait);
3082}
3083
3084unsigned long this_cpu_load(void)
3085{
3086 struct rq *this = this_rq();
3087 return this->cpu_load[0];
3088}
3089
3090
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003091/* Variables and functions for calc_load */
3092static atomic_long_t calc_load_tasks;
3093static unsigned long calc_load_update;
3094unsigned long avenrun[3];
3095EXPORT_SYMBOL(avenrun);
3096
Peter Zijlstra74f51872010-04-22 21:50:19 +02003097static long calc_load_fold_active(struct rq *this_rq)
3098{
3099 long nr_active, delta = 0;
3100
3101 nr_active = this_rq->nr_running;
3102 nr_active += (long) this_rq->nr_uninterruptible;
3103
3104 if (nr_active != this_rq->calc_load_active) {
3105 delta = nr_active - this_rq->calc_load_active;
3106 this_rq->calc_load_active = nr_active;
3107 }
3108
3109 return delta;
3110}
3111
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003112static unsigned long
3113calc_load(unsigned long load, unsigned long exp, unsigned long active)
3114{
3115 load *= exp;
3116 load += active * (FIXED_1 - exp);
3117 load += 1UL << (FSHIFT - 1);
3118 return load >> FSHIFT;
3119}
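/*
 * Editorial worked example: with FSHIFT = 11, FIXED_1 = 2048 and the
 * 1-minute decay constant EXP_1 = 1884, one update with two runnable
 * tasks (active = 2 * FIXED_1 = 4096) starting from load = 0 yields
 *
 *	load = (0 * 1884 + 4096 * (2048 - 1884) + 1024) >> 11
 *	     = (4096 * 164 + 1024) >> 11
 *	     = 328
 *
 * i.e. 328/2048 ~= 0.16, one decay step of the 1-minute average moving
 * from 0 towards 2.0 with e = 1884/2048.
 */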
3120
Peter Zijlstra74f51872010-04-22 21:50:19 +02003121#ifdef CONFIG_NO_HZ
3122/*
3123 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3124 *
3125 * When making the ILB scale, we should try to pull this in as well.
3126 */
3127static atomic_long_t calc_load_tasks_idle;
3128
3129static void calc_load_account_idle(struct rq *this_rq)
3130{
3131 long delta;
3132
3133 delta = calc_load_fold_active(this_rq);
3134 if (delta)
3135 atomic_long_add(delta, &calc_load_tasks_idle);
3136}
3137
3138static long calc_load_fold_idle(void)
3139{
3140 long delta = 0;
3141
3142 /*
3143	 * It's got a race; we don't care...
3144 */
3145 if (atomic_long_read(&calc_load_tasks_idle))
3146 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3147
3148 return delta;
3149}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003150
3151/**
3152 * fixed_power_int - compute: x^n, in O(log n) time
3153 *
3154 * @x: base of the power
3155 * @frac_bits: fractional bits of @x
3156 * @n: power to raise @x to.
3157 *
3158 * By exploiting the relation between the definition of the natural power
3159 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
3160 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3161 * (where: n_i \elem {0, 1}, the binary vector representing n),
3162 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3163 * of course trivially computable in O(log_2 n), the length of our binary
3164 * vector.
3165 */
3166static unsigned long
3167fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3168{
3169 unsigned long result = 1UL << frac_bits;
3170
3171 if (n) for (;;) {
3172 if (n & 1) {
3173 result *= x;
3174 result += 1UL << (frac_bits - 1);
3175 result >>= frac_bits;
3176 }
3177 n >>= 1;
3178 if (!n)
3179 break;
3180 x *= x;
3181 x += 1UL << (frac_bits - 1);
3182 x >>= frac_bits;
3183 }
3184
3185 return result;
3186}
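/*
 * Editorial worked example: with frac_bits = 11, the value 0.5 is
 * encoded as 1024, and fixed_power_int(1024, 11, 3) computes 0.5^3 by
 * walking the bits of n = 3 (binary 11), rounding terms elided:
 *
 *	bit 0 set:	result = (2048 * 1024) >> 11 = 1024	(0.500)
 *	square x:	x      = (1024 * 1024) >> 11 =  512	(0.250)
 *	bit 1 set:	result = (1024 *  512) >> 11 =  256	(0.125)
 *
 * returning 256, i.e. 0.125 in 11-bit fixed point, as expected.
 */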
3187
3188/*
3189 * a1 = a0 * e + a * (1 - e)
3190 *
3191 * a2 = a1 * e + a * (1 - e)
3192 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3193 * = a0 * e^2 + a * (1 - e) * (1 + e)
3194 *
3195 * a3 = a2 * e + a * (1 - e)
3196 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3197 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3198 *
3199 * ...
3200 *
3201 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3202 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3203 * = a0 * e^n + a * (1 - e^n)
3204 *
3205 * [1] application of the geometric series:
3206 *
3207 *          n         1 - x^(n+1)
3208 * S_n := \Sum x^i = -------------
3209 *         i=0            1 - x
3210 */
3211static unsigned long
3212calc_load_n(unsigned long load, unsigned long exp,
3213 unsigned long active, unsigned int n)
3214{
3215
3216 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3217}
3218
3219/*
3220 * NO_HZ can leave us missing all per-cpu ticks calling
3221 * calc_load_account_active(), but since an idle CPU folds its delta into
3222 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3223 * in the pending idle delta if our idle period crossed a load cycle boundary.
3224 *
3225 * Once we've updated the global active value, we need to apply the exponential
3226 * weights adjusted to the number of cycles missed.
3227 */
3228static void calc_global_nohz(unsigned long ticks)
3229{
3230 long delta, active, n;
3231
3232 if (time_before(jiffies, calc_load_update))
3233 return;
3234
3235 /*
3236 * If we crossed a calc_load_update boundary, make sure to fold
3237 * any pending idle changes, the respective CPUs might have
3238 * missed the tick driven calc_load_account_active() update
3239 * due to NO_HZ.
3240 */
3241 delta = calc_load_fold_idle();
3242 if (delta)
3243 atomic_long_add(delta, &calc_load_tasks);
3244
3245 /*
3246 * If we were idle for multiple load cycles, apply them.
3247 */
3248 if (ticks >= LOAD_FREQ) {
3249 n = ticks / LOAD_FREQ;
3250
3251 active = atomic_long_read(&calc_load_tasks);
3252 active = active > 0 ? active * FIXED_1 : 0;
3253
3254 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3255 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3256 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3257
3258 calc_load_update += n * LOAD_FREQ;
3259 }
3260
3261 /*
3262 * It's possible the remainder of the above division also crosses
3263 * a LOAD_FREQ period; the regular check in calc_global_load(),
3264 * which comes after this, will take care of that.
3265 *
3266 * Consider us being 11 ticks before a cycle completion, and us
3267 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3268 * age us 4 cycles, and the test in calc_global_load() will
3269 * pick up the final one.
3270 */
3271}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003272#else
3273static void calc_load_account_idle(struct rq *this_rq)
3274{
3275}
3276
3277static inline long calc_load_fold_idle(void)
3278{
3279 return 0;
3280}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003281
3282static void calc_global_nohz(unsigned long ticks)
3283{
3284}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003285#endif
3286
Thomas Gleixner2d024942009-05-02 20:08:52 +02003287/**
3288 * get_avenrun - get the load average array
3289 * @loads: pointer to dest load array
3290 * @offset: offset to add
3291 * @shift: shift count to shift the result left
3292 *
3293 * These values are estimates at best, so no need for locking.
3294 */
3295void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3296{
3297 loads[0] = (avenrun[0] + offset) << shift;
3298 loads[1] = (avenrun[1] + offset) << shift;
3299 loads[2] = (avenrun[2] + offset) << shift;
3300}
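/*
 * Editorial note: the /proc/loadavg code is believed to consume these
 * fixed-point values roughly as follows:
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	...integer part:    avnrun[i] >> FSHIFT
 *	...hundredths part: ((avnrun[i] & (FIXED_1-1)) * 100) >> FSHIFT
 *
 * where the FIXED_1/200 offset rounds, rather than truncates, the
 * hundredths digit.
 */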
3301
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003302/*
3303 * calc_global_load - update the avenrun load estimates 10 ticks after the
3304 * CPUs have updated calc_load_tasks.
3305 */
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003306void calc_global_load(unsigned long ticks)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003307{
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003308 long active;
3309
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003310 calc_global_nohz(ticks);
3311
3312 if (time_before(jiffies, calc_load_update + 10))
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003313 return;
3314
3315 active = atomic_long_read(&calc_load_tasks);
3316 active = active > 0 ? active * FIXED_1 : 0;
3317
3318 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3319 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3320 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3321
3322 calc_load_update += LOAD_FREQ;
3323}
3324
3325/*
Peter Zijlstra74f51872010-04-22 21:50:19 +02003326 * Called from update_cpu_load() to periodically update this CPU's
3327 * active count.
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003328 */
3329static void calc_load_account_active(struct rq *this_rq)
3330{
Peter Zijlstra74f51872010-04-22 21:50:19 +02003331 long delta;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003332
Peter Zijlstra74f51872010-04-22 21:50:19 +02003333 if (time_before(jiffies, this_rq->calc_load_update))
3334 return;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003335
Peter Zijlstra74f51872010-04-22 21:50:19 +02003336 delta = calc_load_fold_active(this_rq);
3337 delta += calc_load_fold_idle();
3338 if (delta)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003339 atomic_long_add(delta, &calc_load_tasks);
Peter Zijlstra74f51872010-04-22 21:50:19 +02003340
3341 this_rq->calc_load_update += LOAD_FREQ;
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08003342}
3343
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344/*
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003345 * The exact cpuload at various idx values, calculated at every tick would be
3346 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3347 *
3348 * If a cpu misses updates for n-1 ticks (because it was idle) and the update
3349 * then gets called on the nth tick, when the cpu may be busy, we have:
3350 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3351 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3352 *
3353 * decay_load_missed() below does efficient calculation of
3354 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3355 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3356 *
3357 * The calculation is approximated on a 128 point scale.
3358 * degrade_zero_ticks is the number of ticks after which load at any
3359 * particular idx is approximated to be zero.
3360 * degrade_factor is a precomputed table, a row for each load idx.
3361 * Each column corresponds to degradation factor for a power of two ticks,
3362 * based on 128 point scale.
3363 * Example:
3364 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3365 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3366 *
3367 * With this power of 2 load factors, we can degrade the load n times
3368 * by looking at 1 bits in n and doing as many mult/shift instead of
3369 * n mult/shifts needed by the exact degradation.
3370 */
3371#define DEGRADE_SHIFT 7
3372static const unsigned char
3373 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3374static const unsigned char
3375 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3376 {0, 0, 0, 0, 0, 0, 0, 0},
3377 {64, 32, 8, 0, 0, 0, 0, 0},
3378 {96, 72, 40, 12, 1, 0, 0},
3379 {112, 98, 75, 43, 15, 1, 0},
3380 {120, 112, 98, 76, 45, 16, 2} };
3381
3382/*
3383 * Update cpu_load for any missed ticks, due to tickless idle. The missed
3384 * ticks can only have occurred while the CPU was idle, so we just decay the
3385 * old load without adding any new load.
3386 */
3387static unsigned long
3388decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3389{
3390 int j = 0;
3391
3392 if (!missed_updates)
3393 return load;
3394
3395 if (missed_updates >= degrade_zero_ticks[idx])
3396 return 0;
3397
3398 if (idx == 1)
3399 return load >> missed_updates;
3400
3401 while (missed_updates) {
3402 if (missed_updates % 2)
3403 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3404
3405 missed_updates >>= 1;
3406 j++;
3407 }
3408 return load;
3409}
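/*
 * Editorial worked example: a load of 128 at idx 2, decayed over 8
 * missed ticks.  missed_updates = 8 has only bit 3 set, so a single
 * table lookup replaces eight multiplies:
 *
 *	decay_load_missed(128, 8, 2)
 *		= (128 * degrade_factor[2][3]) >> DEGRADE_SHIFT
 *		= (128 * 12) >> 7
 *		= 12
 *
 * close to the exact 128 * (3/4)^8 ~= 12.8.  (The shorter rows of
 * degrade_factor[] are implicitly zero-padded by C initializer rules,
 * not missing entries.)
 */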
3410
3411/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003412 * Update rq->cpu_load[] statistics. This function is usually called every
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003413 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3414 * every tick. We fix it up based on jiffies.
Ingo Molnar48f24c42006-07-03 00:25:40 -07003415 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003416static void update_cpu_load(struct rq *this_rq)
Ingo Molnar48f24c42006-07-03 00:25:40 -07003417{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02003418 unsigned long this_load = this_rq->load.weight;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003419 unsigned long curr_jiffies = jiffies;
3420 unsigned long pending_updates;
Ingo Molnardd41f592007-07-09 18:51:59 +02003421 int i, scale;
3422
3423 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02003424
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003425 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3426 if (curr_jiffies == this_rq->last_load_update_tick)
3427 return;
3428
3429 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3430 this_rq->last_load_update_tick = curr_jiffies;
3431
Ingo Molnardd41f592007-07-09 18:51:59 +02003432 /* Update our load: */
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003433 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3434 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
Ingo Molnardd41f592007-07-09 18:51:59 +02003435 unsigned long old_load, new_load;
3436
3437 /* scale is effectively 1 << i now, and >> i divides by scale */
3438
3439 old_load = this_rq->cpu_load[i];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003440 old_load = decay_load_missed(old_load, pending_updates - 1, i);
Ingo Molnardd41f592007-07-09 18:51:59 +02003441 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02003442 /*
3443 * Round up the averaging division if load is increasing. This
3444 * prevents us from getting stuck on 9 if the load is 10, for
3445 * example.
3446 */
3447 if (new_load > old_load)
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003448 new_load += scale - 1;
3449
3450 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
Ingo Molnardd41f592007-07-09 18:51:59 +02003451 }
Suresh Siddhada2b71e2010-08-23 13:42:51 -07003452
3453 sched_avg_update(this_rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003454}
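/*
 * Editorial worked example: for idx i = 2 (scale = 4), old_load = 8
 * and a rising this_load = 20:
 *
 *	new_load    = 20 + (4 - 1) = 23		(round up, load rising)
 *	cpu_load[2] = (8 * 3 + 23) >> 2 = 11
 *
 * The longer-term indices trail the instantaneous load, while the
 * rounding term lets a steady load of 20 actually reach 20 instead of
 * stalling one below it.
 */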
3455
3456static void update_cpu_load_active(struct rq *this_rq)
3457{
3458 update_cpu_load(this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003459
Peter Zijlstra74f51872010-04-22 21:50:19 +02003460 calc_load_account_active(this_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07003461}
3462
Ingo Molnardd41f592007-07-09 18:51:59 +02003463#ifdef CONFIG_SMP
3464
Ingo Molnar48f24c42006-07-03 00:25:40 -07003465/*
Peter Zijlstra38022902009-12-16 18:04:37 +01003466 * sched_exec - execve() is a valuable balancing opportunity, because at
3467 * this point the task has the smallest effective memory and cache footprint.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 */
Peter Zijlstra38022902009-12-16 18:04:37 +01003469void sched_exec(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470{
Peter Zijlstra38022902009-12-16 18:04:37 +01003471 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003473 struct rq *rq;
Peter Zijlstra0017d732010-03-24 18:34:10 +01003474 int dest_cpu;
Peter Zijlstra38022902009-12-16 18:04:37 +01003475
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 rq = task_rq_lock(p, &flags);
Peter Zijlstra0017d732010-03-24 18:34:10 +01003477 dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3478 if (dest_cpu == smp_processor_id())
3479 goto unlock;
Peter Zijlstra38022902009-12-16 18:04:37 +01003480
3481 /*
3482 * select_task_rq() can race against ->cpus_allowed
3483 */
Oleg Nesterov30da6882010-03-15 10:10:19 +01003484 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05303485 likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
Tejun Heo969c7922010-05-06 18:49:21 +02003486 struct migration_arg arg = { p, dest_cpu };
Ingo Molnar36c8b582006-07-03 00:25:41 -07003487
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488 task_rq_unlock(rq, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02003489 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490 return;
3491 }
Peter Zijlstra0017d732010-03-24 18:34:10 +01003492unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493 task_rq_unlock(rq, &flags);
3494}
3495
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496#endif
3497
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498DEFINE_PER_CPU(struct kernel_stat, kstat);
3499
3500EXPORT_PER_CPU_SYMBOL(kstat);
3501
3502/*
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003503 * Return any ns on the sched_clock that have not yet been accounted in
Frank Mayharf06febc2008-09-12 09:54:39 -07003504 * @p in case that task is currently running.
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003505 *
3506 * Called with task_rq_lock() held on @rq.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 */
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003508static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3509{
3510 u64 ns = 0;
3511
3512 if (task_current(rq, p)) {
3513 update_rq_clock(rq);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07003514 ns = rq->clock_task - p->se.exec_start;
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003515 if ((s64)ns < 0)
3516 ns = 0;
3517 }
3518
3519 return ns;
3520}
3521
Frank Mayharbb34d922008-09-12 09:54:39 -07003522unsigned long long task_delta_exec(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 unsigned long flags;
Ingo Molnar41b86e92007-07-09 18:51:58 +02003525 struct rq *rq;
Frank Mayharbb34d922008-09-12 09:54:39 -07003526 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003527
Ingo Molnar41b86e92007-07-09 18:51:58 +02003528 rq = task_rq_lock(p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003529 ns = do_task_delta_exec(p, rq);
3530 task_rq_unlock(rq, &flags);
Ingo Molnar15084872008-09-30 08:28:17 +02003531
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003532 return ns;
3533}
Frank Mayharf06febc2008-09-12 09:54:39 -07003534
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003535/*
3536 * Return accounted runtime for the task.
3537 * In case the task is currently running, return the runtime plus current's
3538 * pending runtime that has not been accounted yet.
3539 */
3540unsigned long long task_sched_runtime(struct task_struct *p)
3541{
3542 unsigned long flags;
3543 struct rq *rq;
3544 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003545
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003546 rq = task_rq_lock(p, &flags);
3547 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3548 task_rq_unlock(rq, &flags);
3549
3550 return ns;
3551}
3552
3553/*
3554 * Return sum_exec_runtime for the thread group.
3555 * In case the task is currently running, return the sum plus current's
3556 * pending runtime that has not been accounted yet.
3557 *
3558 * Note that the thread group might have other running tasks as well,
3559 * so the return value does not include other pending runtime that other
3560 * running tasks might have.
3561 */
3562unsigned long long thread_group_sched_runtime(struct task_struct *p)
3563{
3564 struct task_cputime totals;
3565 unsigned long flags;
3566 struct rq *rq;
3567 u64 ns;
3568
3569 rq = task_rq_lock(p, &flags);
3570 thread_group_cputime(p, &totals);
3571 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572 task_rq_unlock(rq, &flags);
3573
3574 return ns;
3575}
3576
3577/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578 * Account user cpu time to a process.
3579 * @p: the process that the cpu time gets accounted to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580 * @cputime: the cpu time spent in user space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003581 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003583void account_user_time(struct task_struct *p, cputime_t cputime,
3584 cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585{
3586 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3587 cputime64_t tmp;
3588
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003589 /* Add user time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003591 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003592 account_group_user_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593
3594 /* Add user time to cpustat. */
3595 tmp = cputime_to_cputime64(cputime);
3596 if (TASK_NICE(p) > 0)
3597 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3598 else
3599 cpustat->user = cputime64_add(cpustat->user, tmp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05303600
3601 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
Jonathan Lim49b5cf32008-07-25 01:48:40 -07003602 /* Account for user time used */
3603 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604}
3605
3606/*
Laurent Vivier94886b82007-10-15 17:00:19 +02003607 * Account guest cpu time to a process.
3608 * @p: the process that the cpu time gets accounted to
3609 * @cputime: the cpu time spent in virtual machine since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003610 * @cputime_scaled: cputime scaled by cpu frequency
Laurent Vivier94886b82007-10-15 17:00:19 +02003611 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003612static void account_guest_time(struct task_struct *p, cputime_t cputime,
3613 cputime_t cputime_scaled)
Laurent Vivier94886b82007-10-15 17:00:19 +02003614{
3615 cputime64_t tmp;
3616 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3617
3618 tmp = cputime_to_cputime64(cputime);
3619
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003620 /* Add guest time to process. */
Laurent Vivier94886b82007-10-15 17:00:19 +02003621 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003622 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003623 account_group_user_time(p, cputime);
Laurent Vivier94886b82007-10-15 17:00:19 +02003624 p->gtime = cputime_add(p->gtime, cputime);
3625
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003626 /* Add guest time to cpustat. */
Ryota Ozakice0e7b22009-10-24 01:20:10 +09003627 if (TASK_NICE(p) > 0) {
3628 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3629 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3630 } else {
3631 cpustat->user = cputime64_add(cpustat->user, tmp);
3632 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3633 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003634}
3635
3636/*
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003637 * Account system cpu time to a process and desired cpustat field
3638 * @p: the process that the cpu time gets accounted to
3639 * @cputime: the cpu time spent in kernel space since the last update
3640 * @cputime_scaled: cputime scaled by cpu frequency
3641 * @target_cputime64: pointer to cpustat field that has to be updated
3642 */
3643static inline
3644void __account_system_time(struct task_struct *p, cputime_t cputime,
3645 cputime_t cputime_scaled, cputime64_t *target_cputime64)
3646{
3647 cputime64_t tmp = cputime_to_cputime64(cputime);
3648
3649 /* Add system time to process. */
3650 p->stime = cputime_add(p->stime, cputime);
3651 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3652 account_group_system_time(p, cputime);
3653
3654 /* Add system time to cpustat. */
3655 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
3656 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3657
3658 /* Account for system time used */
3659 acct_update_integrals(p);
3660}
3661
3662/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663 * Account system cpu time to a process.
3664 * @p: the process that the cpu time gets accounted to
3665 * @hardirq_offset: the offset to subtract from hardirq_count()
3666 * @cputime: the cpu time spent in kernel space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003667 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668 */
3669void account_system_time(struct task_struct *p, int hardirq_offset,
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003670 cputime_t cputime, cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671{
3672 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003673 cputime64_t *target_cputime64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003675 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003676 account_guest_time(p, cputime, cputime_scaled);
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003677 return;
3678 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003679
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 if (hardirq_count() - hardirq_offset)
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003681 target_cputime64 = &cpustat->irq;
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07003682 else if (in_serving_softirq())
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003683 target_cputime64 = &cpustat->softirq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 else
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003685 target_cputime64 = &cpustat->system;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003686
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003687 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688}
3689
Heiko Carstens7e9498702011-02-25 14:32:28 +01003690#ifndef CONFIG_VIRT_CPU_ACCOUNTING
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003691#ifdef CONFIG_IRQ_TIME_ACCOUNTING
3692/*
3693 * Account a tick to a process and cpustat
3694 * @p: the process that the cpu time gets accounted to
3695 * @user_tick: is the tick from userspace
3696 * @rq: the pointer to rq
3697 *
3698 * Tick demultiplexing follows the order
3699 * - pending hardirq update
3700 * - pending softirq update
3701 * - user_time
3702 * - idle_time
3703 * - system time
3704 * - check for guest_time
3705 * - else account as system_time
3706 *
3707 * The check for hardirq is done for both system and user time, as there is
3708 * no timer going off while we are on hardirq, and hence we may never get an
3709 * opportunity to update it solely in system time.
3710 * p->stime and friends are only updated on system time and not on irq or
3711 * softirq time, as those no longer count in task exec_runtime.
3712 */
3713static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3714 struct rq *rq)
3715{
3716 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3717 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
3718 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3719
3720 if (irqtime_account_hi_update()) {
3721 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3722 } else if (irqtime_account_si_update()) {
3723 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
Venkatesh Pallipadi414bee92010-12-21 17:09:04 -08003724 } else if (this_cpu_ksoftirqd() == p) {
3725 /*
3726 * ksoftirqd time does not get accounted in cpu_softirq_time.
3727 * So, we have to handle it separately here.
3728 * Also, p->stime needs to be updated for ksoftirqd.
3729 */
3730 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3731 &cpustat->softirq);
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003732 } else if (user_tick) {
3733 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3734 } else if (p == rq->idle) {
3735 account_idle_time(cputime_one_jiffy);
3736 } else if (p->flags & PF_VCPU) { /* System time or guest time */
3737 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
3738 } else {
3739 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3740 &cpustat->system);
3741 }
3742}
3743
3744static void irqtime_account_idle_ticks(int ticks)
3745{
3746 int i;
3747 struct rq *rq = this_rq();
3748
3749 for (i = 0; i < ticks; i++)
3750 irqtime_account_process_tick(current, 0, rq);
3751}
3752#else
3753static void irqtime_account_idle_ticks(int ticks) {}
3754static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3755 struct rq *rq) {}
3756#endif
Heiko Carstens7e9498702011-02-25 14:32:28 +01003757#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003758
Linus Torvalds1da177e2005-04-16 15:20:36 -07003759/*
3760 * Account for involuntary wait time.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761 * @cputime: the cpu time spent in involuntary wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003763void account_steal_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003766 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3767
3768 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769}
3770
Christoph Lameter7835b982006-12-10 02:20:22 -08003771/*
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003772 * Account for idle time.
3773 * @cputime: the cpu time spent in idle wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003775void account_idle_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776{
3777 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003778 cputime64_t cputime64 = cputime_to_cputime64(cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779 struct rq *rq = this_rq();
3780
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003781 if (atomic_read(&rq->nr_iowait) > 0)
3782 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3783 else
3784 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
Christoph Lameter7835b982006-12-10 02:20:22 -08003785}
3786
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003787#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3788
3789/*
3790 * Account a single tick of cpu time.
3791 * @p: the process that the cpu time gets accounted to
3792 * @user_tick: indicates if the tick is a user or a system tick
3793 */
3794void account_process_tick(struct task_struct *p, int user_tick)
3795{
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003796 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003797 struct rq *rq = this_rq();
3798
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003799 if (sched_clock_irqtime) {
3800 irqtime_account_process_tick(p, user_tick, rq);
3801 return;
3802 }
3803
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003804 if (user_tick)
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003805 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
Eric Dumazetf5f293a2009-04-29 14:44:49 +02003806 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003807 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003808 one_jiffy_scaled);
3809 else
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003810 account_idle_time(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003811}
3812
3813/*
3814 * Account multiple ticks of steal time.
3816 * @ticks: number of stolen ticks
3817 */
3818void account_steal_ticks(unsigned long ticks)
3819{
3820 account_steal_time(jiffies_to_cputime(ticks));
3821}
3822
3823/*
3824 * Account multiple ticks of idle time.
3825 * @ticks: number of idle ticks
3826 */
3827void account_idle_ticks(unsigned long ticks)
3828{
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003829
3830 if (sched_clock_irqtime) {
3831 irqtime_account_idle_ticks(ticks);
3832 return;
3833 }
3834
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003835 account_idle_time(jiffies_to_cputime(ticks));
3836}
3837
3838#endif
3839
Christoph Lameter7835b982006-12-10 02:20:22 -08003840/*
Balbir Singh49048622008-09-05 18:12:23 +02003841 * Use precise platform statistics if available:
3842 */
3843#ifdef CONFIG_VIRT_CPU_ACCOUNTING
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003844void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003845{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003846 *ut = p->utime;
3847 *st = p->stime;
Balbir Singh49048622008-09-05 18:12:23 +02003848}
3849
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003850void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003851{
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003852 struct task_cputime cputime;
3853
3854 thread_group_cputime(p, &cputime);
3855
3856 *ut = cputime.utime;
3857 *st = cputime.stime;
Balbir Singh49048622008-09-05 18:12:23 +02003858}
3859#else
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003860
3861#ifndef nsecs_to_cputime
Hidetoshi Setob7b20df92009-11-26 14:49:27 +09003862# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003863#endif
3864
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003865void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003866{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003867 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
Balbir Singh49048622008-09-05 18:12:23 +02003868
3869 /*
3870 * Use CFS's precise accounting:
3871 */
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003872 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
Balbir Singh49048622008-09-05 18:12:23 +02003873
3874 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003875 u64 temp = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003876
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003877 temp *= utime;
Balbir Singh49048622008-09-05 18:12:23 +02003878 do_div(temp, total);
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003879 utime = (cputime_t)temp;
3880 } else
3881 utime = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003882
3883 /*
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003884 * Compare with previous values, to keep monotonicity:
Balbir Singh49048622008-09-05 18:12:23 +02003885 */
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003886 p->prev_utime = max(p->prev_utime, utime);
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003887 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
Balbir Singh49048622008-09-05 18:12:23 +02003888
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003889 *ut = p->prev_utime;
3890 *st = p->prev_stime;
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003891}
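
/*
 * Illustrative sketch (not from the kernel tree): the proportional
 * split performed above, written out as plain integer math.  Given the
 * tick-sampled utime/stime pair and the precise rtime derived from
 * sum_exec_runtime, user time is rescaled so the sampled user/system
 * ratio is preserved while utime + stime tracks rtime.  Assumes
 * <linux/math64.h>; the multiplication has the same overflow exposure
 * as the code above.
 */
static inline u64 example_scale_utime(u64 rtime, u64 utime, u64 stime)
{
	u64 total = utime + stime;

	return total ? div64_u64(rtime * utime, total) : rtime;
}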
Balbir Singh49048622008-09-05 18:12:23 +02003892
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003893/*
3894 * Must be called with siglock held.
3895 */
3896void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3897{
3898 struct signal_struct *sig = p->signal;
3899 struct task_cputime cputime;
3900 cputime_t rtime, utime, total;
3901
3902 thread_group_cputime(p, &cputime);
3903
3904 total = cputime_add(cputime.utime, cputime.stime);
3905 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3906
3907 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003908 u64 temp = rtime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003909
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003910 temp *= cputime.utime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003911 do_div(temp, total);
3912 utime = (cputime_t)temp;
3913 } else
3914 utime = rtime;
3915
3916 sig->prev_utime = max(sig->prev_utime, utime);
3917 sig->prev_stime = max(sig->prev_stime,
3918 cputime_sub(rtime, sig->prev_utime));
3919
3920 *ut = sig->prev_utime;
3921 *st = sig->prev_stime;
Balbir Singh49048622008-09-05 18:12:23 +02003922}
3923#endif
3924
Balbir Singh49048622008-09-05 18:12:23 +02003925/*
Christoph Lameter7835b982006-12-10 02:20:22 -08003926 * This function gets called by the timer code, with HZ frequency.
3927 * We call it with interrupts disabled.
3928 *
3929 * It also gets called by the fork code, when changing the parent's
3930 * timeslices.
3931 */
3932void scheduler_tick(void)
3933{
Christoph Lameter7835b982006-12-10 02:20:22 -08003934 int cpu = smp_processor_id();
3935 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003936 struct task_struct *curr = rq->curr;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003937
3938 sched_clock_tick();
Christoph Lameter7835b982006-12-10 02:20:22 -08003939
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003940 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003941 update_rq_clock(rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003942 update_cpu_load_active(rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01003943 curr->sched_class->task_tick(rq, curr, 0);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003944 raw_spin_unlock(&rq->lock);
Ingo Molnardd41f592007-07-09 18:51:59 +02003945
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003946 perf_event_task_tick();
Peter Zijlstrae220d2d2009-05-23 18:28:55 +02003947
Christoph Lametere418e1c2006-12-10 02:20:23 -08003948#ifdef CONFIG_SMP
Ingo Molnardd41f592007-07-09 18:51:59 +02003949 rq->idle_at_tick = idle_cpu(cpu);
3950 trigger_load_balance(rq, cpu);
Christoph Lametere418e1c2006-12-10 02:20:23 -08003951#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952}
3953
Lai Jiangshan132380a2009-04-02 14:18:25 +08003954notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003955{
3956 if (in_lock_functions(addr)) {
3957 addr = CALLER_ADDR2;
3958 if (in_lock_functions(addr))
3959 addr = CALLER_ADDR3;
3960 }
3961 return addr;
3962}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963
Steven Rostedt7e49fcc2009-01-22 19:01:40 -05003964#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3965 defined(CONFIG_PREEMPT_TRACER))
3966
Srinivasa Ds43627582008-02-23 15:24:04 -08003967void __kprobes add_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003969#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970 /*
3971 * Underflow?
3972 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003973 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3974 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003975#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976 preempt_count() += val;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003977#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 /*
3979 * Spinlock count overflowing soon?
3980 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08003981 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3982 PREEMPT_MASK - 10);
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003983#endif
3984 if (preempt_count() == val)
3985 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986}
3987EXPORT_SYMBOL(add_preempt_count);
3988
Srinivasa Ds43627582008-02-23 15:24:04 -08003989void __kprobes sub_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003991#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 /*
3993 * Underflow?
3994 */
Ingo Molnar01e3eb82009-01-12 13:00:50 +01003995 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003996 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 /*
3998 * Is the spinlock portion underflowing?
3999 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07004000 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4001 !(preempt_count() & PREEMPT_MASK)))
4002 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004003#endif
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07004004
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004005 if (preempt_count() == val)
4006 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 preempt_count() -= val;
4008}
4009EXPORT_SYMBOL(sub_preempt_count);
4010
4011#endif
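
/*
 * Illustrative sketch (hypothetical example_* names): the add/sub pair
 * above is what preempt_disable()/preempt_enable() expand to when
 * preempt debugging or tracing is on.  The per-cpu access below is
 * safe because the task cannot migrate while the count is non-zero.
 */
static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_percpu_bump(void)
{
	preempt_disable();		/* count++, may call add_preempt_count() */
	__get_cpu_var(example_hits)++;	/* no migration until the matching enable */
	preempt_enable();		/* count--, reschedules if needed */
}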
4012
4013/*
Ingo Molnardd41f592007-07-09 18:51:59 +02004014 * Print scheduling while atomic bug:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 */
Ingo Molnardd41f592007-07-09 18:51:59 +02004016static noinline void __schedule_bug(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017{
Satyam Sharma838225b2007-10-24 18:23:50 +02004018 struct pt_regs *regs = get_irq_regs();
4019
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004020 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4021 prev->comm, prev->pid, preempt_count());
Satyam Sharma838225b2007-10-24 18:23:50 +02004022
Ingo Molnardd41f592007-07-09 18:51:59 +02004023 debug_show_held_locks(prev);
Arjan van de Vene21f5b12008-05-23 09:05:58 -07004024 print_modules();
Ingo Molnardd41f592007-07-09 18:51:59 +02004025 if (irqs_disabled())
4026 print_irqtrace_events(prev);
Satyam Sharma838225b2007-10-24 18:23:50 +02004027
4028 if (regs)
4029 show_regs(regs);
4030 else
4031 dump_stack();
Ingo Molnardd41f592007-07-09 18:51:59 +02004032}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033
Ingo Molnardd41f592007-07-09 18:51:59 +02004034/*
4035 * Various schedule()-time debugging checks and statistics:
4036 */
4037static inline void schedule_debug(struct task_struct *prev)
4038{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039 /*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004040 * Test if we are atomic. Since do_exit() needs to call into
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041 * schedule() atomically, we ignore that path for now.
4042 * Otherwise, whine if we are scheduling when we should not be.
4043 */
Roel Kluin3f33a7c2008-05-13 23:44:11 +02004044 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
Ingo Molnardd41f592007-07-09 18:51:59 +02004045 __schedule_bug(prev);
4046
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4048
Ingo Molnar2d723762007-10-15 17:00:12 +02004049 schedstat_inc(this_rq(), sched_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02004050#ifdef CONFIG_SCHEDSTATS
4051 if (unlikely(prev->lock_depth >= 0)) {
Yong Zhangfce20972011-01-14 15:57:39 +08004052 schedstat_inc(this_rq(), rq_sched_info.bkl_count);
Ingo Molnar2d723762007-10-15 17:00:12 +02004053 schedstat_inc(prev, sched_info.bkl_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02004054 }
4055#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02004056}
4057
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004058static void put_prev_task(struct rq *rq, struct task_struct *prev)
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004059{
Mike Galbraitha64692a2010-03-11 17:16:20 +01004060 if (prev->se.on_rq)
4061 update_rq_clock(rq);
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004062 prev->sched_class->put_prev_task(rq, prev);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004063}
4064
Ingo Molnardd41f592007-07-09 18:51:59 +02004065/*
4066 * Pick up the highest-prio task:
4067 */
4068static inline struct task_struct *
Wang Chenb67802e2009-03-02 13:55:26 +08004069pick_next_task(struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02004070{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02004071 const struct sched_class *class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004072 struct task_struct *p;
4073
4074 /*
4075 * Optimization: we know that if all tasks are in
4076 * the fair class we can call that function directly:
4077 */
4078 if (likely(rq->nr_running == rq->cfs.nr_running)) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004079 p = fair_sched_class.pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004080 if (likely(p))
4081 return p;
4082 }
4083
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004084 for_each_class(class) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004085 p = class->pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004086 if (p)
4087 return p;
Ingo Molnardd41f592007-07-09 18:51:59 +02004088 }
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004089
4090 BUG(); /* the idle class will always have a runnable task */
Ingo Molnardd41f592007-07-09 18:51:59 +02004091}
4092
4093/*
4094 * schedule() is the main scheduler function.
4095 */
Peter Zijlstraff743342009-03-13 12:21:26 +01004096asmlinkage void __sched schedule(void)
Ingo Molnardd41f592007-07-09 18:51:59 +02004097{
4098 struct task_struct *prev, *next;
Harvey Harrison67ca7bd2008-02-15 09:56:36 -08004099 unsigned long *switch_count;
Ingo Molnardd41f592007-07-09 18:51:59 +02004100 struct rq *rq;
Peter Zijlstra31656512008-07-18 18:01:23 +02004101 int cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02004102
Peter Zijlstraff743342009-03-13 12:21:26 +01004103need_resched:
4104 preempt_disable();
Ingo Molnardd41f592007-07-09 18:51:59 +02004105 cpu = smp_processor_id();
4106 rq = cpu_rq(cpu);
Paul E. McKenney25502a62010-04-01 17:37:01 -07004107 rcu_note_context_switch(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02004108 prev = rq->curr;
Ingo Molnardd41f592007-07-09 18:51:59 +02004109
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110 release_kernel_lock(prev);
4111need_resched_nonpreemptible:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112
Ingo Molnardd41f592007-07-09 18:51:59 +02004113 schedule_debug(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114
Peter Zijlstra31656512008-07-18 18:01:23 +02004115 if (sched_feat(HRTICK))
Mike Galbraithf333fdc2008-05-12 21:20:55 +02004116 hrtick_clear(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004117
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004118 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004120 switch_count = &prev->nivcsw;
Ingo Molnardd41f592007-07-09 18:51:59 +02004121 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
Tejun Heo21aa9af2010-06-08 21:40:37 +02004122 if (unlikely(signal_pending_state(prev->state, prev))) {
Ingo Molnardd41f592007-07-09 18:51:59 +02004123 prev->state = TASK_RUNNING;
Tejun Heo21aa9af2010-06-08 21:40:37 +02004124 } else {
4125 /*
4126 * If a worker is going to sleep, notify and
4127 * ask workqueue whether it wants to wake up a
4128 * task to maintain concurrency. If so, wake
4129 * up the task.
4130 */
4131 if (prev->flags & PF_WQ_WORKER) {
4132 struct task_struct *to_wakeup;
4133
4134 to_wakeup = wq_worker_sleeping(prev, cpu);
4135 if (to_wakeup)
4136 try_to_wake_up_local(to_wakeup);
4137 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004138 deactivate_task(rq, prev, DEQUEUE_SLEEP);
Tejun Heo21aa9af2010-06-08 21:40:37 +02004139 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004140 switch_count = &prev->nvcsw;
4141 }
4142
Gregory Haskins3f029d32009-07-29 11:08:47 -04004143 pre_schedule(rq, prev);
Steven Rostedtf65eda42008-01-25 21:08:07 +01004144
Ingo Molnardd41f592007-07-09 18:51:59 +02004145 if (unlikely(!rq->nr_running))
4146 idle_balance(cpu, rq);
4147
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004148 put_prev_task(rq, prev);
Wang Chenb67802e2009-03-02 13:55:26 +08004149 next = pick_next_task(rq);
Mike Galbraithf26f9af2010-12-08 11:05:42 +01004150 clear_tsk_need_resched(prev);
4151 rq->skip_clock_update = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153 if (likely(prev != next)) {
David Simner673a90a2008-04-29 10:08:59 +01004154 sched_info_switch(prev, next);
Peter Zijlstra49f47432009-12-27 11:51:52 +01004155 perf_event_task_sched_out(prev, next);
David Simner673a90a2008-04-29 10:08:59 +01004156
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157 rq->nr_switches++;
4158 rq->curr = next;
4159 ++*switch_count;
4160
Ingo Molnardd41f592007-07-09 18:51:59 +02004161 context_switch(rq, prev, next); /* unlocks the rq */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004162 /*
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004163 * The context switch has flipped the stack from under us
4164 * and restored the local variables which were saved when
4165 * this task called schedule() in the past. prev == current
4166 * is still correct, but it can be moved to another cpu/rq.
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004167 */
4168 cpu = smp_processor_id();
4169 rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004171 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172
Gregory Haskins3f029d32009-07-29 11:08:47 -04004173 post_schedule(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004175 if (unlikely(reacquire_kernel_lock(prev)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176 goto need_resched_nonpreemptible;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004177
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178 preempt_enable_no_resched();
Peter Zijlstraff743342009-03-13 12:21:26 +01004179 if (need_resched())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180 goto need_resched;
4181}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182EXPORT_SYMBOL(schedule);
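
/*
 * Illustrative sketch (hypothetical name): the canonical blocking
 * pattern built on schedule().  The sleeper publishes its state before
 * re-checking the condition so a concurrent wake_up_process() cannot be
 * lost.  Real code usually goes through wait queues or completions
 * (below) instead of open-coding this.
 */
static void example_wait_for_flag(int *flag)
{
	while (!ACCESS_ONCE(*flag)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (ACCESS_ONCE(*flag))	/* re-check after publishing state */
			break;
		schedule();		/* sleep until woken */
	}
	__set_current_state(TASK_RUNNING);
}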
4183
Frederic Weisbeckerc08f7822009-12-02 20:49:17 +01004184#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004185/*
4186 * Look out! "owner" is an entirely speculative pointer
4187 * access and not reliable.
4188 */
4189int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
4190{
4191 unsigned int cpu;
4192 struct rq *rq;
4193
4194 if (!sched_feat(OWNER_SPIN))
4195 return 0;
4196
4197#ifdef CONFIG_DEBUG_PAGEALLOC
4198 /*
4199 * Need to access the cpu field knowing that
4200 * DEBUG_PAGEALLOC could have unmapped it if
4201 * the mutex owner just released it and exited.
4202 */
4203 if (probe_kernel_address(&owner->cpu, cpu))
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004204 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004205#else
4206 cpu = owner->cpu;
4207#endif
4208
4209 /*
4210 * Even if the access succeeded (likely case),
4211 * the cpu field may no longer be valid.
4212 */
4213 if (cpu >= nr_cpumask_bits)
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004214 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004215
4216 /*
4217 * We need to validate that we can do a
4218 * get_cpu() and that we have the percpu area.
4219 */
4220 if (!cpu_online(cpu))
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004221 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004222
4223 rq = cpu_rq(cpu);
4224
4225 for (;;) {
4226 /*
4227 * Owner changed, break to re-assess state.
4228 */
Tim Chen9d0f4dc2010-08-18 15:00:27 -07004229 if (lock->owner != owner) {
4230 /*
4231 * If the lock has switched to a different owner,
4232 * we likely have heavy contention. Return 0 to quit
4233 * optimistic spinning and not contend further:
4234 */
4235 if (lock->owner)
4236 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004237 break;
Tim Chen9d0f4dc2010-08-18 15:00:27 -07004238 }
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004239
4240 /*
4241 * Is that owner really running on that cpu?
4242 */
4243 if (task_thread_info(rq->curr) != owner || need_resched())
4244 return 0;
4245
Gerald Schaefer335d7af2010-11-22 15:47:36 +01004246 arch_mutex_cpu_relax();
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004247 }
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004248
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004249 return 1;
4250}
4251#endif
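
/*
 * Illustrative sketch of the caller side (simplified; the real adaptive
 * spin lives in the mutex slowpath in kernel/mutex.c and differs in
 * detail): keep spinning while the owner is on-CPU, otherwise fall back
 * to sleeping.  example_* names are hypothetical; the cmpxchg mirrors
 * the fastpath convention (1 == unlocked, 0 == locked) and ignores the
 * negative "waiters" state for brevity.
 */
static inline bool example_try_acquire(struct mutex *lock)
{
	return atomic_cmpxchg(&lock->count, 1, 0) == 1;
}

static bool example_adaptive_lock(struct mutex *lock)
{
	for (;;) {
		struct thread_info *owner = ACCESS_ONCE(lock->owner);

		if (owner && !mutex_spin_on_owner(lock, owner))
			return false;	/* owner scheduled out: sleep path */
		if (example_try_acquire(lock))
			return true;	/* grabbed it while spinning */
		cpu_relax();
	}
}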
4252
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253#ifdef CONFIG_PREEMPT
4254/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004255 * this is the entry point to schedule() from in-kernel preemption
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004256 * off of preempt_enable. Kernel preemptions triggered by a return from
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257 * interrupt are handled by preempt_schedule_irq() and call schedule() directly.
4258 */
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004259asmlinkage void __sched notrace preempt_schedule(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260{
4261 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004262
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 /*
4264 * If there is a non-zero preempt_count or interrupts are disabled,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004265 * we do not want to preempt the current task. Just return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266 */
Nick Pigginbeed33a2006-10-11 01:21:52 -07004267 if (likely(ti->preempt_count || irqs_disabled()))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 return;
4269
Andi Kleen3a5c3592007-10-15 17:00:14 +02004270 do {
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004271 add_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004272 schedule();
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004273 sub_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004274
4275 /*
4276 * Check again in case we missed a preemption opportunity
4277 * between schedule and now.
4278 */
4279 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004280 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282EXPORT_SYMBOL(preempt_schedule);
4283
4284/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004285 * this is the entry point to schedule() from kernel preemption
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 * off of irq context.
 4287 * Note that this is called and returns with irqs disabled. This
 4288 * protects us against recursive calls from irq context.
4289 */
4290asmlinkage void __sched preempt_schedule_irq(void)
4291{
4292 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004293
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004294 /* Catch callers which need to be fixed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 BUG_ON(ti->preempt_count || !irqs_disabled());
4296
Andi Kleen3a5c3592007-10-15 17:00:14 +02004297 do {
4298 add_preempt_count(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004299 local_irq_enable();
4300 schedule();
4301 local_irq_disable();
Andi Kleen3a5c3592007-10-15 17:00:14 +02004302 sub_preempt_count(PREEMPT_ACTIVE);
4303
4304 /*
4305 * Check again in case we missed a preemption opportunity
4306 * between schedule and now.
4307 */
4308 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004309 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310}
4311
4312#endif /* CONFIG_PREEMPT */
4313
Peter Zijlstra63859d42009-09-15 19:14:42 +02004314int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004315 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316{
Peter Zijlstra63859d42009-09-15 19:14:42 +02004317 return try_to_wake_up(curr->private, mode, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319EXPORT_SYMBOL(default_wake_function);
4320
4321/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004322 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4323 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 * number) then we wake all the non-exclusive tasks and one exclusive task.
4325 *
4326 * There are circumstances in which we can try to wake a task which has already
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004327 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4329 */
Johannes Weiner78ddb082009-04-14 16:53:05 +02004330static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
Peter Zijlstra63859d42009-09-15 19:14:42 +02004331 int nr_exclusive, int wake_flags, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332{
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004333 wait_queue_t *curr, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004335 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07004336 unsigned flags = curr->flags;
4337
Peter Zijlstra63859d42009-09-15 19:14:42 +02004338 if (curr->func(curr, mode, wake_flags, key) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07004339 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 break;
4341 }
4342}
4343
4344/**
4345 * __wake_up - wake up threads blocked on a waitqueue.
4346 * @q: the waitqueue
4347 * @mode: which threads
4348 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Martin Waitz67be2dd2005-05-01 08:59:26 -07004349 * @key: is directly passed to the wakeup function
David Howells50fa6102009-04-28 15:01:38 +01004350 *
4351 * It may be assumed that this function implies a write memory barrier before
4352 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004354void __wake_up(wait_queue_head_t *q, unsigned int mode,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004355 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356{
4357 unsigned long flags;
4358
4359 spin_lock_irqsave(&q->lock, flags);
4360 __wake_up_common(q, mode, nr_exclusive, 0, key);
4361 spin_unlock_irqrestore(&q->lock, flags);
4362}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363EXPORT_SYMBOL(__wake_up);
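
/*
 * Illustrative sketch (hypothetical example_* names, assumes
 * <linux/wait.h>): minimal wait-queue usage.  The producer's wake_up()
 * expands to __wake_up(q, TASK_NORMAL, 1, NULL) above and walks the
 * queue via __wake_up_common().
 */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_ready;

static int example_consumer(void)
{
	/* re-checks example_ready around each internal schedule() */
	return wait_event_interruptible(example_wq, example_ready);
}

static void example_producer(void)
{
	example_ready = 1;
	wake_up(&example_wq);
}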
4364
4365/*
4366 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4367 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004368void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369{
4370 __wake_up_common(q, mode, 1, 0, NULL);
4371}
Michal Nazarewicz22c43c82010-05-05 12:53:11 +02004372EXPORT_SYMBOL_GPL(__wake_up_locked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373
Davide Libenzi4ede8162009-03-31 15:24:20 -07004374void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4375{
4376 __wake_up_common(q, mode, 1, 0, key);
4377}
4378
Linus Torvalds1da177e2005-04-16 15:20:36 -07004379/**
Davide Libenzi4ede8162009-03-31 15:24:20 -07004380 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 * @q: the waitqueue
4382 * @mode: which threads
4383 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Davide Libenzi4ede8162009-03-31 15:24:20 -07004384 * @key: opaque value to be passed to wakeup targets
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385 *
 4386 * The sync wakeup differs in that the waker knows that it will schedule
 4387 * away soon, so while the target thread will be woken up, it will not
 4388 * be migrated to another CPU - i.e. the two threads are 'synchronized'
4389 * with each other. This can prevent needless bouncing between CPUs.
4390 *
4391 * On UP it can prevent extra preemption.
David Howells50fa6102009-04-28 15:01:38 +01004392 *
4393 * It may be assumed that this function implies a write memory barrier before
4394 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004395 */
Davide Libenzi4ede8162009-03-31 15:24:20 -07004396void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4397 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004398{
4399 unsigned long flags;
Peter Zijlstra7d478722009-09-14 19:55:44 +02004400 int wake_flags = WF_SYNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401
4402 if (unlikely(!q))
4403 return;
4404
4405 if (unlikely(!nr_exclusive))
Peter Zijlstra7d478722009-09-14 19:55:44 +02004406 wake_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407
4408 spin_lock_irqsave(&q->lock, flags);
Peter Zijlstra7d478722009-09-14 19:55:44 +02004409 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410 spin_unlock_irqrestore(&q->lock, flags);
4411}
Davide Libenzi4ede8162009-03-31 15:24:20 -07004412EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4413
4414/*
4415 * __wake_up_sync - see __wake_up_sync_key()
4416 */
4417void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4418{
4419 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4420}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
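
/*
 * Illustrative sketch, reusing the hypothetical example_wq declared
 * earlier: the sync variant suits wakers that are about to sleep
 * themselves, e.g. a pipe writer waking a reader just before it blocks.
 * The WF_SYNC hint lets the woken task stay on this CPU.
 */
static void example_producer_then_sleep(void)
{
	example_ready = 1;
	wake_up_interruptible_sync(&example_wq);	/* __wake_up_sync(..., 1) */
	/* ... caller proceeds to block shortly after ... */
}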
4422
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004423/**
4424 * complete: - signals a single thread waiting on this completion
4425 * @x: holds the state of this particular completion
4426 *
4427 * This will wake up a single thread waiting on this completion. Threads will be
4428 * awakened in the same order in which they were queued.
4429 *
4430 * See also complete_all(), wait_for_completion() and related routines.
David Howells50fa6102009-04-28 15:01:38 +01004431 *
4432 * It may be assumed that this function implies a write memory barrier before
4433 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004434 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004435void complete(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436{
4437 unsigned long flags;
4438
4439 spin_lock_irqsave(&x->wait.lock, flags);
4440 x->done++;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004441 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442 spin_unlock_irqrestore(&x->wait.lock, flags);
4443}
4444EXPORT_SYMBOL(complete);
4445
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004446/**
4447 * complete_all: - signals all threads waiting on this completion
4448 * @x: holds the state of this particular completion
4449 *
4450 * This will wake up all threads waiting on this particular completion event.
David Howells50fa6102009-04-28 15:01:38 +01004451 *
4452 * It may be assumed that this function implies a write memory barrier before
4453 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004454 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004455void complete_all(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456{
4457 unsigned long flags;
4458
4459 spin_lock_irqsave(&x->wait.lock, flags);
4460 x->done += UINT_MAX/2;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004461 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 spin_unlock_irqrestore(&x->wait.lock, flags);
4463}
4464EXPORT_SYMBOL(complete_all);
4465
Andi Kleen8cbbe862007-10-15 17:00:14 +02004466static inline long __sched
4467do_wait_for_common(struct completion *x, long timeout, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469 if (!x->done) {
4470 DECLARE_WAITQUEUE(wait, current);
4471
Changli Gaoa93d2f12010-05-07 14:33:26 +08004472 __add_wait_queue_tail_exclusive(&x->wait, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473 do {
Oleg Nesterov94d3d822008-08-20 16:54:41 -07004474 if (signal_pending_state(state, current)) {
Oleg Nesterovea71a542008-06-20 18:32:20 +04004475 timeout = -ERESTARTSYS;
4476 break;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004477 }
4478 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004480 timeout = schedule_timeout(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481 spin_lock_irq(&x->wait.lock);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004482 } while (!x->done && timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 __remove_wait_queue(&x->wait, &wait);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004484 if (!x->done)
4485 return timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486 }
4487 x->done--;
Oleg Nesterovea71a542008-06-20 18:32:20 +04004488 return timeout ?: 1;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004489}
4490
4491static long __sched
4492wait_for_common(struct completion *x, long timeout, int state)
4493{
4494 might_sleep();
4495
4496 spin_lock_irq(&x->wait.lock);
4497 timeout = do_wait_for_common(x, timeout, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004499 return timeout;
4500}
4501
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004502/**
4503 * wait_for_completion: - waits for completion of a task
4504 * @x: holds the state of this particular completion
4505 *
4506 * This waits to be signaled for completion of a specific task. It is NOT
4507 * interruptible and there is no timeout.
4508 *
4509 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4510 * and interrupt capability. Also see complete().
4511 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004512void __sched wait_for_completion(struct completion *x)
Andi Kleen8cbbe862007-10-15 17:00:14 +02004513{
4514 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515}
4516EXPORT_SYMBOL(wait_for_completion);
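
/*
 * Illustrative sketch (hypothetical names, assumes
 * <linux/completion.h>): the standard completion handshake.  One side
 * blocks in wait_for_completion(); the signaling side, often an IRQ
 * handler or another thread, calls complete().
 */
static DECLARE_COMPLETION(example_done);

static void example_waiter_side(void)
{
	wait_for_completion(&example_done);	/* uninterruptible, no timeout */
}

static void example_signal_side(void)
{
	complete(&example_done);		/* wakes exactly one waiter */
}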
4517
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004518/**
4519 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4520 * @x: holds the state of this particular completion
4521 * @timeout: timeout value in jiffies
4522 *
4523 * This waits for either a completion of a specific task to be signaled or for a
4524 * specified timeout to expire. The timeout is in jiffies. It is not
4525 * interruptible.
4526 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004527unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4529{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004530 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531}
4532EXPORT_SYMBOL(wait_for_completion_timeout);
4533
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004534/**
4535 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4536 * @x: holds the state of this particular completion
4537 *
4538 * This waits for completion of a specific task to be signaled. It is
4539 * interruptible.
4540 */
Andi Kleen8cbbe862007-10-15 17:00:14 +02004541int __sched wait_for_completion_interruptible(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542{
Andi Kleen51e97992007-10-18 21:32:55 +02004543 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4544 if (t == -ERESTARTSYS)
4545 return t;
4546 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547}
4548EXPORT_SYMBOL(wait_for_completion_interruptible);
4549
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004550/**
4551 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4552 * @x: holds the state of this particular completion
4553 * @timeout: timeout value in jiffies
4554 *
4555 * This waits for either a completion of a specific task to be signaled or for a
4556 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4557 */
NeilBrown6bf41232011-01-05 12:50:16 +11004558long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559wait_for_completion_interruptible_timeout(struct completion *x,
4560 unsigned long timeout)
4561{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004562 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563}
4564EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4565
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004566/**
4567 * wait_for_completion_killable: - waits for completion of a task (killable)
4568 * @x: holds the state of this particular completion
4569 *
4570 * This waits to be signaled for completion of a specific task. It can be
4571 * interrupted by a kill signal.
4572 */
Matthew Wilcox009e5772007-12-06 12:29:54 -05004573int __sched wait_for_completion_killable(struct completion *x)
4574{
4575 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4576 if (t == -ERESTARTSYS)
4577 return t;
4578 return 0;
4579}
4580EXPORT_SYMBOL(wait_for_completion_killable);
4581
Dave Chinnerbe4de352008-08-15 00:40:44 -07004582/**
Sage Weil0aa12fb2010-05-29 09:12:30 -07004583 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4584 * @x: holds the state of this particular completion
4585 * @timeout: timeout value in jiffies
4586 *
4587 * This waits for either a completion of a specific task to be
4588 * signaled or for a specified timeout to expire. It can be
4589 * interrupted by a kill signal. The timeout is in jiffies.
4590 */
NeilBrown6bf41232011-01-05 12:50:16 +11004591long __sched
Sage Weil0aa12fb2010-05-29 09:12:30 -07004592wait_for_completion_killable_timeout(struct completion *x,
4593 unsigned long timeout)
4594{
4595 return wait_for_common(x, timeout, TASK_KILLABLE);
4596}
4597EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4598
4599/**
Dave Chinnerbe4de352008-08-15 00:40:44 -07004600 * try_wait_for_completion - try to decrement a completion without blocking
4601 * @x: completion structure
4602 *
4603 * Returns: 0 if a decrement cannot be done without blocking
4604 * 1 if a decrement succeeded.
4605 *
4606 * If a completion is being used as a counting completion,
4607 * attempt to decrement the counter without blocking. This
4608 * enables us to avoid waiting if the resource the completion
4609 * is protecting is not available.
4610 */
4611bool try_wait_for_completion(struct completion *x)
4612{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004613 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004614 int ret = 1;
4615
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004616 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004617 if (!x->done)
4618 ret = 0;
4619 else
4620 x->done--;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004621 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004622 return ret;
4623}
4624EXPORT_SYMBOL(try_wait_for_completion);
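
/*
 * Illustrative sketch (hypothetical names): a completion used as a
 * counting resource pool, the case the comment above alludes to.
 * x->done holds the token count; try_wait_for_completion() is a
 * non-blocking "take".
 */
static struct completion example_tokens;

static void example_pool_init(unsigned int n)
{
	init_completion(&example_tokens);
	while (n--)
		complete(&example_tokens);	/* seed one token per call */
}

static bool example_pool_try_take(void)
{
	return try_wait_for_completion(&example_tokens);
}

static void example_pool_put(void)
{
	complete(&example_tokens);		/* return a token */
}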
4625
4626/**
4627 * completion_done - Test to see if a completion has any waiters
4628 * @x: completion structure
4629 *
4630 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4631 * 1 if there are no waiters.
4632 *
4633 */
4634bool completion_done(struct completion *x)
4635{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004636 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004637 int ret = 1;
4638
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004639 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004640 if (!x->done)
4641 ret = 0;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004642 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004643 return ret;
4644}
4645EXPORT_SYMBOL(completion_done);
4646
Andi Kleen8cbbe862007-10-15 17:00:14 +02004647static long __sched
4648sleep_on_common(wait_queue_head_t *q, int state, long timeout)
Ingo Molnar0fec1712007-07-09 18:52:01 +02004649{
4650 unsigned long flags;
4651 wait_queue_t wait;
4652
4653 init_waitqueue_entry(&wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654
Andi Kleen8cbbe862007-10-15 17:00:14 +02004655 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656
Andi Kleen8cbbe862007-10-15 17:00:14 +02004657 spin_lock_irqsave(&q->lock, flags);
4658 __add_wait_queue(q, &wait);
4659 spin_unlock(&q->lock);
4660 timeout = schedule_timeout(timeout);
4661 spin_lock_irq(&q->lock);
4662 __remove_wait_queue(q, &wait);
4663 spin_unlock_irqrestore(&q->lock, flags);
4664
4665 return timeout;
4666}
4667
4668void __sched interruptible_sleep_on(wait_queue_head_t *q)
4669{
4670 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672EXPORT_SYMBOL(interruptible_sleep_on);
4673
Ingo Molnar0fec1712007-07-09 18:52:01 +02004674long __sched
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004675interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004677 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004678}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4680
Ingo Molnar0fec1712007-07-09 18:52:01 +02004681void __sched sleep_on(wait_queue_head_t *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004683 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004685EXPORT_SYMBOL(sleep_on);
4686
Ingo Molnar0fec1712007-07-09 18:52:01 +02004687long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004688{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004689 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004691EXPORT_SYMBOL(sleep_on_timeout);
4692
Ingo Molnarb29739f2006-06-27 02:54:51 -07004693#ifdef CONFIG_RT_MUTEXES
4694
4695/*
4696 * rt_mutex_setprio - set the current priority of a task
4697 * @p: task
4698 * @prio: prio value (kernel-internal form)
4699 *
4700 * This function changes the 'effective' priority of a task. It does
4701 * not touch ->normal_prio like __setscheduler().
4702 *
4703 * Used by the rt_mutex code to implement priority inheritance logic.
4704 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004705void rt_mutex_setprio(struct task_struct *p, int prio)
Ingo Molnarb29739f2006-06-27 02:54:51 -07004706{
4707 unsigned long flags;
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004708 int oldprio, on_rq, running;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004709 struct rq *rq;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004710 const struct sched_class *prev_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004711
4712 BUG_ON(prio < 0 || prio > MAX_PRIO);
4713
4714 rq = task_rq_lock(p, &flags);
4715
Steven Rostedta8027072010-09-20 15:13:34 -04004716 trace_sched_pi_setprio(p, prio);
Andrew Mortond5f9f942007-05-08 20:27:06 -07004717 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004718 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004719 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004720 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004721 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004722 dequeue_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004723 if (running)
4724 p->sched_class->put_prev_task(rq, p);
Ingo Molnardd41f592007-07-09 18:51:59 +02004725
4726 if (rt_prio(prio))
4727 p->sched_class = &rt_sched_class;
4728 else
4729 p->sched_class = &fair_sched_class;
4730
Ingo Molnarb29739f2006-06-27 02:54:51 -07004731 p->prio = prio;
4732
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004733 if (running)
4734 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004735 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004736 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004737
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004738 check_class_changed(rq, p, prev_class, oldprio);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004739 task_rq_unlock(rq, &flags);
4740}
4741
4742#endif
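
/*
 * Illustrative sketch for context (hypothetical names, assumes
 * <linux/rtmutex.h>): rt_mutex_setprio() is driven by the rt-mutex
 * code, not called by lock users.  A user only takes the lock; if a
 * higher-priority task blocks on it, the owner is boosted underneath
 * and de-boosted at unlock.
 */
static DEFINE_RT_MUTEX(example_pi_lock);

static void example_pi_critical_section(void)
{
	rt_mutex_lock(&example_pi_lock);	/* may boost the current owner */
	/* ... touch state shared with an RT task ... */
	rt_mutex_unlock(&example_pi_lock);	/* boost undone via rt_mutex_setprio() */
}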
4743
Ingo Molnar36c8b582006-07-03 00:25:41 -07004744void set_user_nice(struct task_struct *p, long nice)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745{
Ingo Molnardd41f592007-07-09 18:51:59 +02004746 int old_prio, delta, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004747 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004748 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749
4750 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4751 return;
4752 /*
 4753 * We have to be careful: if called from sys_setpriority(),
4754 * the task might be in the middle of scheduling on another CPU.
4755 */
4756 rq = task_rq_lock(p, &flags);
4757 /*
4758 * The RT priorities are set via sched_setscheduler(), but we still
4759 * allow the 'normal' nice value to be set - but as expected
 4760 * it won't have any effect on scheduling until the task is
Ingo Molnardd41f592007-07-09 18:51:59 +02004761 * SCHED_FIFO/SCHED_RR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762 */
Ingo Molnare05606d2007-07-09 18:51:59 +02004763 if (task_has_rt_policy(p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764 p->static_prio = NICE_TO_PRIO(nice);
4765 goto out_unlock;
4766 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004767 on_rq = p->se.on_rq;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004768 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004769 dequeue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770
Linus Torvalds1da177e2005-04-16 15:20:36 -07004771 p->static_prio = NICE_TO_PRIO(nice);
Peter Williams2dd73a42006-06-27 02:54:34 -07004772 set_load_weight(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004773 old_prio = p->prio;
4774 p->prio = effective_prio(p);
4775 delta = p->prio - old_prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004776
Ingo Molnardd41f592007-07-09 18:51:59 +02004777 if (on_rq) {
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004778 enqueue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004779 /*
Andrew Mortond5f9f942007-05-08 20:27:06 -07004780 * If the task increased its priority or is running and
4781 * lowered its priority, then reschedule its CPU:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782 */
Andrew Mortond5f9f942007-05-08 20:27:06 -07004783 if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004784 resched_task(rq->curr);
4785 }
4786out_unlock:
4787 task_rq_unlock(rq, &flags);
4788}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789EXPORT_SYMBOL(set_user_nice);
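
/*
 * Illustrative sketch (hypothetical name, assumes <linux/kthread.h>):
 * the common in-kernel use of set_user_nice() is a housekeeping kthread
 * demoting itself so it mostly runs when the CPU has nothing better to
 * do.
 */
static int example_housekeeper(void *unused)
{
	set_user_nice(current, 19);		/* weakest nice level */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}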
4790
Matt Mackalle43379f2005-05-01 08:59:00 -07004791/*
4792 * can_nice - check if a task can reduce its nice value
4793 * @p: task
4794 * @nice: nice value
4795 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004796int can_nice(const struct task_struct *p, const int nice)
Matt Mackalle43379f2005-05-01 08:59:00 -07004797{
Matt Mackall024f4742005-08-18 11:24:19 -07004798 /* convert nice value [19,-20] to rlimit style value [1,40] */
4799 int nice_rlim = 20 - nice;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004800
Jiri Slaby78d7d402010-03-05 13:42:54 -08004801 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
Matt Mackalle43379f2005-05-01 08:59:00 -07004802 capable(CAP_SYS_NICE));
4803}
4804
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805#ifdef __ARCH_WANT_SYS_NICE
4806
4807/*
4808 * sys_nice - change the priority of the current process.
4809 * @increment: priority increment
4810 *
4811 * sys_setpriority is a more generic, but much slower function that
4812 * does similar things.
4813 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004814SYSCALL_DEFINE1(nice, int, increment)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004815{
Ingo Molnar48f24c42006-07-03 00:25:40 -07004816 long nice, retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004817
4818 /*
4819 * Setpriority might change our priority at the same moment.
4820 * We don't have to worry. Conceptually one call occurs first
4821 * and we have a single winner.
4822 */
Matt Mackalle43379f2005-05-01 08:59:00 -07004823 if (increment < -40)
4824 increment = -40;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004825 if (increment > 40)
4826 increment = 40;
4827
Américo Wang2b8f8362009-02-16 18:54:21 +08004828 nice = TASK_NICE(current) + increment;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829 if (nice < -20)
4830 nice = -20;
4831 if (nice > 19)
4832 nice = 19;
4833
Matt Mackalle43379f2005-05-01 08:59:00 -07004834 if (increment < 0 && !can_nice(current, nice))
4835 return -EPERM;
4836
Linus Torvalds1da177e2005-04-16 15:20:36 -07004837 retval = security_task_setnice(current, nice);
4838 if (retval)
4839 return retval;
4840
4841 set_user_nice(current, nice);
4842 return 0;
4843}
4844
4845#endif
4846
4847/**
4848 * task_prio - return the priority value of a given task.
4849 * @p: the task in question.
4850 *
4851 * This is the priority value as seen by users in /proc.
 4852 * RT tasks are mapped to negative values (offset by -MAX_RT_PRIO);
 4853 * normal tasks span 0 to 39, with nice 0 mapping to 20.
4854 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004855int task_prio(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856{
4857 return p->prio - MAX_RT_PRIO;
4858}
4859
4860/**
4861 * task_nice - return the nice value of a given task.
4862 * @p: the task in question.
4863 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004864int task_nice(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865{
4866 return TASK_NICE(p);
4867}
Pavel Roskin150d8be2008-03-05 16:56:37 -05004868EXPORT_SYMBOL(task_nice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869
4870/**
4871 * idle_cpu - is a given cpu idle currently?
4872 * @cpu: the processor in question.
4873 */
4874int idle_cpu(int cpu)
4875{
4876 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4877}
4878
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879/**
4880 * idle_task - return the idle task for a given cpu.
4881 * @cpu: the processor in question.
4882 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004883struct task_struct *idle_task(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884{
4885 return cpu_rq(cpu)->idle;
4886}
4887
4888/**
4889 * find_process_by_pid - find a process with a matching PID value.
4890 * @pid: the pid in question.
4891 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02004892static struct task_struct *find_process_by_pid(pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893{
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07004894 return pid ? find_task_by_vpid(pid) : current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895}
4896
4897/* Actually do priority change: must hold rq lock. */
Ingo Molnardd41f592007-07-09 18:51:59 +02004898static void
4899__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004900{
Ingo Molnardd41f592007-07-09 18:51:59 +02004901 BUG_ON(p->se.on_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07004902
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 p->policy = policy;
4904 p->rt_priority = prio;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004905 p->normal_prio = normal_prio(p);
4906 /* we are holding p->pi_lock already */
4907 p->prio = rt_mutex_getprio(p);
Peter Zijlstraffd44db2009-11-10 20:12:01 +01004908 if (rt_prio(p->prio))
4909 p->sched_class = &rt_sched_class;
4910 else
4911 p->sched_class = &fair_sched_class;
Peter Williams2dd73a42006-06-27 02:54:34 -07004912 set_load_weight(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913}
4914
David Howellsc69e8d92008-11-14 10:39:19 +11004915/*
4916 * check the target process has a UID that matches the current process's
4917 */
4918static bool check_same_owner(struct task_struct *p)
4919{
4920 const struct cred *cred = current_cred(), *pcred;
4921 bool match;
4922
4923 rcu_read_lock();
4924 pcred = __task_cred(p);
4925 match = (cred->euid == pcred->euid ||
4926 cred->euid == pcred->uid);
4927 rcu_read_unlock();
4928 return match;
4929}
4930
Rusty Russell961ccdd2008-06-23 13:55:38 +10004931static int __sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07004932 const struct sched_param *param, bool user)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004934 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004935 unsigned long flags;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004936 const struct sched_class *prev_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004937 struct rq *rq;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004938 int reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004939
Steven Rostedt66e53932006-06-27 02:54:44 -07004940 /* may grab non-irq protected spin_locks */
4941 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004942recheck:
4943 /* double check policy once rq lock held */
Lennart Poetteringca94c442009-06-15 17:17:47 +02004944 if (policy < 0) {
4945 reset_on_fork = p->sched_reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004946 policy = oldpolicy = p->policy;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004947 } else {
4948 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4949 policy &= ~SCHED_RESET_ON_FORK;
4950
4951 if (policy != SCHED_FIFO && policy != SCHED_RR &&
4952 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4953 policy != SCHED_IDLE)
4954 return -EINVAL;
4955 }
4956
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957 /*
4958 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02004959 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4960 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961 */
4962 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004963 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04004964 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004965 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02004966 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967 return -EINVAL;
4968
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004969 /*
4970 * Allow unprivileged RT tasks to decrease priority:
4971 */
Rusty Russell961ccdd2008-06-23 13:55:38 +10004972 if (user && !capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02004973 if (rt_policy(policy)) {
Oleg Nesterova44702e2010-06-11 01:09:44 +02004974 unsigned long rlim_rtprio =
4975 task_rlimit(p, RLIMIT_RTPRIO);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004976
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004977 /* can't set/change the rt policy */
4978 if (policy != p->policy && !rlim_rtprio)
4979 return -EPERM;
4980
4981 /* can't increase priority */
4982 if (param->sched_priority > p->rt_priority &&
4983 param->sched_priority > rlim_rtprio)
4984 return -EPERM;
4985 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004986 /*
 4987 * Like positive nice levels, don't allow tasks to
4988 * move out of SCHED_IDLE either:
4989 */
4990 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
4991 return -EPERM;
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004992
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004993 /* can't change other user's priorities */
David Howellsc69e8d92008-11-14 10:39:19 +11004994 if (!check_same_owner(p))
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004995 return -EPERM;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004996
4997 /* Normal users shall not reset the sched_reset_on_fork flag */
4998 if (p->sched_reset_on_fork && !reset_on_fork)
4999 return -EPERM;
Olivier Croquette37e4ab32005-06-25 14:57:32 -07005000 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005001
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07005002 if (user) {
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09005003 retval = security_task_setscheduler(p);
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07005004 if (retval)
5005 return retval;
5006 }
5007
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07005009 * make sure no PI-waiters arrive (or leave) while we are
5010 * changing the priority of the task:
5011 */
Thomas Gleixner1d615482009-11-17 14:54:03 +01005012 raw_spin_lock_irqsave(&p->pi_lock, flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005013 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014 * To be able to change p->policy safely, the appropriate
5015 * runqueue lock must be held.
5016 */
Ingo Molnarb29739f2006-06-27 02:54:51 -07005017 rq = __task_rq_lock(p);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005018
Peter Zijlstra34f971f2010-09-22 13:53:15 +02005019 /*
 5020 * Changing the policy of the stop threads is a very bad idea
5021 */
5022 if (p == rq->stop) {
5023 __task_rq_unlock(rq);
5024 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5025 return -EINVAL;
5026 }
5027
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005028#ifdef CONFIG_RT_GROUP_SCHED
5029 if (user) {
5030 /*
5031 * Do not allow realtime tasks into groups that have no runtime
5032 * assigned.
5033 */
5034 if (rt_bandwidth_enabled() && rt_policy(policy) &&
Mike Galbraithf4493772011-01-13 04:54:50 +01005035 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5036 !task_group_is_autogroup(task_group(p))) {
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005037 __task_rq_unlock(rq);
5038 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5039 return -EPERM;
5040 }
5041 }
5042#endif
5043
Linus Torvalds1da177e2005-04-16 15:20:36 -07005044 /* recheck policy now with rq lock held */
5045 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5046 policy = oldpolicy = -1;
Ingo Molnarb29739f2006-06-27 02:54:51 -07005047 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01005048 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049 goto recheck;
5050 }
Ingo Molnardd41f592007-07-09 18:51:59 +02005051 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01005052 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005053 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005054 deactivate_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005055 if (running)
5056 p->sched_class->put_prev_task(rq, p);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005057
Lennart Poetteringca94c442009-06-15 17:17:47 +02005058 p->sched_reset_on_fork = reset_on_fork;
5059
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01005061 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02005062 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005063
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005064 if (running)
5065 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005066 if (on_rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02005067 activate_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005068
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005069 check_class_changed(rq, p, prev_class, oldprio);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005070 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01005071 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005072
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07005073 rt_mutex_adjust_pi(p);
5074
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075 return 0;
5076}
Rusty Russell961ccdd2008-06-23 13:55:38 +10005077
5078/**
5079 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5080 * @p: the task in question.
5081 * @policy: new policy.
5082 * @param: structure containing the new RT priority.
5083 *
5084 * NOTE that the task may already be dead.
5085 */
5086int sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005087 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005088{
5089 return __sched_setscheduler(p, policy, param, true);
5090}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091EXPORT_SYMBOL_GPL(sched_setscheduler);
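/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): a kernel-side caller holding a reference on a task could
 * switch it to SCHED_FIFO like this; 'target' and the priority of 50
 * are hypothetical.
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *	int err = sched_setscheduler(target, SCHED_FIFO, &sp);
 *	if (err)
 *		pr_warn("sched_setscheduler: %d\n", err);
 *
 * Permission checks run as for the syscall path, so a restricted
 * context may see -EPERM.
 */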
5092
Rusty Russell961ccdd2008-06-23 13:55:38 +10005093/**
5094 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5095 * @p: the task in question.
5096 * @policy: new policy.
5097 * @param: structure containing the new RT priority.
5098 *
5099 * Just like sched_setscheduler, only don't bother checking if the
5100 * current context has permission. For example, this is needed in
5101 * stop_machine(): we create temporary high priority worker threads,
5102 * but our caller might not have that capability.
5103 */
5104int sched_setscheduler_nocheck(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005105 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005106{
5107 return __sched_setscheduler(p, policy, param, false);
5108}
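/*
 * Illustrative sketch (editor's addition): the _nocheck variant fits
 * kernel threads that must become realtime regardless of the creating
 * context's capabilities, e.g. right after kthread_create(); 'worker'
 * is a hypothetical task pointer.
 *
 *	struct sched_param sp = { .sched_priority = MAX_RT_PRIO - 1 };
 *	sched_setscheduler_nocheck(worker, SCHED_FIFO, &sp);
 */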
5109
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005110static int
5111do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113 struct sched_param lparam;
5114 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005115 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116
5117 if (!param || pid < 0)
5118 return -EINVAL;
5119 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5120 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005121
5122 rcu_read_lock();
5123 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005125 if (p != NULL)
5126 retval = sched_setscheduler(p, policy, &lparam);
5127 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07005128
Linus Torvalds1da177e2005-04-16 15:20:36 -07005129 return retval;
5130}
5131
5132/**
5133 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5134 * @pid: the pid in question.
5135 * @policy: new policy.
5136 * @param: structure containing the new RT priority.
5137 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005138SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5139 struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140{
Jason Baronc21761f2006-01-18 17:43:03 -08005141 /* negative values for policy are not valid */
5142 if (policy < 0)
5143 return -EINVAL;
5144
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145 return do_sched_setscheduler(pid, policy, param);
5146}
5147
5148/**
5149 * sys_sched_setparam - set/change the RT priority of a thread
5150 * @pid: the pid in question.
5151 * @param: structure containing the new RT priority.
5152 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005153SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154{
5155 return do_sched_setscheduler(pid, -1, param);
5156}
5157
5158/**
5159 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5160 * @pid: the pid in question.
5161 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005162SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005164 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005165 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166
5167 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005168 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169
5170 retval = -ESRCH;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005171 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172 p = find_process_by_pid(pid);
5173 if (p) {
5174 retval = security_task_getscheduler(p);
5175 if (!retval)
Lennart Poetteringca94c442009-06-15 17:17:47 +02005176 retval = p->policy
5177 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 }
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005179 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180 return retval;
5181}
5182
5183/**
Lennart Poetteringca94c442009-06-15 17:17:47 +02005184 * sys_sched_getparam - get the RT priority of a thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185 * @pid: the pid in question.
5186 * @param: structure containing the RT priority.
5187 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005188SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005189{
5190 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005191 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005192 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193
5194 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005195 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005197 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198 p = find_process_by_pid(pid);
5199 retval = -ESRCH;
5200 if (!p)
5201 goto out_unlock;
5202
5203 retval = security_task_getscheduler(p);
5204 if (retval)
5205 goto out_unlock;
5206
5207 lp.sched_priority = p->rt_priority;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005208 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209
5210 /*
5211 * This one might sleep, we cannot do it with a spinlock held ...
5212 */
5213 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5214
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215 return retval;
5216
5217out_unlock:
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005218 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219 return retval;
5220}
5221
Rusty Russell96f874e2008-11-25 02:35:14 +10305222long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305224 cpumask_var_t cpus_allowed, new_mask;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005225 struct task_struct *p;
5226 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005228 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005229 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230
5231 p = find_process_by_pid(pid);
5232 if (!p) {
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005233 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005234 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235 return -ESRCH;
5236 }
5237
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005238 /* Prevent p going away */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239 get_task_struct(p);
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005240 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305242 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5243 retval = -ENOMEM;
5244 goto out_put_task;
5245 }
5246 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5247 retval = -ENOMEM;
5248 goto out_free_cpus_allowed;
5249 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005250 retval = -EPERM;
David Howellsc69e8d92008-11-14 10:39:19 +11005251 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252 goto out_unlock;
5253
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09005254 retval = security_task_setscheduler(p);
David Quigleye7834f82006-06-23 02:03:59 -07005255 if (retval)
5256 goto out_unlock;
5257
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305258 cpuset_cpus_allowed(p, cpus_allowed);
5259 cpumask_and(new_mask, in_mask, cpus_allowed);
Peter Zijlstra49246272010-10-17 21:46:10 +02005260again:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305261 retval = set_cpus_allowed_ptr(p, new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262
Paul Menage8707d8b2007-10-18 23:40:22 -07005263 if (!retval) {
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305264 cpuset_cpus_allowed(p, cpus_allowed);
5265 if (!cpumask_subset(new_mask, cpus_allowed)) {
Paul Menage8707d8b2007-10-18 23:40:22 -07005266 /*
5267 * We must have raced with a concurrent cpuset
5268 * update. Just reset the cpus_allowed to the
5269 * cpuset's cpus_allowed
5270 */
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305271 cpumask_copy(new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07005272 goto again;
5273 }
5274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275out_unlock:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305276 free_cpumask_var(new_mask);
5277out_free_cpus_allowed:
5278 free_cpumask_var(cpus_allowed);
5279out_put_task:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005281 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 return retval;
5283}
5284
5285static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
Rusty Russell96f874e2008-11-25 02:35:14 +10305286 struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005287{
Rusty Russell96f874e2008-11-25 02:35:14 +10305288 if (len < cpumask_size())
5289 cpumask_clear(new_mask);
5290 else if (len > cpumask_size())
5291 len = cpumask_size();
5292
Linus Torvalds1da177e2005-04-16 15:20:36 -07005293 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5294}
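/*
 * Worked example (editor's note): if cpumask_size() is 8 bytes
 * (NR_CPUS <= 64), a userspace len of 4 copies only the low 32 CPU
 * bits and leaves the upper bits cleared, while a len of 16 is
 * silently truncated to 8, so a short or oversized buffer can never
 * set bits for CPUs the caller did not actually supply.
 */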
5295
5296/**
5297 * sys_sched_setaffinity - set the cpu affinity of a process
5298 * @pid: pid of the process
5299 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5300 * @user_mask_ptr: user-space pointer to the new cpu mask
5301 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005302SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5303 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305305 cpumask_var_t new_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306 int retval;
5307
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305308 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5309 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305311 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5312 if (retval == 0)
5313 retval = sched_setaffinity(pid, new_mask);
5314 free_cpumask_var(new_mask);
5315 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316}
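/*
 * Illustrative userspace counterpart (editor's sketch): pinning the
 * calling thread to CPU 2 through the glibc wrapper (_GNU_SOURCE):
 *
 *	cpu_set_t set;
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set))
 *		perror("sched_setaffinity");
 *
 * A pid of 0 means the calling thread; find_process_by_pid() above
 * resolves it to current.
 */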
5317
Rusty Russell96f874e2008-11-25 02:35:14 +10305318long sched_getaffinity(pid_t pid, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005320 struct task_struct *p;
Thomas Gleixner31605682009-12-08 20:24:16 +00005321 unsigned long flags;
5322 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005323 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005325 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005326 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327
5328 retval = -ESRCH;
5329 p = find_process_by_pid(pid);
5330 if (!p)
5331 goto out_unlock;
5332
David Quigleye7834f82006-06-23 02:03:59 -07005333 retval = security_task_getscheduler(p);
5334 if (retval)
5335 goto out_unlock;
5336
Thomas Gleixner31605682009-12-08 20:24:16 +00005337 rq = task_rq_lock(p, &flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10305338 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
Thomas Gleixner31605682009-12-08 20:24:16 +00005339 task_rq_unlock(rq, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005340
5341out_unlock:
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005342 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005343 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344
Ulrich Drepper9531b622007-08-09 11:16:46 +02005345 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005346}
5347
5348/**
5349 * sys_sched_getaffinity - get the cpu affinity of a process
5350 * @pid: pid of the process
5351 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5352 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5353 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005354SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5355 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005356{
5357 int ret;
Rusty Russellf17c8602008-11-25 02:35:11 +10305358 cpumask_var_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005359
Anton Blanchard84fba5e2010-04-06 17:02:19 +10005360 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005361 return -EINVAL;
5362 if (len & (sizeof(unsigned long)-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363 return -EINVAL;
5364
Rusty Russellf17c8602008-11-25 02:35:11 +10305365 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5366 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367
Rusty Russellf17c8602008-11-25 02:35:11 +10305368 ret = sched_getaffinity(pid, mask);
5369 if (ret == 0) {
KOSAKI Motohiro8bc037f2010-03-17 09:36:58 +09005370 size_t retlen = min_t(size_t, len, cpumask_size());
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005371
5372 if (copy_to_user(user_mask_ptr, mask, retlen))
Rusty Russellf17c8602008-11-25 02:35:11 +10305373 ret = -EFAULT;
5374 else
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005375 ret = retlen;
Rusty Russellf17c8602008-11-25 02:35:11 +10305376 }
5377 free_cpumask_var(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005378
Rusty Russellf17c8602008-11-25 02:35:11 +10305379 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005380}
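/*
 * Editor's note: on success the raw syscall returns retlen, the
 * number of mask bytes written (8 on a typical 64-bit build with
 * nr_cpu_ids <= 64), not 0. The glibc wrapper normalizes this to 0,
 * so only callers invoking syscall(2) directly see the byte count.
 */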
5381
5382/**
5383 * sys_sched_yield - yield the current processor to other threads.
5384 *
Ingo Molnardd41f592007-07-09 18:51:59 +02005385 * This function yields the current CPU to other tasks. If there are no
5386 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005388SYSCALL_DEFINE0(sched_yield)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005389{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005390 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391
Ingo Molnar2d723762007-10-15 17:00:12 +02005392 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02005393 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394
5395 /*
5396 * Since we are going to call schedule() anyway, there's
5397 * no need to preempt or enable interrupts:
5398 */
5399 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07005400 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Thomas Gleixner9828ea92009-12-03 20:55:53 +01005401 do_raw_spin_unlock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005402 preempt_enable_no_resched();
5403
5404 schedule();
5405
5406 return 0;
5407}
5408
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005409static inline int should_resched(void)
5410{
5411 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5412}
5413
Andrew Mortone7b38402006-06-30 01:56:00 -07005414static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415{
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02005416 add_preempt_count(PREEMPT_ACTIVE);
5417 schedule();
5418 sub_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419}
5420
Herbert Xu02b67cc32008-01-25 21:08:28 +01005421int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005423 if (should_resched()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 __cond_resched();
5425 return 1;
5426 }
5427 return 0;
5428}
Herbert Xu02b67cc32008-01-25 21:08:28 +01005429EXPORT_SYMBOL(_cond_resched);
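/*
 * Illustrative pattern (editor's sketch): long-running kernel loops
 * use the cond_resched() wrapper, which funnels into _cond_resched(),
 * to offer a voluntary preemption point; 'process_item' is a
 * hypothetical helper.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */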
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430
5431/*
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005432 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433 * call schedule, and on return reacquire the lock.
5434 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005435 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436 * operations here to prevent schedule() from being called twice (once via
5437 * spin_unlock(), once by hand).
5438 */
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005439int __cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005441 int resched = should_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07005442 int ret = 0;
5443
Peter Zijlstraf607c662009-07-20 19:16:29 +02005444 lockdep_assert_held(lock);
5445
Nick Piggin95c354f2008-01-30 13:31:20 +01005446 if (spin_needbreak(lock) || resched) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 spin_unlock(lock);
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005448 if (resched)
Nick Piggin95c354f2008-01-30 13:31:20 +01005449 __cond_resched();
5450 else
5451 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07005452 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005454 }
Jan Kara6df3cec2005-06-13 15:52:32 -07005455 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005457EXPORT_SYMBOL(__cond_resched_lock);
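/*
 * Illustrative pattern (editor's sketch): when walking a long list
 * under a spinlock, the cond_resched_lock() wrapper lets both lock
 * waiters and the scheduler make progress; a nonzero return means the
 * lock was dropped and retaken, so stale iterators must not be
 * trusted. 'mylock', 'head' and the label are hypothetical.
 *
 *	spin_lock(&mylock);
 * restart:
 *	list_for_each_entry(pos, &head, node) {
 *		if (cond_resched_lock(&mylock))
 *			goto restart;	// list may have changed meanwhile
 *	}
 *	spin_unlock(&mylock);
 */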
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005459int __sched __cond_resched_softirq(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460{
5461 BUG_ON(!in_softirq());
5462
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005463 if (should_resched()) {
Thomas Gleixner98d825672007-05-23 13:58:18 -07005464 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465 __cond_resched();
5466 local_bh_disable();
5467 return 1;
5468 }
5469 return 0;
5470}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005471EXPORT_SYMBOL(__cond_resched_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472
Linus Torvalds1da177e2005-04-16 15:20:36 -07005473/**
5474 * yield - yield the current processor to other threads.
5475 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08005476 * This is a shortcut for kernel-space yielding - it marks the
Linus Torvalds1da177e2005-04-16 15:20:36 -07005477 * thread runnable and calls sys_sched_yield().
5478 */
5479void __sched yield(void)
5480{
5481 set_current_state(TASK_RUNNING);
5482 sys_sched_yield();
5483}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005484EXPORT_SYMBOL(yield);
5485
Mike Galbraithd95f4122011-02-01 09:50:51 -05005486/**
5487 * yield_to - yield the current processor to another thread in
5488 * your thread group, or accelerate that thread toward the
5489 * processor it's on.
5490 *
5491 * It's the caller's job to ensure that the target task struct
5492 * can't go away on us before we can do any checks.
5493 *
5494 * Returns true if we indeed boosted the target task.
5495 */
5496bool __sched yield_to(struct task_struct *p, bool preempt)
5497{
5498 struct task_struct *curr = current;
5499 struct rq *rq, *p_rq;
5500 unsigned long flags;
5501 bool yielded = 0;
5502
5503 local_irq_save(flags);
5504 rq = this_rq();
5505
5506again:
5507 p_rq = task_rq(p);
5508 double_rq_lock(rq, p_rq);
5509 while (task_rq(p) != p_rq) {
5510 double_rq_unlock(rq, p_rq);
5511 goto again;
5512 }
5513
5514 if (!curr->sched_class->yield_to_task)
5515 goto out;
5516
5517 if (curr->sched_class != p->sched_class)
5518 goto out;
5519
5520 if (task_running(p_rq, p) || p->state)
5521 goto out;
5522
5523 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
5524 if (yielded)
5525 schedstat_inc(rq, yld_count);
5526
5527out:
5528 double_rq_unlock(rq, p_rq);
5529 local_irq_restore(flags);
5530
5531 if (yielded)
5532 schedule();
5533
5534 return yielded;
5535}
5536EXPORT_SYMBOL_GPL(yield_to);
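/*
 * Editor's note: the intended consumer of this export is directed
 * yield in virtualization, e.g. a hypervisor that detects a spinning
 * vcpu and boosts the vcpu task believed to hold the contended lock.
 * A minimal sketch, with 'holder' a hypothetical task pointer the
 * caller has already pinned:
 *
 *	if (!yield_to(holder, true))
 *		cpu_relax();	// boost failed, keep spinning politely
 */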
5537
Linus Torvalds1da177e2005-04-16 15:20:36 -07005538/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005539 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07005540 * that process accounting knows that this is a task in IO wait state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005541 */
5542void __sched io_schedule(void)
5543{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005544 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005545
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005546 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005547 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005548 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549 schedule();
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005550 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005551 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005552 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005553}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005554EXPORT_SYMBOL(io_schedule);
5555
5556long __sched io_schedule_timeout(long timeout)
5557{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005558 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005559 long ret;
5560
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005561 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005562 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005563 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005564 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005565 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005567 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005568 return ret;
5569}
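/*
 * Illustrative usage (editor's sketch): a driver waiting on slow
 * device I/O uses the io_ variants so the sleep is accounted as
 * iowait; 'done' is a hypothetical completion flag.
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	while (!done) {
 *		io_schedule_timeout(HZ / 10);
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */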
5570
5571/**
5572 * sys_sched_get_priority_max - return maximum RT priority.
5573 * @policy: scheduling class.
5574 *
5575 * this syscall returns the maximum rt_priority that can be used
5576 * by a given scheduling class.
5577 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005578SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005579{
5580 int ret = -EINVAL;
5581
5582 switch (policy) {
5583 case SCHED_FIFO:
5584 case SCHED_RR:
5585 ret = MAX_USER_RT_PRIO-1;
5586 break;
5587 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005588 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005589 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005590 ret = 0;
5591 break;
5592 }
5593 return ret;
5594}
5595
5596/**
5597 * sys_sched_get_priority_min - return minimum RT priority.
5598 * @policy: scheduling class.
5599 *
5600 * this syscall returns the minimum rt_priority that can be used
5601 * by a given scheduling class.
5602 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005603SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005604{
5605 int ret = -EINVAL;
5606
5607 switch (policy) {
5608 case SCHED_FIFO:
5609 case SCHED_RR:
5610 ret = 1;
5611 break;
5612 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005613 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005614 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005615 ret = 0;
5616 }
5617 return ret;
5618}
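/*
 * Worked example (editor's note): with MAX_USER_RT_PRIO at its usual
 * value of 100, the two syscalls above report a SCHED_FIFO/SCHED_RR
 * priority range of 1..99, while SCHED_NORMAL, SCHED_BATCH and
 * SCHED_IDLE report 0 for both bounds, since they carry no RT
 * priority at all.
 */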
5619
5620/**
5621 * sys_sched_rr_get_interval - return the default timeslice of a process.
5622 * @pid: pid of the process.
5623 * @interval: userspace pointer to the timeslice value.
5624 *
5625 * this syscall writes the default timeslice value of a given process
5626 * into the user-space timespec buffer. A value of '0' means infinity.
5627 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01005628SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01005629 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005630{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005631 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005632 unsigned int time_slice;
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005633 unsigned long flags;
5634 struct rq *rq;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005635 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005636 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005637
5638 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005639 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640
5641 retval = -ESRCH;
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005642 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643 p = find_process_by_pid(pid);
5644 if (!p)
5645 goto out_unlock;
5646
5647 retval = security_task_getscheduler(p);
5648 if (retval)
5649 goto out_unlock;
5650
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005651 rq = task_rq_lock(p, &flags);
5652 time_slice = p->sched_class->get_rr_interval(rq, p);
5653 task_rq_unlock(rq, &flags);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005654
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005655 rcu_read_unlock();
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005656 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005657 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005659
Linus Torvalds1da177e2005-04-16 15:20:36 -07005660out_unlock:
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005661 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005662 return retval;
5663}
5664
Steven Rostedt7c731e02008-05-12 21:20:41 +02005665static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005666
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005667void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005669 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005670 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671
Linus Torvalds1da177e2005-04-16 15:20:36 -07005672 state = p->state ? __ffs(p->state) + 1 : 0;
Erik Gilling28d06862010-11-19 18:08:51 -08005673 printk(KERN_INFO "%-15.15s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005674 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02005675#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07005676 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005677 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005679 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680#else
5681 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005682 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005683 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005684 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685#endif
5686#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05005687 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005688#endif
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005689 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
David Rientjesaa47b7e2009-05-04 01:38:05 -07005690 task_pid_nr(p), task_pid_nr(p->real_parent),
5691 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005692
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01005693 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005694}
5695
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005696void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005697{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005698 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005699
Ingo Molnar4bd77322007-07-11 21:21:47 +02005700#if BITS_PER_LONG == 32
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005701 printk(KERN_INFO
5702 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703#else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005704 printk(KERN_INFO
5705 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005706#endif
5707 read_lock(&tasklist_lock);
5708 do_each_thread(g, p) {
5709 /*
5710	 * reset the NMI-timeout, listing all tasks on a slow
5711	 * console might take a lot of time:
5712 */
5713 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07005714 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005715 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005716 } while_each_thread(g, p);
5717
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07005718 touch_all_softlockup_watchdogs();
5719
Ingo Molnardd41f592007-07-09 18:51:59 +02005720#ifdef CONFIG_SCHED_DEBUG
5721 sysrq_sched_debug_show();
5722#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005723 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005724 /*
5725 * Only show locks if all tasks are dumped:
5726 */
Shmulik Ladkani93335a22009-11-25 15:23:41 +02005727 if (!state_filter)
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005728 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005729}
5730
Ingo Molnar1df21052007-07-09 18:51:58 +02005731void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5732{
Ingo Molnardd41f592007-07-09 18:51:59 +02005733 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02005734}
5735
Ingo Molnarf340c0d2005-06-28 16:40:42 +02005736/**
5737 * init_idle - set up an idle thread for a given CPU
5738 * @idle: task in question
5739 * @cpu: cpu the idle task belongs to
5740 *
5741 * NOTE: this function does not set the idle thread's NEED_RESCHED
5742 * flag, to make booting more robust.
5743 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07005744void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005745{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005746 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005747 unsigned long flags;
5748
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005749 raw_spin_lock_irqsave(&rq->lock, flags);
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01005750
Ingo Molnardd41f592007-07-09 18:51:59 +02005751 __sched_fork(idle);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01005752 idle->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02005753 idle->se.exec_start = sched_clock();
5754
Rusty Russell96f874e2008-11-25 02:35:14 +10305755 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005756 /*
5757	 * We're having a chicken and egg problem: even though we are
5758 * holding rq->lock, the cpu isn't yet set to this cpu so the
5759 * lockdep check in task_group() will fail.
5760 *
5761	 * Similar case to sched_fork(); alternatively we could
5762 * use task_rq_lock() here and obtain the other rq->lock.
5763 *
5764 * Silence PROVE_RCU
5765 */
5766 rcu_read_lock();
Ingo Molnardd41f592007-07-09 18:51:59 +02005767 __set_task_cpu(idle, cpu);
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005768 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005769
Linus Torvalds1da177e2005-04-16 15:20:36 -07005770 rq->curr = rq->idle = idle;
Nick Piggin4866cde2005-06-25 14:57:23 -07005771#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5772 idle->oncpu = 1;
5773#endif
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005774 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005775
5776 /* Set the preempt count _outside_ the spinlocks! */
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005777#if defined(CONFIG_PREEMPT)
5778 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5779#else
Al Viroa1261f52005-11-13 16:06:55 -08005780 task_thread_info(idle)->preempt_count = 0;
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005781#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02005782 /*
5783 * The idle tasks have their own, simple scheduling class:
5784 */
5785 idle->sched_class = &idle_sched_class;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01005786 ftrace_graph_init_task(idle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005787}
5788
5789/*
5790 * In a system that switches off the HZ timer nohz_cpu_mask
5791 * indicates which cpus entered this state. This is used
5792 * in the rcu update to wait only for active cpus. For systems
5793 * which do not switch off the HZ timer nohz_cpu_mask should
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305794 * always be CPU_BITS_NONE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005795 */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305796cpumask_var_t nohz_cpu_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797
Ingo Molnar19978ca2007-11-09 22:39:38 +01005798/*
5799 * Increase the granularity value when there are more CPUs,
5800 * because with more CPUs the 'effective latency' as visible
5801 * to users decreases. But the relationship is not linear,
5802 * so pick a second-best guess by going with the log2 of the
5803 * number of CPUs.
5804 *
5805 * This idea comes from the SD scheduler of Con Kolivas:
5806 */
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005807static int get_update_sysctl_factor(void)
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005808{
Mike Galbraith4ca3ef72009-12-10 09:25:53 +01005809 unsigned int cpus = min_t(int, num_online_cpus(), 8);
Christian Ehrhardt1983a922009-11-30 12:16:47 +01005810 unsigned int factor;
5811
5812 switch (sysctl_sched_tunable_scaling) {
5813 case SCHED_TUNABLESCALING_NONE:
5814 factor = 1;
5815 break;
5816 case SCHED_TUNABLESCALING_LINEAR:
5817 factor = cpus;
5818 break;
5819 case SCHED_TUNABLESCALING_LOG:
5820 default:
5821 factor = 1 + ilog2(cpus);
5822 break;
5823 }
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005824
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005825 return factor;
5826}
5827
5828static void update_sysctl(void)
5829{
5830 unsigned int factor = get_update_sysctl_factor();
5831
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005832#define SET_SYSCTL(name) \
5833 (sysctl_##name = (factor) * normalized_sysctl_##name)
5834 SET_SYSCTL(sched_min_granularity);
5835 SET_SYSCTL(sched_latency);
5836 SET_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005837#undef SET_SYSCTL
5838}
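/*
 * Worked example (editor's note): with 8 or more CPUs online and the
 * default SCHED_TUNABLESCALING_LOG policy, factor = 1 + ilog2(8) = 4;
 * assuming the usual normalized sched_latency of 6ms, the effective
 * value becomes 24ms. The CPU count is clamped to 8, so larger
 * machines get the same factor.
 */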
5839
Ingo Molnar19978ca2007-11-09 22:39:38 +01005840static inline void sched_init_granularity(void)
5841{
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005842 update_sysctl();
Ingo Molnar19978ca2007-11-09 22:39:38 +01005843}
5844
Linus Torvalds1da177e2005-04-16 15:20:36 -07005845#ifdef CONFIG_SMP
5846/*
5847 * This is how migration works:
5848 *
Tejun Heo969c7922010-05-06 18:49:21 +02005849 * 1) we invoke migration_cpu_stop() on the target CPU using
5850 * stop_one_cpu().
5851 * 2) stopper starts to run (implicitly forcing the migrated thread
5852 * off the CPU)
5853 * 3) it checks whether the migrated task is still in the wrong runqueue.
5854 * 4) if it's in the wrong runqueue then the migration thread removes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 * it and puts it into the right queue.
Tejun Heo969c7922010-05-06 18:49:21 +02005856 * 5) stopper completes and stop_one_cpu() returns and the migration
5857 * is done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005858 */
5859
5860/*
5861 * Change a given task's CPU affinity. Migrate the thread to a
5862 * proper CPU and schedule it away if the CPU it's executing on
5863 * is removed from the allowed bitmask.
5864 *
5865 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005866 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07005867 * call is not atomic; no spinlocks may be held.
5868 */
Rusty Russell96f874e2008-11-25 02:35:14 +10305869int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005870{
5871 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005872 struct rq *rq;
Tejun Heo969c7922010-05-06 18:49:21 +02005873 unsigned int dest_cpu;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005874 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005875
Peter Zijlstra65cc8e42010-03-25 21:05:16 +01005876 /*
5877	 * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task() can
5878 * drop the rq->lock and still rely on ->cpus_allowed.
5879 */
5880again:
5881 while (task_is_waking(p))
5882 cpu_relax();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005883 rq = task_rq_lock(p, &flags);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +01005884 if (task_is_waking(p)) {
5885 task_rq_unlock(rq, &flags);
5886 goto again;
5887 }
Peter Zijlstrae2912002009-12-16 18:04:36 +01005888
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005889 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005890 ret = -EINVAL;
5891 goto out;
5892 }
5893
David Rientjes9985b0b2008-06-05 12:57:11 -07005894 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
Rusty Russell96f874e2008-11-25 02:35:14 +10305895 !cpumask_equal(&p->cpus_allowed, new_mask))) {
David Rientjes9985b0b2008-06-05 12:57:11 -07005896 ret = -EINVAL;
5897 goto out;
5898 }
5899
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005900 if (p->sched_class->set_cpus_allowed)
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005901 p->sched_class->set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005902 else {
Rusty Russell96f874e2008-11-25 02:35:14 +10305903 cpumask_copy(&p->cpus_allowed, new_mask);
5904 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005905 }
5906
Linus Torvalds1da177e2005-04-16 15:20:36 -07005907 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10305908 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909 goto out;
5910
Tejun Heo969c7922010-05-06 18:49:21 +02005911 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05305912 if (migrate_task(p, rq)) {
Tejun Heo969c7922010-05-06 18:49:21 +02005913 struct migration_arg arg = { p, dest_cpu };
Linus Torvalds1da177e2005-04-16 15:20:36 -07005914 /* Need help from migration thread: drop lock and wait. */
5915 task_rq_unlock(rq, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005916 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005917 tlb_migrate_finish(p->mm);
5918 return 0;
5919 }
5920out:
5921 task_rq_unlock(rq, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005922
Linus Torvalds1da177e2005-04-16 15:20:36 -07005923 return ret;
5924}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005925EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
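/*
 * Illustrative usage (editor's sketch): pinning a task to one CPU and
 * later releasing it; 'p' is a hypothetical task pointer the caller
 * holds a reference on.
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(3)))
 *		pr_warn("cpu 3 not active?\n");
 *	...
 *	set_cpus_allowed_ptr(p, cpu_possible_mask);
 */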
Linus Torvalds1da177e2005-04-16 15:20:36 -07005926
5927/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005928 * Move (not current) task off this cpu, onto dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07005929 * this because either it can't run here any more (set_cpus_allowed()
5930 * away from this CPU, or CPU going down), or because we're
5931 * attempting to rebalance this task on exec (sched_exec).
5932 *
5933 * So we race with normal scheduler movements, but that's OK, as long
5934 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07005935 *
5936 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005937 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07005938static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005939{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005940 struct rq *rq_dest, *rq_src;
Peter Zijlstrae2912002009-12-16 18:04:36 +01005941 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005942
Max Krasnyanskye761b772008-07-15 04:43:49 -07005943 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07005944 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945
5946 rq_src = cpu_rq(src_cpu);
5947 rq_dest = cpu_rq(dest_cpu);
5948
5949 double_rq_lock(rq_src, rq_dest);
5950 /* Already moved. */
5951 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005952 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005953 /* Affinity changed (again). */
Rusty Russell96f874e2008-11-25 02:35:14 +10305954 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005955 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005956
Peter Zijlstrae2912002009-12-16 18:04:36 +01005957 /*
5958 * If we're not on a rq, the next wake-up will ensure we're
5959 * placed properly.
5960 */
5961 if (p->se.on_rq) {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005962 deactivate_task(rq_src, p, 0);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005963 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005964 activate_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02005965 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005966 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005967done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07005968 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005969fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005970 double_rq_unlock(rq_src, rq_dest);
Kirill Korotaevefc30812006-06-27 02:54:32 -07005971 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005972}
5973
5974/*
Tejun Heo969c7922010-05-06 18:49:21 +02005975 * migration_cpu_stop - this will be executed by a highprio stopper thread
5976 * and performs thread migration by bumping thread off CPU then
5977 * 'pushing' onto another runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005978 */
Tejun Heo969c7922010-05-06 18:49:21 +02005979static int migration_cpu_stop(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005980{
Tejun Heo969c7922010-05-06 18:49:21 +02005981 struct migration_arg *arg = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005982
Tejun Heo969c7922010-05-06 18:49:21 +02005983 /*
5984 * The original target cpu might have gone down and we might
5985 * be on another cpu but it doesn't matter.
5986 */
5987 local_irq_disable();
5988 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5989 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005990 return 0;
5991}
5992
5993#ifdef CONFIG_HOTPLUG_CPU
Linus Torvalds1da177e2005-04-16 15:20:36 -07005994
Ingo Molnar48f24c42006-07-03 00:25:40 -07005995/*
5996 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005997 * offline.
5998 */
5999void idle_task_exit(void)
6000{
6001 struct mm_struct *mm = current->active_mm;
6002
6003 BUG_ON(cpu_online(smp_processor_id()));
6004
6005 if (mm != &init_mm)
6006 switch_mm(mm, &init_mm, current);
6007 mmdrop(mm);
6008}
6009
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006010/*
6011 * While a dead CPU has no uninterruptible tasks queued at this point,
6012 * it might still have a nonzero ->nr_uninterruptible counter, because
6013 * for performance reasons the counter is not strictly tracking tasks to
6014 * their home CPUs. So we just add the counter to another CPU's counter,
6015 * to keep the global sum constant after CPU-down:
6016 */
6017static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006018{
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006019 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006020
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006021 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6022 rq_src->nr_uninterruptible = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006023}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006024
6025/*
6026 * remove the tasks which were accounted by rq from calc_load_tasks.
6027 */
6028static void calc_global_load_remove(struct rq *rq)
6029{
6030 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
Thomas Gleixnera468d382009-07-17 14:15:46 +02006031 rq->calc_load_active = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006032}
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006033
6034/*
6035 * Migrate all tasks from the rq; sleeping tasks will be migrated by
6036 * try_to_wake_up()->select_task_rq().
6037 *
6038 * Called with rq->lock held even though we're in stop_machine() and
6039 * there's no concurrency possible, we hold the required locks anyway
6040 * because of lock validation efforts.
6041 */
6042static void migrate_tasks(unsigned int dead_cpu)
6043{
6044 struct rq *rq = cpu_rq(dead_cpu);
6045 struct task_struct *next, *stop = rq->stop;
6046 int dest_cpu;
6047
6048 /*
6049 * Fudge the rq selection such that the below task selection loop
6050 * doesn't get stuck on the currently eligible stop task.
6051 *
6052 * We're currently inside stop_machine() and the rq is either stuck
6053 * in the stop_machine_cpu_stop() loop, or we're executing this code,
6054 * either way we should never end up calling schedule() until we're
6055 * done here.
6056 */
6057 rq->stop = NULL;
6058
6059 for ( ; ; ) {
6060 /*
6061 * There's this thread running, bail when that's the only
6062 * remaining thread.
6063 */
6064 if (rq->nr_running == 1)
6065 break;
6066
6067 next = pick_next_task(rq);
6068 BUG_ON(!next);
6069 next->sched_class->put_prev_task(rq, next);
6070
6071 /* Find suitable destination for @next, with force if needed. */
6072 dest_cpu = select_fallback_rq(dead_cpu, next);
6073 raw_spin_unlock(&rq->lock);
6074
6075 __migrate_task(next, dead_cpu, dest_cpu);
6076
6077 raw_spin_lock(&rq->lock);
6078 }
6079
6080 rq->stop = stop;
6081}
6082
Linus Torvalds1da177e2005-04-16 15:20:36 -07006083#endif /* CONFIG_HOTPLUG_CPU */
6084
Nick Piggine692ab52007-07-26 13:40:43 +02006085#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6086
6087static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006088 {
6089 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006090 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006091 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006092 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006093};
6094
6095static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006096 {
6097 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006098 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006099 .child = sd_ctl_dir,
6100 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006101 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006102};
6103
6104static struct ctl_table *sd_alloc_ctl_entry(int n)
6105{
6106 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02006107 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02006108
Nick Piggine692ab52007-07-26 13:40:43 +02006109 return entry;
6110}
6111
Milton Miller6382bc92007-10-15 17:00:19 +02006112static void sd_free_ctl_entry(struct ctl_table **tablep)
6113{
Milton Millercd7900762007-10-17 16:55:11 +02006114 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02006115
Milton Millercd7900762007-10-17 16:55:11 +02006116 /*
6117 * In the intermediate directories, both the child directory and
6118 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006119 * will always be set. In the lowest directory the names are
Milton Millercd7900762007-10-17 16:55:11 +02006120 * static strings and all have proc handlers.
6121 */
6122 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02006123 if (entry->child)
6124 sd_free_ctl_entry(&entry->child);
Milton Millercd7900762007-10-17 16:55:11 +02006125 if (entry->proc_handler == NULL)
6126 kfree(entry->procname);
6127 }
Milton Miller6382bc92007-10-15 17:00:19 +02006128
6129 kfree(*tablep);
6130 *tablep = NULL;
6131}
6132
Nick Piggine692ab52007-07-26 13:40:43 +02006133static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02006134set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02006135 const char *procname, void *data, int maxlen,
6136 mode_t mode, proc_handler *proc_handler)
6137{
Nick Piggine692ab52007-07-26 13:40:43 +02006138 entry->procname = procname;
6139 entry->data = data;
6140 entry->maxlen = maxlen;
6141 entry->mode = mode;
6142 entry->proc_handler = proc_handler;
6143}
6144
6145static struct ctl_table *
6146sd_alloc_ctl_domain_table(struct sched_domain *sd)
6147{
Ingo Molnara5d8c342008-10-09 11:35:51 +02006148 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02006149
Milton Millerad1cdc12007-10-15 17:00:19 +02006150 if (table == NULL)
6151 return NULL;
6152
Alexey Dobriyane0361852007-08-09 11:16:46 +02006153 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006154 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006155 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006156 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006157 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006158 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006159 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006160 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006161 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006162 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006163 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006164 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006165 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006166 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006167 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02006168 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006169 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02006170 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006171 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02006172 &sd->cache_nice_tries,
6173 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006174 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02006175 sizeof(int), 0644, proc_dointvec_minmax);
Ingo Molnara5d8c342008-10-09 11:35:51 +02006176 set_table_entry(&table[11], "name", sd->name,
6177 CORENAME_MAX_SIZE, 0444, proc_dostring);
6178 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02006179
6180 return table;
6181}
6182
Ingo Molnar9a4e7152007-11-28 15:52:56 +01006183static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02006184{
6185 struct ctl_table *entry, *table;
6186 struct sched_domain *sd;
6187 int domain_num = 0, i;
6188 char buf[32];
6189
6190 for_each_domain(cpu, sd)
6191 domain_num++;
6192 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02006193 if (table == NULL)
6194 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02006195
6196 i = 0;
6197 for_each_domain(cpu, sd) {
6198 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006199 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006200 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006201 entry->child = sd_alloc_ctl_domain_table(sd);
6202 entry++;
6203 i++;
6204 }
6205 return table;
6206}
6207
6208static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02006209static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006210{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006211 int i, cpu_num = num_possible_cpus();
Nick Piggine692ab52007-07-26 13:40:43 +02006212 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6213 char buf[32];
6214
Milton Miller73785472007-10-24 18:23:48 +02006215 WARN_ON(sd_ctl_dir[0].child);
6216 sd_ctl_dir[0].child = entry;
6217
Milton Millerad1cdc12007-10-15 17:00:19 +02006218 if (entry == NULL)
6219 return;
6220
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006221 for_each_possible_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02006222 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006223 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006224 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006225 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02006226 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02006227 }
Milton Miller73785472007-10-24 18:23:48 +02006228
6229 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02006230 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6231}
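/*
 * Editor's note: with CONFIG_SCHED_DEBUG these tables surface as
 * files such as /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval,
 * one cpuN directory per possible CPU and one domainN subdirectory per
 * scheduler-domain level, exposing the fields set up in
 * sd_alloc_ctl_domain_table().
 */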
Milton Miller6382bc92007-10-15 17:00:19 +02006232
Milton Miller73785472007-10-24 18:23:48 +02006233/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02006234static void unregister_sched_domain_sysctl(void)
6235{
Milton Miller73785472007-10-24 18:23:48 +02006236 if (sd_sysctl_header)
6237 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02006238 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02006239 if (sd_ctl_dir[0].child)
6240 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02006241}
Nick Piggine692ab52007-07-26 13:40:43 +02006242#else
Milton Miller6382bc92007-10-15 17:00:19 +02006243static void register_sched_domain_sysctl(void)
6244{
6245}
6246static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006247{
6248}
6249#endif
6250
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006251static void set_rq_online(struct rq *rq)
6252{
6253 if (!rq->online) {
6254 const struct sched_class *class;
6255
Rusty Russellc6c49272008-11-25 02:35:05 +10306256 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006257 rq->online = 1;
6258
6259 for_each_class(class) {
6260 if (class->rq_online)
6261 class->rq_online(rq);
6262 }
6263 }
6264}
6265
6266static void set_rq_offline(struct rq *rq)
6267{
6268 if (rq->online) {
6269 const struct sched_class *class;
6270
6271 for_each_class(class) {
6272 if (class->rq_offline)
6273 class->rq_offline(rq);
6274 }
6275
Rusty Russellc6c49272008-11-25 02:35:05 +10306276 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006277 rq->online = 0;
6278 }
6279}
6280
Linus Torvalds1da177e2005-04-16 15:20:36 -07006281/*
6282 * migration_call - callback that gets triggered when a CPU is added.
6283 * Here we can start up the necessary migration thread for the new CPU.
6284 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006285static int __cpuinit
6286migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006287{
Ingo Molnar48f24c42006-07-03 00:25:40 -07006288 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006289 unsigned long flags;
Tejun Heo969c7922010-05-06 18:49:21 +02006290 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006291
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006292 switch (action & ~CPU_TASKS_FROZEN) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07006293
Linus Torvalds1da177e2005-04-16 15:20:36 -07006294 case CPU_UP_PREPARE:
Thomas Gleixnera468d382009-07-17 14:15:46 +02006295 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006296 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006297
Linus Torvalds1da177e2005-04-16 15:20:36 -07006298 case CPU_ONLINE:
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006299 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006300 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006301 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306302 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006303
6304 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006305 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006306 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006307 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006308
Linus Torvalds1da177e2005-04-16 15:20:36 -07006309#ifdef CONFIG_HOTPLUG_CPU
Gregory Haskins08f503b2008-03-10 17:59:11 -04006310 case CPU_DYING:
Gregory Haskins57d885f2008-01-25 21:08:18 +01006311 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006312 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006313 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306314 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006315 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006316 }
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006317 migrate_tasks(cpu);
6318 BUG_ON(rq->nr_running != 1); /* the migration thread */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006319 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006320
6321 migrate_nr_uninterruptible(rq);
6322 calc_global_load_remove(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006323 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006324#endif
6325 }
6326 return NOTIFY_OK;
6327}
6328
Paul Mackerrasf38b0822009-06-02 21:05:16 +10006329/*
6330 * Register at high priority so that task migration (migrate_tasks)
6331 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006332 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006333 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07006334static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006335 .notifier_call = migration_call,
Tejun Heo50a323b2010-06-08 21:40:36 +02006336 .priority = CPU_PRI_MIGRATION,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006337};
6338
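/*
 * Mark a CPU active once it is fully online, or again when a failed
 * CPU-down attempt is rolled back, so tasks may be placed on it.
 */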
Tejun Heo3a101d02010-06-08 21:40:36 +02006339static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6340 unsigned long action, void *hcpu)
6341{
6342 switch (action & ~CPU_TASKS_FROZEN) {
6343 case CPU_ONLINE:
6344 case CPU_DOWN_FAILED:
6345 set_cpu_active((long)hcpu, true);
6346 return NOTIFY_OK;
6347 default:
6348 return NOTIFY_DONE;
6349 }
6350}
6351
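/*
 * Clear the active bit at CPU_DOWN_PREPARE time so that nothing new is
 * scheduled onto a CPU that is about to go offline.
 */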
6352static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6353 unsigned long action, void *hcpu)
6354{
6355 switch (action & ~CPU_TASKS_FROZEN) {
6356 case CPU_DOWN_PREPARE:
6357 set_cpu_active((long)hcpu, false);
6358 return NOTIFY_OK;
6359 default:
6360 return NOTIFY_DONE;
6361 }
6362}
6363
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006364static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006365{
6366 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07006367 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006368
Tejun Heo3a101d02010-06-08 21:40:36 +02006369 /* Initialize migration for the boot CPU */
Akinobu Mita07dccf32006-09-29 02:00:22 -07006370 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6371 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006372 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6373 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006374
Tejun Heo3a101d02010-06-08 21:40:36 +02006375 /* Register cpu active notifiers */
6376 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6377 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6378
Thomas Gleixnera004cd42009-07-21 09:54:05 +02006379 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006380}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006381early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382#endif
6383
6384#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07006385
Ingo Molnar3e9830d2007-10-15 17:00:13 +02006386#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006387
Mike Travisf6630112009-11-17 18:22:15 -06006388static __read_mostly int sched_domain_debug_enabled;
6389
6390static int __init sched_domain_debug_setup(char *str)
6391{
6392 sched_domain_debug_enabled = 1;
6393
6394 return 0;
6395}
6396early_param("sched_debug", sched_domain_debug_setup);
6397
Mike Travis7c16ec52008-04-04 18:11:11 -07006398static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10306399 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006400{
6401 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07006402 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006403
Rusty Russell968ea6d2008-12-13 21:55:51 +10306404 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10306405 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006406
6407 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6408
6409 if (!(sd->flags & SD_LOAD_BALANCE)) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006410 printk("does not load-balance\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006411 if (sd->parent)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006412 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6413 " has parent");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006414 return -1;
6415 }
6416
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006417 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006418
Rusty Russell758b2cd2008-11-25 02:35:04 +10306419 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006420 printk(KERN_ERR "ERROR: domain->span does not contain "
6421 "CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006422 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10306423 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006424 printk(KERN_ERR "ERROR: domain->groups does not contain"
6425 " CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006426 }
6427
6428 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6429 do {
6430 if (!group) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006431 printk("\n");
6432 printk(KERN_ERR "ERROR: group is NULL\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006433 break;
6434 }
6435
Peter Zijlstra18a38852009-09-01 10:34:39 +02006436 if (!group->cpu_power) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006437 printk(KERN_CONT "\n");
6438 printk(KERN_ERR "ERROR: domain->cpu_power not "
6439 "set\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006440 break;
6441 }
6442
Rusty Russell758b2cd2008-11-25 02:35:04 +10306443 if (!cpumask_weight(sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006444 printk(KERN_CONT "\n");
6445 printk(KERN_ERR "ERROR: empty group\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006446 break;
6447 }
6448
Rusty Russell758b2cd2008-11-25 02:35:04 +10306449 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006450 printk(KERN_CONT "\n");
6451 printk(KERN_ERR "ERROR: repeated CPUs\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006452 break;
6453 }
6454
Rusty Russell758b2cd2008-11-25 02:35:04 +10306455 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006456
Rusty Russell968ea6d2008-12-13 21:55:51 +10306457 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306458
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006459 printk(KERN_CONT " %s", str);
Peter Zijlstra18a38852009-09-01 10:34:39 +02006460 if (group->cpu_power != SCHED_LOAD_SCALE) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006461 printk(KERN_CONT " (cpu_power = %d)",
6462 group->cpu_power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306463 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006464
6465 group = group->next;
6466 } while (group != sd->groups);
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006467 printk(KERN_CONT "\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006468
Rusty Russell758b2cd2008-11-25 02:35:04 +10306469 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006470 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006471
Rusty Russell758b2cd2008-11-25 02:35:04 +10306472 if (sd->parent &&
6473 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006474 printk(KERN_ERR "ERROR: parent span is not a superset "
6475 "of domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006476 return 0;
6477}
6478
Linus Torvalds1da177e2005-04-16 15:20:36 -07006479static void sched_domain_debug(struct sched_domain *sd, int cpu)
6480{
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306481 cpumask_var_t groupmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006482 int level = 0;
6483
Mike Travisf6630112009-11-17 18:22:15 -06006484 if (!sched_domain_debug_enabled)
6485 return;
6486
Nick Piggin41c7ce92005-06-25 14:57:24 -07006487 if (!sd) {
6488 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6489 return;
6490 }
6491
Linus Torvalds1da177e2005-04-16 15:20:36 -07006492 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6493
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306494 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006495 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6496 return;
6497 }
6498
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006499 for (;;) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006500 if (sched_domain_debug_one(sd, cpu, level, groupmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006501 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006502 level++;
6503 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08006504 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006505 break;
6506 }
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306507 free_cpumask_var(groupmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006508}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006509#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006510# define sched_domain_debug(sd, cpu) do { } while (0)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006511#endif /* CONFIG_SCHED_DEBUG */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006512
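/*
 * A sched domain is degenerate if it spans a single CPU, or if none of
 * its flags actually require more than one group; such a domain adds no
 * useful balancing and can be dropped from the hierarchy.
 */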
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006513static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006514{
Rusty Russell758b2cd2008-11-25 02:35:04 +10306515 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006516 return 1;
6517
6518 /* Following flags need at least 2 groups */
6519 if (sd->flags & (SD_LOAD_BALANCE |
6520 SD_BALANCE_NEWIDLE |
6521 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006522 SD_BALANCE_EXEC |
6523 SD_SHARE_CPUPOWER |
6524 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006525 if (sd->groups != sd->groups->next)
6526 return 0;
6527 }
6528
6529 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006530 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006531 return 0;
6532
6533 return 1;
6534}
6535
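/*
 * A parent domain is redundant if it is itself degenerate, or if it spans
 * the same CPUs as the child and adds no flags beyond the child's.
 */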
Ingo Molnar48f24c42006-07-03 00:25:40 -07006536static int
6537sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006538{
6539 unsigned long cflags = sd->flags, pflags = parent->flags;
6540
6541 if (sd_degenerate(parent))
6542 return 1;
6543
Rusty Russell758b2cd2008-11-25 02:35:04 +10306544 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006545 return 0;
6546
Suresh Siddha245af2c2005-06-25 14:57:25 -07006547 /* Flags needing groups don't count if only 1 group in parent */
6548 if (parent->groups == parent->groups->next) {
6549 pflags &= ~(SD_LOAD_BALANCE |
6550 SD_BALANCE_NEWIDLE |
6551 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006552 SD_BALANCE_EXEC |
6553 SD_SHARE_CPUPOWER |
6554 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08006555 if (nr_node_ids == 1)
6556 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006557 }
6558 if (~cflags & pflags)
6559 return 0;
6560
6561 return 1;
6562}
6563
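/*
 * Free a root domain and its cpumasks; synchronize_sched() makes sure no
 * CPU is still looking at the old root domain before it is torn down.
 */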
Rusty Russellc6c49272008-11-25 02:35:05 +10306564static void free_rootdomain(struct root_domain *rd)
6565{
Peter Zijlstra047106a2009-11-16 10:28:09 +01006566 synchronize_sched();
6567
Rusty Russell68e74562008-11-25 02:35:13 +10306568 cpupri_cleanup(&rd->cpupri);
6569
Rusty Russellc6c49272008-11-25 02:35:05 +10306570 free_cpumask_var(rd->rto_mask);
6571 free_cpumask_var(rd->online);
6572 free_cpumask_var(rd->span);
6573 kfree(rd);
6574}
6575
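/*
 * Attach a runqueue to a new root domain: leave (and possibly free) the
 * old root domain, join the new one and mark the CPU online in it if the
 * CPU is in the active map.
 */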
Gregory Haskins57d885f2008-01-25 21:08:18 +01006576static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6577{
Ingo Molnara0490fa2009-02-12 11:35:40 +01006578 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006579 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006580
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006581 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006582
6583 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01006584 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006585
Rusty Russellc6c49272008-11-25 02:35:05 +10306586 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006587 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006588
Rusty Russellc6c49272008-11-25 02:35:05 +10306589 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01006590
Ingo Molnara0490fa2009-02-12 11:35:40 +01006591 /*
6592	 * If we don't want to free the old_rd yet, then
6593 * set old_rd to NULL to skip the freeing later
6594 * in this function:
6595 */
6596 if (!atomic_dec_and_test(&old_rd->refcount))
6597 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006598 }
6599
6600 atomic_inc(&rd->refcount);
6601 rq->rd = rd;
6602
Rusty Russellc6c49272008-11-25 02:35:05 +10306603 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04006604 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006605 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006606
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006607 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01006608
6609 if (old_rd)
6610 free_rootdomain(old_rd);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006611}
6612
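/* Allocate the cpumasks and cpupri state embedded in a root domain. */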
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006613static int init_rootdomain(struct root_domain *rd)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006614{
6615 memset(rd, 0, sizeof(*rd));
6616
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006617 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
Li Zefan0c910d22009-01-06 17:39:06 +08006618 goto out;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006619 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306620 goto free_span;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006621 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306622 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02006623
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006624 if (cpupri_init(&rd->cpupri) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10306625 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10306626 return 0;
6627
Rusty Russell68e74562008-11-25 02:35:13 +10306628free_rto_mask:
6629 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10306630free_online:
6631 free_cpumask_var(rd->online);
6632free_span:
6633 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08006634out:
Rusty Russellc6c49272008-11-25 02:35:05 +10306635 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006636}
6637
6638static void init_defrootdomain(void)
6639{
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006640 init_rootdomain(&def_root_domain);
Rusty Russellc6c49272008-11-25 02:35:05 +10306641
Gregory Haskins57d885f2008-01-25 21:08:18 +01006642 atomic_set(&def_root_domain.refcount, 1);
6643}
6644
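/* Allocate and initialize a fresh root domain; returns NULL on failure. */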
Gregory Haskinsdc938522008-01-25 21:08:26 +01006645static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006646{
6647 struct root_domain *rd;
6648
6649 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6650 if (!rd)
6651 return NULL;
6652
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006653 if (init_rootdomain(rd) != 0) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306654 kfree(rd);
6655 return NULL;
6656 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006657
6658 return rd;
6659}
6660
Linus Torvalds1da177e2005-04-16 15:20:36 -07006661/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01006662 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07006663 * hold the hotplug lock.
6664 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01006665static void
6666cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006667{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006668 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006669 struct sched_domain *tmp;
6670
Peter Zijlstra669c55e2010-04-16 14:59:29 +02006671 for (tmp = sd; tmp; tmp = tmp->parent)
6672 tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6673
Suresh Siddha245af2c2005-06-25 14:57:25 -07006674 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08006675 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006676 struct sched_domain *parent = tmp->parent;
6677 if (!parent)
6678 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08006679
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006680 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006681 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006682 if (parent->parent)
6683 parent->parent->child = tmp;
Li Zefanf29c9b12008-11-06 09:45:16 +08006684 } else
6685 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006686 }
6687
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006688 if (sd && sd_degenerate(sd)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006689 sd = sd->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006690 if (sd)
6691 sd->child = NULL;
6692 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006693
6694 sched_domain_debug(sd, cpu);
6695
Gregory Haskins57d885f2008-01-25 21:08:18 +01006696 rq_attach_root(rq, rd);
Nick Piggin674311d2005-06-25 14:57:27 -07006697 rcu_assign_pointer(rq->sd, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006698}
6699
6700/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10306701static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006702
6703/* Setup the mask of cpus configured for isolated domains */
6704static int __init isolated_cpu_setup(char *str)
6705{
Rusty Russellbdddd292009-12-02 14:09:16 +10306706 alloc_bootmem_cpumask_var(&cpu_isolated_map);
Rusty Russell968ea6d2008-12-13 21:55:51 +10306707 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006708 return 1;
6709}
6710
Ingo Molnar8927f492007-10-15 17:00:13 +02006711__setup("isolcpus=", isolated_cpu_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006712
6713/*
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006714 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6715 * to a function which identifies what group (along with its sched group) a CPU
Rusty Russell96f874e2008-11-25 02:35:14 +10306716 * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
6717 * (due to the fact that we keep track of groups covered with a struct cpumask).
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718 *
6719 * init_sched_build_groups will build a circular linked list of the groups
6720 * covered by the given span, and will set each group's ->cpumask correctly,
6721 * and ->cpu_power to 0.
6722 */
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006723static void
Rusty Russell96f874e2008-11-25 02:35:14 +10306724init_sched_build_groups(const struct cpumask *span,
6725 const struct cpumask *cpu_map,
6726 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
Mike Travis7c16ec52008-04-04 18:11:11 -07006727 struct sched_group **sg,
Rusty Russell96f874e2008-11-25 02:35:14 +10306728 struct cpumask *tmpmask),
6729 struct cpumask *covered, struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006730{
6731 struct sched_group *first = NULL, *last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006732 int i;
6733
Rusty Russell96f874e2008-11-25 02:35:14 +10306734 cpumask_clear(covered);
Mike Travis7c16ec52008-04-04 18:11:11 -07006735
Rusty Russellabcd0832008-11-25 02:35:02 +10306736 for_each_cpu(i, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006737 struct sched_group *sg;
Mike Travis7c16ec52008-04-04 18:11:11 -07006738 int group = group_fn(i, cpu_map, &sg, tmpmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006739 int j;
6740
Rusty Russell758b2cd2008-11-25 02:35:04 +10306741 if (cpumask_test_cpu(i, covered))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006742 continue;
6743
Rusty Russell758b2cd2008-11-25 02:35:04 +10306744 cpumask_clear(sched_group_cpus(sg));
Peter Zijlstra18a38852009-09-01 10:34:39 +02006745 sg->cpu_power = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006746
Rusty Russellabcd0832008-11-25 02:35:02 +10306747 for_each_cpu(j, span) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006748 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006749 continue;
6750
Rusty Russell96f874e2008-11-25 02:35:14 +10306751 cpumask_set_cpu(j, covered);
Rusty Russell758b2cd2008-11-25 02:35:04 +10306752 cpumask_set_cpu(j, sched_group_cpus(sg));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006753 }
6754 if (!first)
6755 first = sg;
6756 if (last)
6757 last->next = sg;
6758 last = sg;
6759 }
6760 last->next = first;
6761}
6762
John Hawkes9c1cfda2005-09-06 15:18:14 -07006763#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07006764
John Hawkes9c1cfda2005-09-06 15:18:14 -07006765#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08006766
John Hawkes9c1cfda2005-09-06 15:18:14 -07006767/**
6768 * find_next_best_node - find the next node to include in a sched_domain
6769 * @node: node whose sched_domain we're building
6770 * @used_nodes: nodes already in the sched_domain
6771 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006772 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07006773 * finds the closest node not already in the @used_nodes map.
6774 *
6775 * Should use nodemask_t.
6776 */
Mike Travisc5f59f02008-04-04 18:11:10 -07006777static int find_next_best_node(int node, nodemask_t *used_nodes)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006778{
6779 int i, n, val, min_val, best_node = 0;
6780
6781 min_val = INT_MAX;
6782
Mike Travis076ac2a2008-05-12 21:21:12 +02006783 for (i = 0; i < nr_node_ids; i++) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07006784 /* Start at @node */
Mike Travis076ac2a2008-05-12 21:21:12 +02006785 n = (node + i) % nr_node_ids;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006786
6787 if (!nr_cpus_node(n))
6788 continue;
6789
6790 /* Skip already used nodes */
Mike Travisc5f59f02008-04-04 18:11:10 -07006791 if (node_isset(n, *used_nodes))
John Hawkes9c1cfda2005-09-06 15:18:14 -07006792 continue;
6793
6794 /* Simple min distance search */
6795 val = node_distance(node, n);
6796
6797 if (val < min_val) {
6798 min_val = val;
6799 best_node = n;
6800 }
6801 }
6802
Mike Travisc5f59f02008-04-04 18:11:10 -07006803 node_set(best_node, *used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006804 return best_node;
6805}
6806
6807/**
6808 * sched_domain_node_span - get a cpumask for a node's sched_domain
6809 * @node: node whose cpumask we're constructing
Randy Dunlap73486722008-04-22 10:07:22 -07006810 * @span: resulting cpumask
John Hawkes9c1cfda2005-09-06 15:18:14 -07006811 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006812 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07006813 * should be one that prevents unnecessary balancing, but also spreads tasks
6814 * out optimally.
6815 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306816static void sched_domain_node_span(int node, struct cpumask *span)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006817{
Mike Travisc5f59f02008-04-04 18:11:10 -07006818 nodemask_t used_nodes;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006819 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006820
Mike Travis6ca09df2008-12-31 18:08:45 -08006821 cpumask_clear(span);
Mike Travisc5f59f02008-04-04 18:11:10 -07006822 nodes_clear(used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006823
Mike Travis6ca09df2008-12-31 18:08:45 -08006824 cpumask_or(span, span, cpumask_of_node(node));
Mike Travisc5f59f02008-04-04 18:11:10 -07006825 node_set(node, used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006826
6827 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
Mike Travisc5f59f02008-04-04 18:11:10 -07006828 int next_node = find_next_best_node(node, &used_nodes);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006829
Mike Travis6ca09df2008-12-31 18:08:45 -08006830 cpumask_or(span, span, cpumask_of_node(next_node));
John Hawkes9c1cfda2005-09-06 15:18:14 -07006831 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006832}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006833#endif /* CONFIG_NUMA */
John Hawkes9c1cfda2005-09-06 15:18:14 -07006834
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006835int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006836
John Hawkes9c1cfda2005-09-06 15:18:14 -07006837/*
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306838 * The cpus mask in sched_group and sched_domain hangs off the end.
Ingo Molnar4200efd2009-05-19 09:22:19 +02006839 *
6840 * ( See the comments in include/linux/sched.h:struct sched_group
6841 * and struct sched_domain. )
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306842 */
6843struct static_sched_group {
6844 struct sched_group sg;
6845 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
6846};
6847
6848struct static_sched_domain {
6849 struct sched_domain sd;
6850 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
6851};
6852
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006853struct s_data {
6854#ifdef CONFIG_NUMA
6855 int sd_allnodes;
6856 cpumask_var_t domainspan;
6857 cpumask_var_t covered;
6858 cpumask_var_t notcovered;
6859#endif
6860 cpumask_var_t nodemask;
6861 cpumask_var_t this_sibling_map;
6862 cpumask_var_t this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02006863 cpumask_var_t this_book_map;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006864 cpumask_var_t send_covered;
6865 cpumask_var_t tmpmask;
6866 struct sched_group **sched_group_nodes;
6867 struct root_domain *rd;
6868};
6869
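/*
 * Allocation stages for struct s_data above; used to unwind partially
 * completed allocations in __free_domain_allocs().
 */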
Andreas Herrmann2109b992009-08-18 12:53:00 +02006870enum s_alloc {
6871 sa_sched_groups = 0,
6872 sa_rootdomain,
6873 sa_tmpmask,
6874 sa_send_covered,
Heiko Carstens01a08542010-08-31 10:28:16 +02006875 sa_this_book_map,
Andreas Herrmann2109b992009-08-18 12:53:00 +02006876 sa_this_core_map,
6877 sa_this_sibling_map,
6878 sa_nodemask,
6879 sa_sched_group_nodes,
6880#ifdef CONFIG_NUMA
6881 sa_notcovered,
6882 sa_covered,
6883 sa_domainspan,
6884#endif
6885 sa_none,
6886};
6887
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306888/*
Ingo Molnar48f24c42006-07-03 00:25:40 -07006889 * SMT sched-domains:
John Hawkes9c1cfda2005-09-06 15:18:14 -07006890 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006891#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306892static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
Tejun Heo1871e522009-10-29 22:34:13 +09006893static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006894
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006895static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306896cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
6897 struct sched_group **sg, struct cpumask *unused)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006898{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006899 if (sg)
Tejun Heo1871e522009-10-29 22:34:13 +09006900 *sg = &per_cpu(sched_groups, cpu).sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006901 return cpu;
6902}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006903#endif /* CONFIG_SCHED_SMT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006904
Ingo Molnar48f24c42006-07-03 00:25:40 -07006905/*
6906 * multi-core sched-domains:
6907 */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006908#ifdef CONFIG_SCHED_MC
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306909static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
6910static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006911
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006912static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306913cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6914 struct sched_group **sg, struct cpumask *mask)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006915{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006916 int group;
Heiko Carstensf2698932010-08-31 10:28:15 +02006917#ifdef CONFIG_SCHED_SMT
Rusty Russellc69fc562009-03-13 14:49:46 +10306918 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306919 group = cpumask_first(mask);
Heiko Carstensf2698932010-08-31 10:28:15 +02006920#else
6921 group = cpu;
6922#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006923 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306924 *sg = &per_cpu(sched_group_core, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006925 return group;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006926}
Heiko Carstensf2698932010-08-31 10:28:15 +02006927#endif /* CONFIG_SCHED_MC */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006928
Heiko Carstens01a08542010-08-31 10:28:16 +02006929/*
6930 * book sched-domains:
6931 */
6932#ifdef CONFIG_SCHED_BOOK
6933static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
6934static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
6935
Linus Torvalds1da177e2005-04-16 15:20:36 -07006936static int
Heiko Carstens01a08542010-08-31 10:28:16 +02006937cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
6938 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006939{
Heiko Carstens01a08542010-08-31 10:28:16 +02006940 int group = cpu;
6941#ifdef CONFIG_SCHED_MC
6942 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
6943 group = cpumask_first(mask);
6944#elif defined(CONFIG_SCHED_SMT)
6945 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
6946 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006947#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02006948 if (sg)
6949 *sg = &per_cpu(sched_group_book, group).sg;
6950 return group;
6951}
6952#endif /* CONFIG_SCHED_BOOK */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006953
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306954static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
6955static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006956
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006957static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306958cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
6959 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006960{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006961 int group;
Heiko Carstens01a08542010-08-31 10:28:16 +02006962#ifdef CONFIG_SCHED_BOOK
6963 cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
6964 group = cpumask_first(mask);
6965#elif defined(CONFIG_SCHED_MC)
Mike Travis6ca09df2008-12-31 18:08:45 -08006966 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306967 group = cpumask_first(mask);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006968#elif defined(CONFIG_SCHED_SMT)
Rusty Russellc69fc562009-03-13 14:49:46 +10306969 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306970 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006971#else
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006972 group = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006973#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006974 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306975 *sg = &per_cpu(sched_group_phys, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006976 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006977}
6978
6979#ifdef CONFIG_NUMA
John Hawkes9c1cfda2005-09-06 15:18:14 -07006980/*
6981 * The init_sched_build_groups can't handle what we want to do with node
6982 * groups, so roll our own. Now each node has its own list of groups which
6983 * gets dynamically allocated.
6984 */
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006985static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
Mike Travis434d53b2008-04-04 18:11:04 -07006986static struct sched_group ***sched_group_nodes_bycpu;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006987
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006988static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306989static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006990
Rusty Russell96f874e2008-11-25 02:35:14 +10306991static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
6992 struct sched_group **sg,
6993 struct cpumask *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006995 int group;
6996
Mike Travis6ca09df2008-12-31 18:08:45 -08006997 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306998 group = cpumask_first(nodemask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006999
7000 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307001 *sg = &per_cpu(sched_group_allnodes, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007002 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007003}
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007004
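/*
 * Accumulate the cpu_power of the physical packages below a NUMA-level
 * group list; each package contributes its power exactly once.
 */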
Siddha, Suresh B08069032006-03-27 01:15:23 -08007005static void init_numa_sched_groups_power(struct sched_group *group_head)
7006{
7007 struct sched_group *sg = group_head;
7008 int j;
7009
7010 if (!sg)
7011 return;
Andi Kleen3a5c3592007-10-15 17:00:14 +02007012 do {
Rusty Russell758b2cd2008-11-25 02:35:04 +10307013 for_each_cpu(j, sched_group_cpus(sg)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02007014 struct sched_domain *sd;
Siddha, Suresh B08069032006-03-27 01:15:23 -08007015
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307016 sd = &per_cpu(phys_domains, j).sd;
Miao Xie13318a72009-04-15 09:59:10 +08007017 if (j != group_first_cpu(sd->groups)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02007018 /*
7019 * Only add "power" once for each
7020 * physical package.
7021 */
7022 continue;
7023 }
7024
Peter Zijlstra18a38852009-09-01 10:34:39 +02007025 sg->cpu_power += sd->groups->cpu_power;
Siddha, Suresh B08069032006-03-27 01:15:23 -08007026 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02007027 sg = sg->next;
7028 } while (sg != group_head);
Siddha, Suresh B08069032006-03-27 01:15:23 -08007029}
Andreas Herrmann0601a882009-08-18 13:01:11 +02007030
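/*
 * Build the circular group list for one NUMA node: a group for the node
 * itself followed by groups for the nearest nodes within its span.
 */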
7031static int build_numa_sched_groups(struct s_data *d,
7032 const struct cpumask *cpu_map, int num)
7033{
7034 struct sched_domain *sd;
7035 struct sched_group *sg, *prev;
7036 int n, j;
7037
7038 cpumask_clear(d->covered);
7039 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
7040 if (cpumask_empty(d->nodemask)) {
7041 d->sched_group_nodes[num] = NULL;
7042 goto out;
7043 }
7044
7045 sched_domain_node_span(num, d->domainspan);
7046 cpumask_and(d->domainspan, d->domainspan, cpu_map);
7047
7048 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7049 GFP_KERNEL, num);
7050 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007051 printk(KERN_WARNING "Can not alloc domain group for node %d\n",
7052 num);
Andreas Herrmann0601a882009-08-18 13:01:11 +02007053 return -ENOMEM;
7054 }
7055 d->sched_group_nodes[num] = sg;
7056
7057 for_each_cpu(j, d->nodemask) {
7058 sd = &per_cpu(node_domains, j).sd;
7059 sd->groups = sg;
7060 }
7061
Peter Zijlstra18a38852009-09-01 10:34:39 +02007062 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02007063 cpumask_copy(sched_group_cpus(sg), d->nodemask);
7064 sg->next = sg;
7065 cpumask_or(d->covered, d->covered, d->nodemask);
7066
7067 prev = sg;
7068 for (j = 0; j < nr_node_ids; j++) {
7069 n = (num + j) % nr_node_ids;
7070 cpumask_complement(d->notcovered, d->covered);
7071 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
7072 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
7073 if (cpumask_empty(d->tmpmask))
7074 break;
7075 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
7076 if (cpumask_empty(d->tmpmask))
7077 continue;
7078 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7079 GFP_KERNEL, num);
7080 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007081 printk(KERN_WARNING
7082 "Can not alloc domain group for node %d\n", j);
Andreas Herrmann0601a882009-08-18 13:01:11 +02007083 return -ENOMEM;
7084 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02007085 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02007086 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
7087 sg->next = prev->next;
7088 cpumask_or(d->covered, d->covered, d->tmpmask);
7089 prev->next = sg;
7090 prev = sg;
7091 }
7092out:
7093 return 0;
7094}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007095#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007096
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007097#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007098/* Free memory allocated for various sched_group structures */
Rusty Russell96f874e2008-11-25 02:35:14 +10307099static void free_sched_groups(const struct cpumask *cpu_map,
7100 struct cpumask *nodemask)
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007101{
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007102 int cpu, i;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007103
Rusty Russellabcd0832008-11-25 02:35:02 +10307104 for_each_cpu(cpu, cpu_map) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007105 struct sched_group **sched_group_nodes
7106 = sched_group_nodes_bycpu[cpu];
7107
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007108 if (!sched_group_nodes)
7109 continue;
7110
Mike Travis076ac2a2008-05-12 21:21:12 +02007111 for (i = 0; i < nr_node_ids; i++) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007112 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7113
Mike Travis6ca09df2008-12-31 18:08:45 -08007114 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10307115 if (cpumask_empty(nodemask))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007116 continue;
7117
7118 if (sg == NULL)
7119 continue;
7120 sg = sg->next;
7121next_sg:
7122 oldsg = sg;
7123 sg = sg->next;
7124 kfree(oldsg);
7125 if (oldsg != sched_group_nodes[i])
7126 goto next_sg;
7127 }
7128 kfree(sched_group_nodes);
7129 sched_group_nodes_bycpu[cpu] = NULL;
7130 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007131}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007132#else /* !CONFIG_NUMA */
Rusty Russell96f874e2008-11-25 02:35:14 +10307133static void free_sched_groups(const struct cpumask *cpu_map,
7134 struct cpumask *nodemask)
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007135{
7136}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007137#endif /* CONFIG_NUMA */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007138
Linus Torvalds1da177e2005-04-16 15:20:36 -07007139/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007140 * Initialize sched groups cpu_power.
7141 *
7142 * cpu_power indicates the capacity of a sched group, which is used while
7143 * distributing the load between different sched groups in a sched domain.
7144 * Typically cpu_power will be the same for all groups in a sched domain,
7145 * unless there are asymmetries in the topology. If there are asymmetries,
7146 * a group with more cpu_power will pick up more load than a group with
7147 * less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007148 */
7149static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7150{
7151 struct sched_domain *child;
7152 struct sched_group *group;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007153 long power;
7154 int weight;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007155
7156 WARN_ON(!sd || !sd->groups);
7157
Miao Xie13318a72009-04-15 09:59:10 +08007158 if (cpu != group_first_cpu(sd->groups))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007159 return;
7160
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07007161 sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
7162
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007163 child = sd->child;
7164
Peter Zijlstra18a38852009-09-01 10:34:39 +02007165 sd->groups->cpu_power = 0;
Eric Dumazet5517d862007-05-08 00:32:57 -07007166
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007167 if (!child) {
7168 power = SCHED_LOAD_SCALE;
7169 weight = cpumask_weight(sched_domain_span(sd));
7170 /*
7171 * SMT siblings share the power of a single core.
Peter Zijlstraa52bfd732009-09-01 10:34:35 +02007172 * Usually multiple threads get a better yield out of
7173 * that one core than a single thread would have;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007174 * reflect that in sd->smt_gain.
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007175 */
Peter Zijlstraa52bfd732009-09-01 10:34:35 +02007176 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
7177 power *= sd->smt_gain;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007178 power /= weight;
Peter Zijlstraa52bfd732009-09-01 10:34:35 +02007179 power >>= SCHED_LOAD_SHIFT;
7180 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02007181 sd->groups->cpu_power += power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007182 return;
7183 }
7184
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007185 /*
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007186 * Add the cpu_power of each child group to this group's cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007187 */
7188 group = child->groups;
7189 do {
Peter Zijlstra18a38852009-09-01 10:34:39 +02007190 sd->groups->cpu_power += group->cpu_power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007191 group = group->next;
7192 } while (group != child->groups);
7193}
7194
7195/*
Mike Travis7c16ec52008-04-04 18:11:11 -07007196 * Initializers for schedule domains
7197 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7198 */
7199
Ingo Molnara5d8c342008-10-09 11:35:51 +02007200#ifdef CONFIG_SCHED_DEBUG
7201# define SD_INIT_NAME(sd, type) sd->name = #type
7202#else
7203# define SD_INIT_NAME(sd, type) do { } while (0)
7204#endif
7205
Mike Travis7c16ec52008-04-04 18:11:11 -07007206#define SD_INIT(sd, type) sd_init_##type(sd)
Ingo Molnara5d8c342008-10-09 11:35:51 +02007207
Mike Travis7c16ec52008-04-04 18:11:11 -07007208#define SD_INIT_FUNC(type) \
7209static noinline void sd_init_##type(struct sched_domain *sd) \
7210{ \
7211 memset(sd, 0, sizeof(*sd)); \
7212 *sd = SD_##type##_INIT; \
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007213 sd->level = SD_LV_##type; \
Ingo Molnara5d8c342008-10-09 11:35:51 +02007214 SD_INIT_NAME(sd, type); \
Mike Travis7c16ec52008-04-04 18:11:11 -07007215}
7216
7217SD_INIT_FUNC(CPU)
7218#ifdef CONFIG_NUMA
7219 SD_INIT_FUNC(ALLNODES)
7220 SD_INIT_FUNC(NODE)
7221#endif
7222#ifdef CONFIG_SCHED_SMT
7223 SD_INIT_FUNC(SIBLING)
7224#endif
7225#ifdef CONFIG_SCHED_MC
7226 SD_INIT_FUNC(MC)
7227#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007228#ifdef CONFIG_SCHED_BOOK
7229 SD_INIT_FUNC(BOOK)
7230#endif
Mike Travis7c16ec52008-04-04 18:11:11 -07007231
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007232static int default_relax_domain_level = -1;
7233
7234static int __init setup_relax_domain_level(char *str)
7235{
Li Zefan30e0e172008-05-13 10:27:17 +08007236 unsigned long val;
7237
7238 val = simple_strtoul(str, NULL, 0);
7239 if (val < SD_LV_MAX)
7240 default_relax_domain_level = val;
7241
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007242 return 1;
7243}
7244__setup("relax_domain_level=", setup_relax_domain_level);
7245
7246static void set_domain_attribute(struct sched_domain *sd,
7247 struct sched_domain_attr *attr)
7248{
7249 int request;
7250
7251 if (!attr || attr->relax_domain_level < 0) {
7252 if (default_relax_domain_level < 0)
7253 return;
7254 else
7255 request = default_relax_domain_level;
7256 } else
7257 request = attr->relax_domain_level;
7258 if (request < sd->level) {
7259 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007260 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007261 } else {
7262 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007263 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007264 }
7265}
7266
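/*
 * Undo whatever __visit_domain_allocation_hell() managed to allocate;
 * the cases fall through, so 'what' marks the highest stage reached.
 */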
Andreas Herrmann2109b992009-08-18 12:53:00 +02007267static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7268 const struct cpumask *cpu_map)
7269{
7270 switch (what) {
7271 case sa_sched_groups:
7272 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
7273 d->sched_group_nodes = NULL;
7274 case sa_rootdomain:
7275 free_rootdomain(d->rd); /* fall through */
7276 case sa_tmpmask:
7277 free_cpumask_var(d->tmpmask); /* fall through */
7278 case sa_send_covered:
7279 free_cpumask_var(d->send_covered); /* fall through */
Heiko Carstens01a08542010-08-31 10:28:16 +02007280 case sa_this_book_map:
7281 free_cpumask_var(d->this_book_map); /* fall through */
Andreas Herrmann2109b992009-08-18 12:53:00 +02007282 case sa_this_core_map:
7283 free_cpumask_var(d->this_core_map); /* fall through */
7284 case sa_this_sibling_map:
7285 free_cpumask_var(d->this_sibling_map); /* fall through */
7286 case sa_nodemask:
7287 free_cpumask_var(d->nodemask); /* fall through */
7288 case sa_sched_group_nodes:
7289#ifdef CONFIG_NUMA
7290 kfree(d->sched_group_nodes); /* fall through */
7291 case sa_notcovered:
7292 free_cpumask_var(d->notcovered); /* fall through */
7293 case sa_covered:
7294 free_cpumask_var(d->covered); /* fall through */
7295 case sa_domainspan:
7296 free_cpumask_var(d->domainspan); /* fall through */
7297#endif
7298 case sa_none:
7299 break;
7300 }
7301}
7302
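/*
 * Allocate the temporary cpumasks, per-node group lists and root domain
 * needed to build sched domains; returns the stage reached so a failure
 * can be unwound by __free_domain_allocs().
 */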
7303static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7304 const struct cpumask *cpu_map)
7305{
7306#ifdef CONFIG_NUMA
7307 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
7308 return sa_none;
7309 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
7310 return sa_domainspan;
7311 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
7312 return sa_covered;
7313 /* Allocate the per-node list of sched groups */
7314 d->sched_group_nodes = kcalloc(nr_node_ids,
7315 sizeof(struct sched_group *), GFP_KERNEL);
7316 if (!d->sched_group_nodes) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007317 printk(KERN_WARNING "Can not alloc sched group node list\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007318 return sa_notcovered;
7319 }
7320 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
7321#endif
7322 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
7323 return sa_sched_group_nodes;
7324 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
7325 return sa_nodemask;
7326 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
7327 return sa_this_sibling_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007328 if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
Andreas Herrmann2109b992009-08-18 12:53:00 +02007329 return sa_this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007330 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
7331 return sa_this_book_map;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007332 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
7333 return sa_send_covered;
7334 d->rd = alloc_rootdomain();
7335 if (!d->rd) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007336 printk(KERN_WARNING "Cannot alloc root domain\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007337 return sa_tmpmask;
7338 }
7339 return sa_rootdomain;
7340}
7341
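/*
 * Set up the NUMA-level (ALLNODES and NODE) domains for CPU i; returns the
 * lowest domain built so the next level can be chained below it.
 */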
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007342static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
7343 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
7344{
7345 struct sched_domain *sd = NULL;
7346#ifdef CONFIG_NUMA
7347 struct sched_domain *parent;
7348
7349 d->sd_allnodes = 0;
7350 if (cpumask_weight(cpu_map) >
7351 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
7352 sd = &per_cpu(allnodes_domains, i).sd;
7353 SD_INIT(sd, ALLNODES);
7354 set_domain_attribute(sd, attr);
7355 cpumask_copy(sched_domain_span(sd), cpu_map);
7356 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
7357 d->sd_allnodes = 1;
7358 }
7359 parent = sd;
7360
7361 sd = &per_cpu(node_domains, i).sd;
7362 SD_INIT(sd, NODE);
7363 set_domain_attribute(sd, attr);
7364 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
7365 sd->parent = parent;
7366 if (parent)
7367 parent->child = sd;
7368 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
7369#endif
7370 return sd;
7371}
7372
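/* Set up the physical (CPU-level) domain for CPU i below 'parent'. */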
Andreas Herrmann87cce662009-08-18 12:54:55 +02007373static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
7374 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7375 struct sched_domain *parent, int i)
7376{
7377 struct sched_domain *sd;
7378 sd = &per_cpu(phys_domains, i).sd;
7379 SD_INIT(sd, CPU);
7380 set_domain_attribute(sd, attr);
7381 cpumask_copy(sched_domain_span(sd), d->nodemask);
7382 sd->parent = parent;
7383 if (parent)
7384 parent->child = sd;
7385 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
7386 return sd;
7387}
7388
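/* Set up the book-level domain for CPU i when CONFIG_SCHED_BOOK is enabled. */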
Heiko Carstens01a08542010-08-31 10:28:16 +02007389static struct sched_domain *__build_book_sched_domain(struct s_data *d,
7390 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7391 struct sched_domain *parent, int i)
7392{
7393 struct sched_domain *sd = parent;
7394#ifdef CONFIG_SCHED_BOOK
7395 sd = &per_cpu(book_domains, i).sd;
7396 SD_INIT(sd, BOOK);
7397 set_domain_attribute(sd, attr);
7398 cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
7399 sd->parent = parent;
7400 parent->child = sd;
7401 cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
7402#endif
7403 return sd;
7404}
7405
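/* Set up the multi-core (MC) domain for CPU i when CONFIG_SCHED_MC is enabled. */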
Andreas Herrmann410c4082009-08-18 12:56:14 +02007406static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
7407 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7408 struct sched_domain *parent, int i)
7409{
7410 struct sched_domain *sd = parent;
7411#ifdef CONFIG_SCHED_MC
7412 sd = &per_cpu(core_domains, i).sd;
7413 SD_INIT(sd, MC);
7414 set_domain_attribute(sd, attr);
7415 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
7416 sd->parent = parent;
7417 parent->child = sd;
7418 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
7419#endif
7420 return sd;
7421}
7422
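/* Set up the SMT sibling domain for CPU i when CONFIG_SCHED_SMT is enabled. */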
Andreas Herrmannd8173532009-08-18 12:57:03 +02007423static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
7424 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7425 struct sched_domain *parent, int i)
7426{
7427 struct sched_domain *sd = parent;
7428#ifdef CONFIG_SCHED_SMT
7429 sd = &per_cpu(cpu_domains, i).sd;
7430 SD_INIT(sd, SIBLING);
7431 set_domain_attribute(sd, attr);
7432 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
7433 sd->parent = parent;
7434 parent->child = sd;
7435 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
7436#endif
7437 return sd;
7438}
7439
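/*
 * Build the sched groups for one level (SMT, MC, BOOK, CPU or ALLNODES)
 * of the domain hierarchy covering 'cpu_map'.
 */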
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007440static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
7441 const struct cpumask *cpu_map, int cpu)
7442{
7443 switch (l) {
7444#ifdef CONFIG_SCHED_SMT
7445 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
7446 cpumask_and(d->this_sibling_map, cpu_map,
7447 topology_thread_cpumask(cpu));
7448 if (cpu == cpumask_first(d->this_sibling_map))
7449 init_sched_build_groups(d->this_sibling_map, cpu_map,
7450 &cpu_to_cpu_group,
7451 d->send_covered, d->tmpmask);
7452 break;
7453#endif
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007454#ifdef CONFIG_SCHED_MC
7455 case SD_LV_MC: /* set up multi-core groups */
7456 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
7457 if (cpu == cpumask_first(d->this_core_map))
7458 init_sched_build_groups(d->this_core_map, cpu_map,
7459 &cpu_to_core_group,
7460 d->send_covered, d->tmpmask);
7461 break;
7462#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007463#ifdef CONFIG_SCHED_BOOK
7464 case SD_LV_BOOK: /* set up book groups */
7465 cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
7466 if (cpu == cpumask_first(d->this_book_map))
7467 init_sched_build_groups(d->this_book_map, cpu_map,
7468 &cpu_to_book_group,
7469 d->send_covered, d->tmpmask);
7470 break;
7471#endif
Andreas Herrmann86548092009-08-18 12:59:28 +02007472 case SD_LV_CPU: /* set up physical groups */
7473 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
7474 if (!cpumask_empty(d->nodemask))
7475 init_sched_build_groups(d->nodemask, cpu_map,
7476 &cpu_to_phys_group,
7477 d->send_covered, d->tmpmask);
7478 break;
Andreas Herrmannde616e32009-08-18 13:00:13 +02007479#ifdef CONFIG_NUMA
7480 case SD_LV_ALLNODES:
7481 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
7482 d->send_covered, d->tmpmask);
7483 break;
7484#endif
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007485 default:
7486 break;
7487 }
7488}
7489
Mike Travis7c16ec52008-04-04 18:11:11 -07007490/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007491 * Build sched domains for a given set of cpus and attach the sched domains
7492 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07007493 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307494static int __build_sched_domains(const struct cpumask *cpu_map,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007495 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007496{
Andreas Herrmann2109b992009-08-18 12:53:00 +02007497 enum s_alloc alloc_state = sa_none;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007498 struct s_data d;
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007499 struct sched_domain *sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007500 int i;
John Hawkesd1b55132005-09-06 15:18:14 -07007501#ifdef CONFIG_NUMA
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007502 d.sd_allnodes = 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307503#endif
7504
Andreas Herrmann2109b992009-08-18 12:53:00 +02007505 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7506 if (alloc_state != sa_rootdomain)
7507 goto error;
7508 alloc_state = sa_sched_groups;
Mike Travis7c16ec52008-04-04 18:11:11 -07007509
Linus Torvalds1da177e2005-04-16 15:20:36 -07007510 /*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007511 * Set up domains for cpus specified by the cpu_map.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007512 */
Rusty Russellabcd0832008-11-25 02:35:02 +10307513 for_each_cpu(i, cpu_map) {
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007514 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
7515 cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007516
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007517 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
Andreas Herrmann87cce662009-08-18 12:54:55 +02007518 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007519 sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmann410c4082009-08-18 12:56:14 +02007520 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmannd8173532009-08-18 12:57:03 +02007521 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007522 }
7523
Rusty Russellabcd0832008-11-25 02:35:02 +10307524 for_each_cpu(i, cpu_map) {
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007525 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007526 build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007527 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007528 }
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007529
Linus Torvalds1da177e2005-04-16 15:20:36 -07007530 /* Set up physical groups */
Andreas Herrmann86548092009-08-18 12:59:28 +02007531 for (i = 0; i < nr_node_ids; i++)
7532 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007533
7534#ifdef CONFIG_NUMA
7535 /* Set up node groups */
Andreas Herrmannde616e32009-08-18 13:00:13 +02007536 if (d.sd_allnodes)
7537 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007538
Andreas Herrmann0601a882009-08-18 13:01:11 +02007539 for (i = 0; i < nr_node_ids; i++)
7540 if (build_numa_sched_groups(&d, cpu_map, i))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007541 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542#endif
7543
7544 /* Calculate CPU power for physical packages and nodes */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007545#ifdef CONFIG_SCHED_SMT
Rusty Russellabcd0832008-11-25 02:35:02 +10307546 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007547 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007548 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007549 }
7550#endif
7551#ifdef CONFIG_SCHED_MC
Rusty Russellabcd0832008-11-25 02:35:02 +10307552 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007553 sd = &per_cpu(core_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007554 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007555 }
7556#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007557#ifdef CONFIG_SCHED_BOOK
7558 for_each_cpu(i, cpu_map) {
7559 sd = &per_cpu(book_domains, i).sd;
7560 init_sched_groups_power(i, sd);
7561 }
7562#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007563
Rusty Russellabcd0832008-11-25 02:35:02 +10307564 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007565 sd = &per_cpu(phys_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007566 init_sched_groups_power(i, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007567 }
7568
John Hawkes9c1cfda2005-09-06 15:18:14 -07007569#ifdef CONFIG_NUMA
Mike Travis076ac2a2008-05-12 21:21:12 +02007570 for (i = 0; i < nr_node_ids; i++)
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007571 init_numa_sched_groups_power(d.sched_group_nodes[i]);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007572
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007573 if (d.sd_allnodes) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007574 struct sched_group *sg;
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007575
Rusty Russell96f874e2008-11-25 02:35:14 +10307576 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007577 d.tmpmask);
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007578 init_numa_sched_groups_power(sg);
7579 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07007580#endif
7581
Linus Torvalds1da177e2005-04-16 15:20:36 -07007582 /* Attach the domains */
Rusty Russellabcd0832008-11-25 02:35:02 +10307583 for_each_cpu(i, cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007584#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307585 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007586#elif defined(CONFIG_SCHED_MC)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307587 sd = &per_cpu(core_domains, i).sd;
Heiko Carstens01a08542010-08-31 10:28:16 +02007588#elif defined(CONFIG_SCHED_BOOK)
7589 sd = &per_cpu(book_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007590#else
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307591 sd = &per_cpu(phys_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007592#endif
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007593 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007594 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007595
Andreas Herrmann2109b992009-08-18 12:53:00 +02007596 d.sched_group_nodes = NULL; /* don't free this we still need it */
7597 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
7598 return 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307599
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007600error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02007601 __free_domain_allocs(&d, alloc_state, cpu_map);
7602 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007603}
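/*
 * Illustrative sketch, assuming a hypothetical x86-like box with 2 sockets,
 * 2 cores per socket and 2 SMT threads per core (no CONFIG_SCHED_BOOK):
 * the per-cpu loop above leaves every logical cpu attached to a domain
 * chain of roughly
 *
 *	SIBLING (the 2 threads of a core)
 *	  -> MC (the 4 threads of a socket)
 *	    -> CPU (all cpus in the node)
 *
 * with ->parent/->child links between adjacent levels, and NODE/ALLNODES
 * levels stacked on top when CONFIG_NUMA is set. build_sched_groups() then
 * builds one sched_group ring per level, init_sched_groups_power() fills in
 * the group power values, and cpu_attach_domain() publishes the chain.
 */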
Paul Jackson029190c2007-10-18 23:40:20 -07007604
Rusty Russell96f874e2008-11-25 02:35:14 +10307605static int build_sched_domains(const struct cpumask *cpu_map)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007606{
7607 return __build_sched_domains(cpu_map, NULL);
7608}
7609
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307610static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07007611static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02007612static struct sched_domain_attr *dattr_cur;
7613 /* attributes of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07007614
7615/*
7616 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10307617 * cpumask) fails, then fall back to a single sched domain,
7618 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07007619 */
Rusty Russell42128232008-11-25 02:35:12 +10307620static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07007621
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007622/*
7623 * arch_update_cpu_topology lets virtualized architectures update the
7624 * cpu core maps. It is supposed to return 1 if the topology changed
7625 * or 0 if it stayed the same.
7626 */
7627int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01007628{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007629 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01007630}
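/*
 * Illustrative sketch: an architecture whose topology can change at runtime
 * (e.g. under a hypervisor) overrides the weak stub above with its own
 * definition, along the lines of
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return arch_refresh_topology_masks() ? 1 : 0;
 *	}
 *
 * where arch_refresh_topology_masks() is a hypothetical helper that rebuilds
 * the core/thread masks and reports whether anything changed.
 */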
7631
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307632cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7633{
7634 int i;
7635 cpumask_var_t *doms;
7636
7637 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7638 if (!doms)
7639 return NULL;
7640 for (i = 0; i < ndoms; i++) {
7641 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7642 free_sched_domains(doms, i);
7643 return NULL;
7644 }
7645 }
7646 return doms;
7647}
7648
7649void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7650{
7651 unsigned int i;
7652 for (i = 0; i < ndoms; i++)
7653 free_cpumask_var(doms[i]);
7654 kfree(doms);
7655}
7656
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007657/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007658 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07007659 * For now this just excludes isolated cpus, but could be used to
7660 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007661 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307662static int arch_init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007663{
Milton Miller73785472007-10-24 18:23:48 +02007664 int err;
7665
Heiko Carstens22e52b02008-03-12 18:31:59 +01007666 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07007667 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307668 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07007669 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307670 doms_cur = &fallback_doms;
7671 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007672 dattr_cur = NULL;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307673 err = build_sched_domains(doms_cur[0]);
Milton Miller6382bc92007-10-15 17:00:19 +02007674 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02007675
7676 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007677}
7678
Rusty Russell96f874e2008-11-25 02:35:14 +10307679static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7680 struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007681{
Mike Travis7c16ec52008-04-04 18:11:11 -07007682 free_sched_groups(cpu_map, tmpmask);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007683}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007684
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007685/*
7686 * Detach sched domains from a group of cpus specified in cpu_map
7687 * These cpus will now be attached to the NULL domain
7688 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307689static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007690{
Rusty Russell96f874e2008-11-25 02:35:14 +10307691	/* Static to save stack; safe because the hotplug lock is held. */
7692 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007693 int i;
7694
Rusty Russellabcd0832008-11-25 02:35:02 +10307695 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007696 cpu_attach_domain(NULL, &def_root_domain, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007697 synchronize_sched();
Rusty Russell96f874e2008-11-25 02:35:14 +10307698 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007699}
7700
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007701/* handle null as "default" */
7702static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7703 struct sched_domain_attr *new, int idx_new)
7704{
7705 struct sched_domain_attr tmp;
7706
7707 /* fast path */
7708 if (!new && !cur)
7709 return 1;
7710
7711 tmp = SD_ATTR_INIT;
7712 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7713 new ? (new + idx_new) : &tmp,
7714 sizeof(struct sched_domain_attr));
7715}
7716
Paul Jackson029190c2007-10-18 23:40:20 -07007717/*
7718 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007719 * cpumasks in the array doms_new[]. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07007720 * doms_new[] to the current sched domain partitioning, doms_cur[].
7721 * It destroys each deleted domain and builds each new domain.
7722 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307723 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007724 * The masks don't intersect (don't overlap). We should set up one
7725 * sched domain for each mask. CPUs not in any of the cpumasks will
7726 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07007727 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7728 * it as it is.
7729 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307730 * The passed in 'doms_new' should be allocated using
7731 * alloc_sched_domains. This routine takes ownership of it and will
7732 * free_sched_domains it when done with it. If the caller failed the
7733 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7734 * and partition_sched_domains() will fall back to the single partition
7735 * 'fallback_doms'; this also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07007736 *
Rusty Russell96f874e2008-11-25 02:35:14 +10307737 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08007738 * ndoms_new == 0 is a special case for destroying existing domains,
7739 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007740 *
Paul Jackson029190c2007-10-18 23:40:20 -07007741 * Call with hotplug lock held
7742 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307743void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007744 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07007745{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007746 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007747 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07007748
Heiko Carstens712555e2008-04-28 11:33:07 +02007749 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007750
Milton Miller73785472007-10-24 18:23:48 +02007751 /* always unregister in case we don't destroy any domains */
7752 unregister_sched_domain_sysctl();
7753
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007754 /* Let architecture update cpu core mappings. */
7755 new_topology = arch_update_cpu_topology();
7756
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007757 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07007758
7759 /* Destroy deleted domains */
7760 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007761 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307762 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007763 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007764 goto match1;
7765 }
7766 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307767 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07007768match1:
7769 ;
7770 }
7771
Max Krasnyanskye761b772008-07-15 04:43:49 -07007772 if (doms_new == NULL) {
7773 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307774 doms_new = &fallback_doms;
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007775 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08007776 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007777 }
7778
Paul Jackson029190c2007-10-18 23:40:20 -07007779 /* Build new domains */
7780 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007781 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307782 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007783 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007784 goto match2;
7785 }
7786 /* no match - add a new doms_new */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307787 __build_sched_domains(doms_new[i],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007788 dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07007789match2:
7790 ;
7791 }
7792
7793 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307794 if (doms_cur != &fallback_doms)
7795 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007796 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07007797 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007798 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07007799 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02007800
7801 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007802
Heiko Carstens712555e2008-04-28 11:33:07 +02007803 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07007804}
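/*
 * Illustrative usage sketch: a caller such as the cpuset code repartitions
 * the system roughly as below. The two-way node split is a made-up example;
 * only alloc_sched_domains() and partition_sched_domains() are the real
 * interfaces defined above.
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpumask_of_node(0));
 *		cpumask_copy(doms[1], cpumask_of_node(1));
 *		get_online_cpus();
 *		partition_sched_domains(2, doms, NULL);
 *		put_online_cpus();
 *	}
 *
 * partition_sched_domains() takes ownership of 'doms' and will free it with
 * free_sched_domains() when a later repartitioning replaces it.
 */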
7805
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007806#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Li Zefanc70f22d2009-01-05 19:07:50 +08007807static void arch_reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007808{
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007809 get_online_cpus();
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007810
7811 /* Destroy domains first to force the rebuild */
7812 partition_sched_domains(0, NULL, NULL);
7813
Max Krasnyanskye761b772008-07-15 04:43:49 -07007814 rebuild_sched_domains();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007815 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007816}
7817
7818static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7819{
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307820 unsigned int level = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007821
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307822 if (sscanf(buf, "%u", &level) != 1)
7823 return -EINVAL;
7824
7825 /*
7826	 * level is always non-negative (it is unsigned), so don't check for
7827	 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
7828	 * It is still unclear what happens on a 0 or 1 byte write;
7829	 * do we need to check count as well?
7830 */
7831
7832 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007833 return -EINVAL;
7834
7835 if (smt)
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307836 sched_smt_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007837 else
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307838 sched_mc_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007839
Li Zefanc70f22d2009-01-05 19:07:50 +08007840 arch_reinit_sched_domains();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007841
Li Zefanc70f22d2009-01-05 19:07:50 +08007842 return count;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007843}
7844
Adrian Bunk6707de002007-08-12 18:08:19 +02007845#ifdef CONFIG_SCHED_MC
Andi Kleenf718cd42008-07-29 22:33:52 -07007846static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007847 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007848 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007849{
7850 return sprintf(page, "%u\n", sched_mc_power_savings);
7851}
Andi Kleenf718cd42008-07-29 22:33:52 -07007852static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007853 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007854 const char *buf, size_t count)
7855{
7856 return sched_power_savings_store(buf, count, 0);
7857}
Andi Kleenf718cd42008-07-29 22:33:52 -07007858static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7859 sched_mc_power_savings_show,
7860 sched_mc_power_savings_store);
Adrian Bunk6707de002007-08-12 18:08:19 +02007861#endif
7862
7863#ifdef CONFIG_SCHED_SMT
Andi Kleenf718cd42008-07-29 22:33:52 -07007864static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007865 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007866 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007867{
7868 return sprintf(page, "%u\n", sched_smt_power_savings);
7869}
Andi Kleenf718cd42008-07-29 22:33:52 -07007870static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007871 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007872 const char *buf, size_t count)
7873{
7874 return sched_power_savings_store(buf, count, 1);
7875}
Andi Kleenf718cd42008-07-29 22:33:52 -07007876static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7877 sched_smt_power_savings_show,
Adrian Bunk6707de002007-08-12 18:08:19 +02007878 sched_smt_power_savings_store);
7879#endif
7880
Li Zefan39aac642009-01-05 19:18:02 +08007881int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007882{
7883 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007884
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007885#ifdef CONFIG_SCHED_SMT
7886 if (smt_capable())
7887 err = sysfs_create_file(&cls->kset.kobj,
7888 &attr_sched_smt_power_savings.attr);
7889#endif
7890#ifdef CONFIG_SCHED_MC
7891 if (!err && mc_capable())
7892 err = sysfs_create_file(&cls->kset.kobj,
7893 &attr_sched_mc_power_savings.attr);
7894#endif
7895 return err;
7896}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007897#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
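/*
 * Usage note (assuming the attributes above are registered on the cpu sysdev
 * class, i.e. appear under /sys/devices/system/cpu/): a power-savings level
 * is selected from userspace with something like
 *
 *	echo 1 > /sys/devices/system/cpu/sched_mc_power_savings
 *
 * Valid levels are 0 to MAX_POWERSAVINGS_BALANCE_LEVELS - 1; anything else
 * makes sched_power_savings_store() return -EINVAL.
 */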
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007898
Linus Torvalds1da177e2005-04-16 15:20:36 -07007899/*
Tejun Heo3a101d02010-06-08 21:40:36 +02007900 * Update cpusets according to cpu_active mask. If cpusets are
7901 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7902 * around partition_sched_domains().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007903 */
Tejun Heo0b2e9182010-06-21 23:53:31 +02007904static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7905 void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007906{
Tejun Heo3a101d02010-06-08 21:40:36 +02007907 switch (action & ~CPU_TASKS_FROZEN) {
Max Krasnyanskye761b772008-07-15 04:43:49 -07007908 case CPU_ONLINE:
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007909 case CPU_DOWN_FAILED:
Tejun Heo3a101d02010-06-08 21:40:36 +02007910 cpuset_update_active_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007911 return NOTIFY_OK;
Max Krasnyanskye761b772008-07-15 04:43:49 -07007912 default:
7913 return NOTIFY_DONE;
7914 }
7915}
Tejun Heo3a101d02010-06-08 21:40:36 +02007916
Tejun Heo0b2e9182010-06-21 23:53:31 +02007917static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7918 void *hcpu)
Tejun Heo3a101d02010-06-08 21:40:36 +02007919{
7920 switch (action & ~CPU_TASKS_FROZEN) {
7921 case CPU_DOWN_PREPARE:
7922 cpuset_update_active_cpus();
7923 return NOTIFY_OK;
7924 default:
7925 return NOTIFY_DONE;
7926 }
7927}
Max Krasnyanskye761b772008-07-15 04:43:49 -07007928
7929static int update_runtime(struct notifier_block *nfb,
7930 unsigned long action, void *hcpu)
7931{
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007932 int cpu = (int)(long)hcpu;
7933
Linus Torvalds1da177e2005-04-16 15:20:36 -07007934 switch (action) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007935 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007936 case CPU_DOWN_PREPARE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007937 disable_runtime(cpu_rq(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007938 return NOTIFY_OK;
7939
Linus Torvalds1da177e2005-04-16 15:20:36 -07007940 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007941 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007942 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007943 case CPU_ONLINE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007944 enable_runtime(cpu_rq(cpu));
Max Krasnyanskye761b772008-07-15 04:43:49 -07007945 return NOTIFY_OK;
7946
Linus Torvalds1da177e2005-04-16 15:20:36 -07007947 default:
7948 return NOTIFY_DONE;
7949 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007950}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007951
7952void __init sched_init_smp(void)
7953{
Rusty Russelldcc30a32008-11-25 02:35:12 +10307954 cpumask_var_t non_isolated_cpus;
7955
7956 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08007957 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007958
Mike Travis434d53b2008-04-04 18:11:04 -07007959#if defined(CONFIG_NUMA)
7960 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7961 GFP_KERNEL);
7962 BUG_ON(sched_group_nodes_bycpu == NULL);
7963#endif
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007964 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02007965 mutex_lock(&sched_domains_mutex);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007966 arch_init_sched_domains(cpu_active_mask);
Rusty Russelldcc30a32008-11-25 02:35:12 +10307967 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7968 if (cpumask_empty(non_isolated_cpus))
7969 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02007970 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007971 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007972
Tejun Heo3a101d02010-06-08 21:40:36 +02007973 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7974 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007975
7976 /* RT runtime code needs to handle some hotplug events */
7977 hotcpu_notifier(update_runtime, 0);
7978
Peter Zijlstrab328ca12008-04-29 10:02:46 +02007979 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07007980
7981 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10307982 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07007983 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01007984 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10307985 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10307986
Rusty Russell0e3900e2008-11-25 02:35:13 +10307987 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007988}
7989#else
7990void __init sched_init_smp(void)
7991{
Ingo Molnar19978ca2007-11-09 22:39:38 +01007992 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007993}
7994#endif /* CONFIG_SMP */
7995
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05307996const_debug unsigned int sysctl_timer_migration = 1;
7997
Linus Torvalds1da177e2005-04-16 15:20:36 -07007998int in_sched_functions(unsigned long addr)
7999{
Linus Torvalds1da177e2005-04-16 15:20:36 -07008000 return in_lock_functions(addr) ||
8001 (addr >= (unsigned long)__sched_text_start
8002 && addr < (unsigned long)__sched_text_end);
8003}
8004
Alexey Dobriyana9957442007-10-15 17:00:13 +02008005static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02008006{
8007 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02008008 INIT_LIST_HEAD(&cfs_rq->tasks);
Ingo Molnardd41f592007-07-09 18:51:59 +02008009#ifdef CONFIG_FAIR_GROUP_SCHED
8010 cfs_rq->rq = rq;
Paul Turnerf07333b2011-01-21 20:45:03 -08008011 /* allow initial update_cfs_load() to truncate */
Peter Zijlstra6ea72f12011-01-26 13:36:03 +01008012#ifdef CONFIG_SMP
Paul Turnerf07333b2011-01-21 20:45:03 -08008013 cfs_rq->load_stamp = 1;
Ingo Molnardd41f592007-07-09 18:51:59 +02008014#endif
Peter Zijlstra6ea72f12011-01-26 13:36:03 +01008015#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02008016 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02008017}
8018
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008019static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8020{
8021 struct rt_prio_array *array;
8022 int i;
8023
8024 array = &rt_rq->active;
8025 for (i = 0; i < MAX_RT_PRIO; i++) {
8026 INIT_LIST_HEAD(array->queue + i);
8027 __clear_bit(i, array->bitmap);
8028 }
8029 /* delimiter for bitsearch: */
8030 __set_bit(MAX_RT_PRIO, array->bitmap);
8031
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008032#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -05008033 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins398a1532009-01-14 09:10:04 -05008034#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -05008035 rt_rq->highest_prio.next = MAX_RT_PRIO;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01008036#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008037#endif
8038#ifdef CONFIG_SMP
8039 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008040 rt_rq->overloaded = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008041 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008042#endif
8043
8044 rt_rq->rt_time = 0;
8045 rt_rq->rt_throttled = 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008046 rt_rq->rt_runtime = 0;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008047 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008048
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008049#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01008050 rt_rq->rt_nr_boosted = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008051 rt_rq->rq = rq;
8052#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008053}
8054
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008055#ifdef CONFIG_FAIR_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008056static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008057 struct sched_entity *se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008058 struct sched_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008059{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008060 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008061 tg->cfs_rq[cpu] = cfs_rq;
8062 init_cfs_rq(cfs_rq, rq);
8063 cfs_rq->tg = tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008064
8065 tg->se[cpu] = se;
Yong Zhang07e06b02011-01-07 15:17:36 +08008066 /* se could be NULL for root_task_group */
Dhaval Giani354d60c2008-04-19 19:44:59 +02008067 if (!se)
8068 return;
8069
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008070 if (!parent)
8071 se->cfs_rq = &rq->cfs;
8072 else
8073 se->cfs_rq = parent->my_q;
8074
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008075 se->my_q = cfs_rq;
Paul Turner94371782010-11-15 15:47:10 -08008076 update_load_set(&se->load, 0);
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008077 se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008078}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008079#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008080
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008081#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008082static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008083 struct sched_rt_entity *rt_se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008084 struct sched_rt_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008085{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008086 struct rq *rq = cpu_rq(cpu);
8087
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008088 tg->rt_rq[cpu] = rt_rq;
8089 init_rt_rq(rt_rq, rq);
8090 rt_rq->tg = tg;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008091 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008092
8093 tg->rt_se[cpu] = rt_se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02008094 if (!rt_se)
8095 return;
8096
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008097 if (!parent)
8098 rt_se->rt_rq = &rq->rt;
8099 else
8100 rt_se->rt_rq = parent->my_q;
8101
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008102 rt_se->my_q = rt_rq;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008103 rt_se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008104 INIT_LIST_HEAD(&rt_se->run_list);
8105}
8106#endif
8107
Linus Torvalds1da177e2005-04-16 15:20:36 -07008108void __init sched_init(void)
8109{
Ingo Molnardd41f592007-07-09 18:51:59 +02008110 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07008111 unsigned long alloc_size = 0, ptr;
8112
8113#ifdef CONFIG_FAIR_GROUP_SCHED
8114 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8115#endif
8116#ifdef CONFIG_RT_GROUP_SCHED
8117 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8118#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308119#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10308120 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308121#endif
Mike Travis434d53b2008-04-04 18:11:04 -07008122 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03008123 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07008124
8125#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008126 root_task_group.se = (struct sched_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008127 ptr += nr_cpu_ids * sizeof(void **);
8128
Yong Zhang07e06b02011-01-07 15:17:36 +08008129 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008130 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02008131
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008132#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07008133#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008134 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008135 ptr += nr_cpu_ids * sizeof(void **);
8136
Yong Zhang07e06b02011-01-07 15:17:36 +08008137 root_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02008138 ptr += nr_cpu_ids * sizeof(void **);
8139
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008140#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308141#ifdef CONFIG_CPUMASK_OFFSTACK
8142 for_each_possible_cpu(i) {
8143 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
8144 ptr += cpumask_size();
8145 }
8146#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07008147 }
Ingo Molnardd41f592007-07-09 18:51:59 +02008148
Gregory Haskins57d885f2008-01-25 21:08:18 +01008149#ifdef CONFIG_SMP
8150 init_defrootdomain();
8151#endif
8152
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008153 init_rt_bandwidth(&def_rt_bandwidth,
8154 global_rt_period(), global_rt_runtime());
8155
8156#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008157 init_rt_bandwidth(&root_task_group.rt_bandwidth,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008158 global_rt_period(), global_rt_runtime());
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008159#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008160
Dhaval Giani7c941432010-01-20 13:26:18 +01008161#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008162 list_add(&root_task_group.list, &task_groups);
8163 INIT_LIST_HEAD(&root_task_group.children);
Mike Galbraith5091faa2010-11-30 14:18:03 +01008164 autogroup_init(&init_task);
Dhaval Giani7c941432010-01-20 13:26:18 +01008165#endif /* CONFIG_CGROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008166
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08008167 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07008168 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008169
8170 rq = cpu_rq(i);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008171 raw_spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07008172 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008173 rq->calc_load_active = 0;
8174 rq->calc_load_update = jiffies + LOAD_FREQ;
Ingo Molnardd41f592007-07-09 18:51:59 +02008175 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008176 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008177#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008178 root_task_group.shares = root_task_group_load;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008179 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008180 /*
Yong Zhang07e06b02011-01-07 15:17:36 +08008181 * How much cpu bandwidth does root_task_group get?
Dhaval Giani354d60c2008-04-19 19:44:59 +02008182 *
8183	 * In case of task-groups formed through the cgroup filesystem, it
8184 * gets 100% of the cpu resources in the system. This overall
8185 * system cpu resource is divided among the tasks of
Yong Zhang07e06b02011-01-07 15:17:36 +08008186 * root_task_group and its child task-groups in a fair manner,
Dhaval Giani354d60c2008-04-19 19:44:59 +02008187 * based on each entity's (task or task-group's) weight
8188 * (se->load.weight).
8189 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008190 * In other words, if root_task_group has 10 tasks of weight
Dhaval Giani354d60c2008-04-19 19:44:59 +02008191	 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8192 * then A0's share of the cpu resource is:
8193 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02008194 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02008195 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008196 * We achieve this by letting root_task_group's tasks sit
8197 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
Dhaval Giani354d60c2008-04-19 19:44:59 +02008198 */
Yong Zhang07e06b02011-01-07 15:17:36 +08008199 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008200#endif /* CONFIG_FAIR_GROUP_SCHED */
8201
8202 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008203#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008204 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Yong Zhang07e06b02011-01-07 15:17:36 +08008205 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008206#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008207
Ingo Molnardd41f592007-07-09 18:51:59 +02008208 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
8209 rq->cpu_load[j] = 0;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07008210
8211 rq->last_load_update_tick = jiffies;
8212
Linus Torvalds1da177e2005-04-16 15:20:36 -07008213#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07008214 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01008215 rq->rd = NULL;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02008216 rq->cpu_power = SCHED_LOAD_SCALE;
Gregory Haskins3f029d32009-07-29 11:08:47 -04008217 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008218 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02008219 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008220 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07008221 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04008222 rq->online = 0;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01008223 rq->idle_stamp = 0;
8224 rq->avg_idle = 2*sysctl_sched_migration_cost;
Gregory Haskinsdc938522008-01-25 21:08:26 +01008225 rq_attach_root(rq, &def_root_domain);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008226#ifdef CONFIG_NO_HZ
8227 rq->nohz_balance_kick = 0;
8228 init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
8229#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008230#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01008231 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008232 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008233 }
8234
Peter Williams2dd73a42006-06-27 02:54:34 -07008235 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008236
Avi Kivitye107be32007-07-26 13:40:43 +02008237#ifdef CONFIG_PREEMPT_NOTIFIERS
8238 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8239#endif
8240
Christoph Lameterc9819f42006-12-10 02:20:25 -08008241#ifdef CONFIG_SMP
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008242 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
Christoph Lameterc9819f42006-12-10 02:20:25 -08008243#endif
8244
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008245#ifdef CONFIG_RT_MUTEXES
Thomas Gleixner1d615482009-11-17 14:54:03 +01008246 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008247#endif
8248
Linus Torvalds1da177e2005-04-16 15:20:36 -07008249 /*
8250 * The boot idle thread does lazy MMU switching as well:
8251 */
8252 atomic_inc(&init_mm.mm_count);
8253 enter_lazy_tlb(&init_mm, current);
8254
8255 /*
8256 * Make us the idle thread. Technically, schedule() should not be
8257	 * called from this thread; however, somewhere below it might be.
8258	 * Because we are the idle thread, we just pick up running again
8259 * when this runqueue becomes "idle".
8260 */
8261 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008262
8263 calc_load_update = jiffies + LOAD_FREQ;
8264
Ingo Molnardd41f592007-07-09 18:51:59 +02008265 /*
8266 * During early bootup we pretend to be a normal task:
8267 */
8268 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01008269
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308270 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
Rusty Russell49557e62009-11-02 20:37:20 +10308271 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308272#ifdef CONFIG_SMP
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308273#ifdef CONFIG_NO_HZ
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008274 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8275 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
8276 atomic_set(&nohz.load_balancer, nr_cpu_ids);
8277 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
8278 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308279#endif
Rusty Russellbdddd292009-12-02 14:09:16 +10308280 /* May be allocated at isolcpus cmdline parse time */
8281 if (cpu_isolated_map == NULL)
8282 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308283#endif /* SMP */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308284
Ingo Molnar6892b752008-02-13 14:02:36 +01008285 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008286}
8287
8288#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008289static inline int preempt_count_equals(int preempt_offset)
8290{
Frederic Weisbecker234da7b2009-12-16 20:21:05 +01008291 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008292
8293 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
8294}
8295
Simon Kagstromd8948372009-12-23 11:08:18 +01008296void __might_sleep(const char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008297{
Ingo Molnar48f24c42006-07-03 00:25:40 -07008298#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07008299 static unsigned long prev_jiffy; /* ratelimiting */
8300
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008301 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8302 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02008303 return;
8304 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8305 return;
8306 prev_jiffy = jiffies;
8307
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01008308 printk(KERN_ERR
8309 "BUG: sleeping function called from invalid context at %s:%d\n",
8310 file, line);
8311 printk(KERN_ERR
8312 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8313 in_atomic(), irqs_disabled(),
8314 current->pid, current->comm);
Ingo Molnaraef745f2008-08-28 11:34:43 +02008315
8316 debug_show_held_locks(current);
8317 if (irqs_disabled())
8318 print_irqtrace_events(current);
8319 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008320#endif
8321}
8322EXPORT_SYMBOL(__might_sleep);
8323#endif
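/*
 * Illustrative sketch: __might_sleep() is normally reached through the
 * might_sleep() macro, placed at the top of functions that may block.
 * The function below is hypothetical:
 *
 *	void my_driver_wait(struct mutex *lock)
 *	{
 *		might_sleep();
 *		mutex_lock(lock);
 *	}
 *
 * With CONFIG_DEBUG_SPINLOCK_SLEEP enabled, calling such a function from
 * atomic context triggers the "BUG: sleeping function called from invalid
 * context" report printed above.
 */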
8324
8325#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008326static void normalize_task(struct rq *rq, struct task_struct *p)
8327{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008328 const struct sched_class *prev_class = p->sched_class;
8329 int old_prio = p->prio;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008330 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02008331
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008332 on_rq = p->se.on_rq;
8333 if (on_rq)
8334 deactivate_task(rq, p, 0);
8335 __setscheduler(rq, p, SCHED_NORMAL, 0);
8336 if (on_rq) {
8337 activate_task(rq, p, 0);
8338 resched_task(rq->curr);
8339 }
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008340
8341 check_class_changed(rq, p, prev_class, old_prio);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008342}
8343
Linus Torvalds1da177e2005-04-16 15:20:36 -07008344void normalize_rt_tasks(void)
8345{
Ingo Molnara0f98a12007-06-17 18:37:45 +02008346 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008347 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07008348 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008349
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008350 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008351 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02008352 /*
8353 * Only normalize user tasks:
8354 */
8355 if (!p->mm)
8356 continue;
8357
Ingo Molnardd41f592007-07-09 18:51:59 +02008358 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008359#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03008360 p->se.statistics.wait_start = 0;
8361 p->se.statistics.sleep_start = 0;
8362 p->se.statistics.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008363#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02008364
8365 if (!rt_task(p)) {
8366 /*
8367 * Renice negative nice level userspace
8368 * tasks back to 0:
8369 */
8370 if (TASK_NICE(p) < 0 && p->mm)
8371 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008372 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02008373 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008374
Thomas Gleixner1d615482009-11-17 14:54:03 +01008375 raw_spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07008376 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008377
Ingo Molnar178be792007-10-15 17:00:18 +02008378 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008379
Ingo Molnarb29739f2006-06-27 02:54:51 -07008380 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01008381 raw_spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008382 } while_each_thread(g, p);
8383
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008384 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008385}
8386
8387#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07008388
Jason Wessel67fc4e02010-05-20 21:04:21 -05008389#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008390/*
Jason Wessel67fc4e02010-05-20 21:04:21 -05008391 * These functions are only useful for the IA64 MCA handling, or kdb.
Linus Torvalds1df5c102005-09-12 07:59:21 -07008392 *
8393 * They can only be called when the whole system has been
8394 * stopped - every CPU needs to be quiescent, and no scheduling
8395 * activity can take place. Using them for anything else would
8396 * be a serious bug, and as a result, they aren't even visible
8397 * under any other configuration.
8398 */
8399
8400/**
8401 * curr_task - return the current task for a given cpu.
8402 * @cpu: the processor in question.
8403 *
8404 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8405 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008406struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008407{
8408 return cpu_curr(cpu);
8409}
8410
Jason Wessel67fc4e02010-05-20 21:04:21 -05008411#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8412
8413#ifdef CONFIG_IA64
Linus Torvalds1df5c102005-09-12 07:59:21 -07008414/**
8415 * set_curr_task - set the current task for a given cpu.
8416 * @cpu: the processor in question.
8417 * @p: the task pointer to set.
8418 *
8419 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008420 * are serviced on a separate stack. It allows the architecture to switch the
8421 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07008422 * must be called with all CPU's synchronized, and interrupts disabled, the
8423 * and caller must save the original value of the current task (see
8424 * curr_task() above) and restore that value before reenabling interrupts and
8425 * re-starting the system.
8426 *
8427 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8428 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008429void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008430{
8431 cpu_curr(cpu) = p;
8432}
8433
8434#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008435
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008436#ifdef CONFIG_FAIR_GROUP_SCHED
8437static void free_fair_sched_group(struct task_group *tg)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008438{
8439 int i;
8440
8441 for_each_possible_cpu(i) {
8442 if (tg->cfs_rq)
8443 kfree(tg->cfs_rq[i]);
8444 if (tg->se)
8445 kfree(tg->se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008446 }
8447
8448 kfree(tg->cfs_rq);
8449 kfree(tg->se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008450}
8451
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008452static
8453int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008454{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008455 struct cfs_rq *cfs_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008456 struct sched_entity *se;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008457 struct rq *rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008458 int i;
8459
Mike Travis434d53b2008-04-04 18:11:04 -07008460 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008461 if (!tg->cfs_rq)
8462 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008463 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008464 if (!tg->se)
8465 goto err;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008466
8467 tg->shares = NICE_0_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008468
8469 for_each_possible_cpu(i) {
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008470 rq = cpu_rq(i);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008471
Li Zefaneab17222008-10-29 17:03:22 +08008472 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8473 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008474 if (!cfs_rq)
8475 goto err;
8476
Li Zefaneab17222008-10-29 17:03:22 +08008477 se = kzalloc_node(sizeof(struct sched_entity),
8478 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008479 if (!se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008480 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008481
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008482 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008483 }
8484
8485 return 1;
8486
Peter Zijlstra49246272010-10-17 21:46:10 +02008487err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008488 kfree(cfs_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008489err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008490 return 0;
8491}
8492
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008493static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8494{
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008495 struct rq *rq = cpu_rq(cpu);
8496 unsigned long flags;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008497
8498 /*
8499	 * Only empty task groups can be destroyed, so we can speculatively
8500 * check on_list without danger of it being re-added.
8501 */
8502 if (!tg->cfs_rq[cpu]->on_list)
8503 return;
8504
8505 raw_spin_lock_irqsave(&rq->lock, flags);
Paul Turner822bc182010-11-29 16:55:40 -08008506 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008507 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008508}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008509#else /* !CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008510static inline void free_fair_sched_group(struct task_group *tg)
8511{
8512}
8513
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008514static inline
8515int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008516{
8517 return 1;
8518}
8519
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008520static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8521{
8522}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008523#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008524
8525#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008526static void free_rt_sched_group(struct task_group *tg)
8527{
8528 int i;
8529
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008530 destroy_rt_bandwidth(&tg->rt_bandwidth);
8531
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008532 for_each_possible_cpu(i) {
8533 if (tg->rt_rq)
8534 kfree(tg->rt_rq[i]);
8535 if (tg->rt_se)
8536 kfree(tg->rt_se[i]);
8537 }
8538
8539 kfree(tg->rt_rq);
8540 kfree(tg->rt_se);
8541}
8542
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008543static
8544int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008545{
8546 struct rt_rq *rt_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008547 struct sched_rt_entity *rt_se;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008548 struct rq *rq;
8549 int i;
8550
Mike Travis434d53b2008-04-04 18:11:04 -07008551 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008552 if (!tg->rt_rq)
8553 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008554 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008555 if (!tg->rt_se)
8556 goto err;
8557
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008558 init_rt_bandwidth(&tg->rt_bandwidth,
8559 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008560
8561 for_each_possible_cpu(i) {
8562 rq = cpu_rq(i);
8563
Li Zefaneab17222008-10-29 17:03:22 +08008564 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8565 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008566 if (!rt_rq)
8567 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008568
Li Zefaneab17222008-10-29 17:03:22 +08008569 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8570 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008571 if (!rt_se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008572 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008573
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008574 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008575 }
8576
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008577 return 1;
8578
Peter Zijlstra49246272010-10-17 21:46:10 +02008579err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008580 kfree(rt_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008581err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008582 return 0;
8583}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008584#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008585static inline void free_rt_sched_group(struct task_group *tg)
8586{
8587}
8588
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008589static inline
8590int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008591{
8592 return 1;
8593}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008594#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008595
Dhaval Giani7c941432010-01-20 13:26:18 +01008596#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008597static void free_sched_group(struct task_group *tg)
8598{
8599 free_fair_sched_group(tg);
8600 free_rt_sched_group(tg);
Mike Galbraithe9aa1dd2011-01-05 11:11:25 +01008601 autogroup_free(tg);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008602 kfree(tg);
8603}
8604
8605/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008606struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008607{
8608 struct task_group *tg;
8609 unsigned long flags;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008610
8611 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8612 if (!tg)
8613 return ERR_PTR(-ENOMEM);
8614
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008615 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008616 goto err;
8617
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008618 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008619 goto err;
8620
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008621 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008622 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008623
8624 WARN_ON(!parent); /* root should already exist */
8625
8626 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008627 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242008-08-14 15:56:40 +08008628	list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008629 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008630
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008631 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008632
8633err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008634 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008635 return ERR_PTR(-ENOMEM);
8636}
8637
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008638/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008639static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008640{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008641 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008642 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008643}
8644
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008645/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008646void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008647{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008648 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008649 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008650
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008651 /* end participation in shares distribution */
8652 for_each_possible_cpu(i)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008653 unregister_fair_sched_group(tg, i);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008654
8655 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008656 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008657 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008658 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008659
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008660	/* wait for possible concurrent references to cfs_rqs to complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008661 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008662}
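/*
 * A note on the teardown ordering above (a descriptive sketch of the
 * code as written, not an additional API): this is the classic RCU
 * deletion pattern. The group is first unlinked from every RCU-protected
 * list under task_group_lock, and only after a grace period does
 * free_sched_group_rcu() run, so lockless walkers such as
 * walk_tg_tree() and list_for_each_entry_rcu() users can still safely
 * traverse the old structures in the meantime.
 */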
8663
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008664/* change task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02008665 * The caller of this function should have put the task in its new group
8666 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8667 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008668 */
8669void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008670{
8671 int on_rq, running;
8672 unsigned long flags;
8673 struct rq *rq;
8674
8675 rq = task_rq_lock(tsk, &flags);
8676
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01008677 running = task_current(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008678 on_rq = tsk->se.on_rq;
8679
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008680 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008681 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008682 if (unlikely(running))
8683 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008684
Peter Zijlstra810b3812008-02-29 15:21:01 -05008685#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008686 if (tsk->sched_class->task_move_group)
8687 tsk->sched_class->task_move_group(tsk, on_rq);
8688 else
Peter Zijlstra810b3812008-02-29 15:21:01 -05008689#endif
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008690 set_task_rq(tsk, task_cpu(tsk));
Peter Zijlstra810b3812008-02-29 15:21:01 -05008691
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008692 if (unlikely(running))
8693 tsk->sched_class->set_curr_task(rq);
8694 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01008695 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008696
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008697 task_rq_unlock(rq, &flags);
8698}
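/*
 * The move protocol above, summarized (descriptive only; it restates the
 * code as written):
 *
 *	if queued:	dequeue_task(rq, tsk, 0);
 *	if running:	put_prev_task(rq, tsk);
 *	...switch tsk's group (set_task_rq() / task_move_group())...
 *	if running:	set_curr_task(rq);
 *	if queued:	enqueue_task(rq, tsk, 0);
 *
 * so the task is never enqueued on a cfs_rq/rt_rq while its
 * se.cfs_rq/rt.rt_rq pointers are being switched to the new group.
 */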
Dhaval Giani7c941432010-01-20 13:26:18 +01008699#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008700
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008701#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008702static DEFINE_MUTEX(shares_mutex);
8703
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008704int sched_group_set_shares(struct task_group *tg, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008705{
8706 int i;
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008707 unsigned long flags;
Ingo Molnarc61935f2008-01-22 11:24:58 +01008708
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008709 /*
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008710 * We can't change the weight of the root cgroup.
8711 */
8712 if (!tg->se[0])
8713 return -EINVAL;
8714
Peter Zijlstra18d95a22008-04-19 19:45:00 +02008715 if (shares < MIN_SHARES)
8716 shares = MIN_SHARES;
Miao Xiecb4ad1f2008-04-28 12:54:56 +08008717 else if (shares > MAX_SHARES)
8718 shares = MAX_SHARES;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008719
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008720 mutex_lock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008721 if (tg->shares == shares)
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008722 goto done;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008723
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008724 tg->shares = shares;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008725 for_each_possible_cpu(i) {
Paul Turner94371782010-11-15 15:47:10 -08008726 struct rq *rq = cpu_rq(i);
8727 struct sched_entity *se;
8728
8729 se = tg->se[i];
8730 /* Propagate contribution to hierarchy */
8731 raw_spin_lock_irqsave(&rq->lock, flags);
8732 for_each_sched_entity(se)
Paul Turner6d5ab292011-01-21 20:45:01 -08008733 update_cfs_shares(group_cfs_rq(se));
Paul Turner94371782010-11-15 15:47:10 -08008734 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008735 }
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008736
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008737done:
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008738 mutex_unlock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008739 return 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008740}
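/*
 * Illustrative example (the weight values are assumptions chosen for the
 * example, clamped to [MIN_SHARES, MAX_SHARES] by the code above): shares
 * are relative CPU weights. With two sibling groups on one CPU,
 *
 *	sched_group_set_shares(A, 2048);
 *	sched_group_set_shares(B, 1024);
 *
 * A's runnable tasks collectively receive roughly 2/3 of the CPU time and
 * B's roughly 1/3, since CFS distributes time in proportion to weight.
 */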
8741
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008742unsigned long sched_group_shares(struct task_group *tg)
8743{
8744 return tg->shares;
8745}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008746#endif
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008747
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008748#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008749/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008750 * Ensure that the real-time constraints are schedulable.
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008751 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008752static DEFINE_MUTEX(rt_constraints_mutex);
8753
8754static unsigned long to_ratio(u64 period, u64 runtime)
8755{
8756 if (runtime == RUNTIME_INF)
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008757 return 1ULL << 20;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008758
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008759 return div64_u64(runtime << 20, period);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008760}
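/*
 * Worked example (arithmetic only; the 1s period / 0.95s runtime values
 * are the usual global defaults, assumed here for illustration):
 * to_ratio() expresses bandwidth as a 20-bit fixed-point fraction, so
 *
 *	to_ratio(1000000000, 950000000)
 *		= (950000000 << 20) / 1000000000
 *		~= 0.95 * 2^20 ~= 996147
 *
 * while RUNTIME_INF maps to exactly 1.0, i.e. 1 << 20.
 */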
8761
Dhaval Giani521f1a242008-02-28 15:21:56 +05308762/* Must be called with tasklist_lock held */
8763static inline int tg_has_rt_tasks(struct task_group *tg)
8764{
8765 struct task_struct *g, *p;
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008766
Dhaval Giani521f1a242008-02-28 15:21:56 +05308767 do_each_thread(g, p) {
8768 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8769 return 1;
8770 } while_each_thread(g, p);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008771
Dhaval Giani521f1a242008-02-28 15:21:56 +05308772 return 0;
8773}
8774
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008775struct rt_schedulable_data {
8776 struct task_group *tg;
8777 u64 rt_period;
8778 u64 rt_runtime;
8779};
8780
8781static int tg_schedulable(struct task_group *tg, void *data)
8782{
8783 struct rt_schedulable_data *d = data;
8784 struct task_group *child;
8785 unsigned long total, sum = 0;
8786 u64 period, runtime;
8787
8788 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8789 runtime = tg->rt_bandwidth.rt_runtime;
8790
8791 if (tg == d->tg) {
8792 period = d->rt_period;
8793 runtime = d->rt_runtime;
8794 }
8795
Peter Zijlstra4653f802008-09-23 15:33:44 +02008796 /*
8797 * Cannot have more runtime than the period.
8798 */
8799 if (runtime > period && runtime != RUNTIME_INF)
8800 return -EINVAL;
8801
8802 /*
8803 * Ensure we don't starve existing RT tasks.
8804 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008805 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8806 return -EBUSY;
8807
8808 total = to_ratio(period, runtime);
8809
Peter Zijlstra4653f802008-09-23 15:33:44 +02008810 /*
8811 * Nobody can have more than the global setting allows.
8812 */
8813 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8814 return -EINVAL;
8815
8816 /*
8817 * The sum of our children's runtime should not exceed our own.
8818 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008819 list_for_each_entry_rcu(child, &tg->children, siblings) {
8820 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8821 runtime = child->rt_bandwidth.rt_runtime;
8822
8823 if (child == d->tg) {
8824 period = d->rt_period;
8825 runtime = d->rt_runtime;
8826 }
8827
8828 sum += to_ratio(period, runtime);
8829 }
8830
8831 if (sum > total)
8832 return -EINVAL;
8833
8834 return 0;
8835}
8836
8837static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8838{
8839 struct rt_schedulable_data data = {
8840 .tg = tg,
8841 .rt_period = period,
8842 .rt_runtime = runtime,
8843 };
8844
8845 return walk_tg_tree(tg_schedulable, tg_nop, &data);
8846}
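/*
 * A numeric sketch of the hierarchy check (hypothetical groups, not from
 * the source): with the global limit at 0.95 of each period, a group G
 * with runtime/period = 0.5 may have children C1 = 0.3 and C2 = 0.2,
 * since 0.3 + 0.2 <= 0.5 <= 0.95. Adding a child C3 = 0.1 would push the
 * children's sum to 0.6 > 0.5, and tg_schedulable() on G would then
 * return -EINVAL when walk_tg_tree() visits it.
 */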
8847
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008848static int tg_set_bandwidth(struct task_group *tg,
8849 u64 rt_period, u64 rt_runtime)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008850{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008851 int i, err = 0;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008852
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008853 mutex_lock(&rt_constraints_mutex);
Dhaval Giani521f1a242008-02-28 15:21:56 +05308854 read_lock(&tasklist_lock);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008855 err = __rt_schedulable(tg, rt_period, rt_runtime);
8856 if (err)
Dhaval Giani521f1a242008-02-28 15:21:56 +05308857 goto unlock;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008858
Thomas Gleixner0986b112009-11-17 15:32:06 +01008859 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008860 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8861 tg->rt_bandwidth.rt_runtime = rt_runtime;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008862
8863 for_each_possible_cpu(i) {
8864 struct rt_rq *rt_rq = tg->rt_rq[i];
8865
Thomas Gleixner0986b112009-11-17 15:32:06 +01008866 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008867 rt_rq->rt_runtime = rt_runtime;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008868 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008869 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008870 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstra49246272010-10-17 21:46:10 +02008871unlock:
Dhaval Giani521f1a242008-02-28 15:21:56 +05308872 read_unlock(&tasklist_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008873 mutex_unlock(&rt_constraints_mutex);
8874
8875 return err;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008876}
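/*
 * Lock ordering used above, derived from the code as written:
 *
 *	rt_constraints_mutex
 *	  -> tasklist_lock (read)
 *	    -> tg->rt_bandwidth.rt_runtime_lock (irq-safe)
 *	      -> rt_rq->rt_runtime_lock
 *
 * The new bandwidth is validated against the whole hierarchy first and
 * only then propagated to every per-cpu rt_rq under the bandwidth lock.
 */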
8877
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008878int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8879{
8880 u64 rt_runtime, rt_period;
8881
8882 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8883 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8884 if (rt_runtime_us < 0)
8885 rt_runtime = RUNTIME_INF;
8886
8887 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8888}
8889
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008890long sched_group_rt_runtime(struct task_group *tg)
8891{
8892 u64 rt_runtime_us;
8893
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008894 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008895 return -1;
8896
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008897 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008898 do_div(rt_runtime_us, NSEC_PER_USEC);
8899 return rt_runtime_us;
8900}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008901
8902int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8903{
8904 u64 rt_runtime, rt_period;
8905
8906 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8907 rt_runtime = tg->rt_bandwidth.rt_runtime;
8908
Raistlin619b0482008-06-26 18:54:09 +02008909 if (rt_period == 0)
8910 return -EINVAL;
8911
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008912 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8913}
8914
8915long sched_group_rt_period(struct task_group *tg)
8916{
8917 u64 rt_period_us;
8918
8919 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8920 do_div(rt_period_us, NSEC_PER_USEC);
8921 return rt_period_us;
8922}
8923
8924static int sched_rt_global_constraints(void)
8925{
Peter Zijlstra4653f802008-09-23 15:33:44 +02008926 u64 runtime, period;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008927 int ret = 0;
8928
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008929 if (sysctl_sched_rt_period <= 0)
8930 return -EINVAL;
8931
Peter Zijlstra4653f802008-09-23 15:33:44 +02008932 runtime = global_rt_runtime();
8933 period = global_rt_period();
8934
8935 /*
8936 * Sanity check on the sysctl variables.
8937 */
8938 if (runtime > period && runtime != RUNTIME_INF)
8939 return -EINVAL;
Peter Zijlstra10b612f2008-06-19 14:22:27 +02008940
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008941 mutex_lock(&rt_constraints_mutex);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008942 read_lock(&tasklist_lock);
Peter Zijlstra4653f802008-09-23 15:33:44 +02008943 ret = __rt_schedulable(NULL, 0, 0);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008944 read_unlock(&tasklist_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008945 mutex_unlock(&rt_constraints_mutex);
8946
8947 return ret;
8948}
Dhaval Giani54e99122009-02-27 15:13:54 +05308949
8950int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8951{
8952 /* Don't accept realtime tasks when there is no way for them to run */
8953 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8954 return 0;
8955
8956 return 1;
8957}
8958
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008959#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008960static int sched_rt_global_constraints(void)
8961{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008962 unsigned long flags;
8963 int i;
8964
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008965 if (sysctl_sched_rt_period <= 0)
8966 return -EINVAL;
8967
Peter Zijlstra60aa6052009-05-05 17:50:21 +02008968 /*
8969 * There's always some RT tasks in the root group
8970 * -- migration, kstopmachine etc..
8971 */
8972 if (sysctl_sched_rt_runtime == 0)
8973 return -EBUSY;
8974
Thomas Gleixner0986b112009-11-17 15:32:06 +01008975 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008976 for_each_possible_cpu(i) {
8977 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8978
Thomas Gleixner0986b112009-11-17 15:32:06 +01008979 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008980 rt_rq->rt_runtime = global_rt_runtime();
Thomas Gleixner0986b112009-11-17 15:32:06 +01008981 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008982 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008983 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008984
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008985 return 0;
8986}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008987#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008988
8989int sched_rt_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07008990 void __user *buffer, size_t *lenp,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008991 loff_t *ppos)
8992{
8993 int ret;
8994 int old_period, old_runtime;
8995 static DEFINE_MUTEX(mutex);
8996
8997 mutex_lock(&mutex);
8998 old_period = sysctl_sched_rt_period;
8999 old_runtime = sysctl_sched_rt_runtime;
9000
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07009001 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009002
9003 if (!ret && write) {
9004 ret = sched_rt_global_constraints();
9005 if (ret) {
9006 sysctl_sched_rt_period = old_period;
9007 sysctl_sched_rt_runtime = old_runtime;
9008 } else {
9009 def_rt_bandwidth.rt_runtime = global_rt_runtime();
9010 def_rt_bandwidth.rt_period =
9011 ns_to_ktime(global_rt_period());
9012 }
9013 }
9014 mutex_unlock(&mutex);
9015
9016 return ret;
9017}
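/*
 * Typical userspace interaction (paths assume the conventional /proc
 * sysctl interface; the 1000000/950000 defaults are the usual ones):
 *
 *	# cat /proc/sys/kernel/sched_rt_period_us
 *	1000000
 *	# echo 900000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * A write that fails sched_rt_global_constraints() is rolled back to the
 * old period/runtime values, as the error path above shows.
 */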
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009018
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009019#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009020
9021/* return corresponding task_group object of a cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +02009022static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009023{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009024 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
9025 struct task_group, css);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009026}
9027
9028static struct cgroup_subsys_state *
Paul Menage2b01dfe2007-10-24 18:23:50 +02009029cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009030{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009031 struct task_group *tg, *parent;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009032
Paul Menage2b01dfe2007-10-24 18:23:50 +02009033 if (!cgrp->parent) {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009034 /* This is early initialization for the top cgroup */
Yong Zhang07e06b02011-01-07 15:17:36 +08009035 return &root_task_group.css;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009036 }
9037
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009038 parent = cgroup_tg(cgrp->parent);
9039 tg = sched_create_group(parent);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009040 if (IS_ERR(tg))
9041 return ERR_PTR(-ENOMEM);
9042
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009043 return &tg->css;
9044}
9045
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009046static void
9047cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009048{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009049 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009050
9051 sched_destroy_group(tg);
9052}
9053
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009054static int
Ben Blumbe367d02009-09-23 15:56:31 -07009055cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009056{
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009057#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Giani54e99122009-02-27 15:13:54 +05309058 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009059 return -EINVAL;
9060#else
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009061 /* We don't support RT-tasks being in separate groups */
9062 if (tsk->sched_class != &fair_sched_class)
9063 return -EINVAL;
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009064#endif
Ben Blumbe367d02009-09-23 15:56:31 -07009065 return 0;
9066}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009067
Ben Blumbe367d02009-09-23 15:56:31 -07009068static int
9069cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9070 struct task_struct *tsk, bool threadgroup)
9071{
9072 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
9073 if (retval)
9074 return retval;
9075 if (threadgroup) {
9076 struct task_struct *c;
9077 rcu_read_lock();
9078 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
9079 retval = cpu_cgroup_can_attach_task(cgrp, c);
9080 if (retval) {
9081 rcu_read_unlock();
9082 return retval;
9083 }
9084 }
9085 rcu_read_unlock();
9086 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009087 return 0;
9088}
9089
9090static void
Paul Menage2b01dfe2007-10-24 18:23:50 +02009091cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Ben Blumbe367d02009-09-23 15:56:31 -07009092 struct cgroup *old_cont, struct task_struct *tsk,
9093 bool threadgroup)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009094{
9095 sched_move_task(tsk);
Ben Blumbe367d02009-09-23 15:56:31 -07009096 if (threadgroup) {
9097 struct task_struct *c;
9098 rcu_read_lock();
9099 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
9100 sched_move_task(c);
9101 }
9102 rcu_read_unlock();
9103 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009104}
9105
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009106static void
9107cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
9108{
9109 /*
9110 * cgroup_exit() is called in the copy_process() failure path.
9111	 * Ignore this case since the task hasn't run yet; this avoids
9112	 * trying to poke a half-freed task state from generic code.
9113 */
9114 if (!(task->flags & PF_EXITING))
9115 return;
9116
9117 sched_move_task(task);
9118}
9119
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009120#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagef4c753b2008-04-29 00:59:56 -07009121static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
Paul Menage2b01dfe2007-10-24 18:23:50 +02009122 u64 shareval)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009123{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009124 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009125}
9126
Paul Menagef4c753b2008-04-29 00:59:56 -07009127static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009128{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009129 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009130
9131 return (u64) tg->shares;
9132}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009133#endif /* CONFIG_FAIR_GROUP_SCHED */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009134
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009135#ifdef CONFIG_RT_GROUP_SCHED
Mirco Tischler0c708142008-05-14 16:05:46 -07009136static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
Paul Menage06ecb272008-04-29 01:00:06 -07009137 s64 val)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009138{
Paul Menage06ecb272008-04-29 01:00:06 -07009139 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009140}
9141
Paul Menage06ecb272008-04-29 01:00:06 -07009142static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009143{
Paul Menage06ecb272008-04-29 01:00:06 -07009144 return sched_group_rt_runtime(cgroup_tg(cgrp));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009145}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009146
9147static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
9148 u64 rt_period_us)
9149{
9150 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
9151}
9152
9153static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
9154{
9155 return sched_group_rt_period(cgroup_tg(cgrp));
9156}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009157#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009158
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009159static struct cftype cpu_files[] = {
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009160#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009161 {
9162 .name = "shares",
Paul Menagef4c753b2008-04-29 00:59:56 -07009163 .read_u64 = cpu_shares_read_u64,
9164 .write_u64 = cpu_shares_write_u64,
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009165 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009166#endif
9167#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009168 {
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01009169 .name = "rt_runtime_us",
Paul Menage06ecb272008-04-29 01:00:06 -07009170 .read_s64 = cpu_rt_runtime_read,
9171 .write_s64 = cpu_rt_runtime_write,
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009172 },
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009173 {
9174 .name = "rt_period_us",
Paul Menagef4c753b2008-04-29 00:59:56 -07009175 .read_u64 = cpu_rt_period_read_uint,
9176 .write_u64 = cpu_rt_period_write_uint,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009177 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009178#endif
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009179};
9180
9181static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
9182{
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009183 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009184}
9185
9186struct cgroup_subsys cpu_cgroup_subsys = {
Ingo Molnar38605ca2007-10-29 21:18:11 +01009187 .name = "cpu",
9188 .create = cpu_cgroup_create,
9189 .destroy = cpu_cgroup_destroy,
9190 .can_attach = cpu_cgroup_can_attach,
9191 .attach = cpu_cgroup_attach,
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009192 .exit = cpu_cgroup_exit,
Ingo Molnar38605ca2007-10-29 21:18:11 +01009193 .populate = cpu_cgroup_populate,
9194 .subsys_id = cpu_cgroup_subsys_id,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009195 .early_init = 1,
9196};
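/*
 * Userspace view, for illustration (the mount point is an assumption;
 * only the file names come from the cftype table above):
 *
 *	# mount -t cgroup -o cpu none /cgroup
 *	# mkdir /cgroup/mygroup
 *	# echo 2048 > /cgroup/mygroup/cpu.shares
 *	# echo 500000 > /cgroup/mygroup/cpu.rt_runtime_us
 *
 * cpu.shares exists only under CONFIG_FAIR_GROUP_SCHED and the rt_* files
 * only under CONFIG_RT_GROUP_SCHED.
 */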
9197
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009198#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009199
9200#ifdef CONFIG_CGROUP_CPUACCT
9201
9202/*
9203 * CPU accounting code for task groups.
9204 *
9205 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
9206 * (balbir@in.ibm.com).
9207 */
9208
Bharata B Rao934352f2008-11-10 20:41:13 +05309209/* track cpu usage of a group of tasks and its child groups */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009210struct cpuacct {
9211 struct cgroup_subsys_state css;
9212 /* cpuusage holds pointer to a u64-type object on every cpu */
Tejun Heo43cf38e2010-02-02 14:38:57 +09009213 u64 __percpu *cpuusage;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309214 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
Bharata B Rao934352f2008-11-10 20:41:13 +05309215 struct cpuacct *parent;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009216};
9217
9218struct cgroup_subsys cpuacct_subsys;
9219
9220/* return cpu accounting group corresponding to this container */
Dhaval Giani32cd7562008-02-29 10:02:43 +05309221static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009222{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309223 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009224 struct cpuacct, css);
9225}
9226
9227/* return cpu accounting group to which this task belongs */
9228static inline struct cpuacct *task_ca(struct task_struct *tsk)
9229{
9230 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
9231 struct cpuacct, css);
9232}
9233
9234/* create a new cpu accounting group */
9235static struct cgroup_subsys_state *cpuacct_create(
Dhaval Giani32cd7562008-02-29 10:02:43 +05309236 struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009237{
9238 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309239 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009240
9241 if (!ca)
Bharata B Raoef12fef2009-03-31 10:02:22 +05309242 goto out;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009243
9244 ca->cpuusage = alloc_percpu(u64);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309245 if (!ca->cpuusage)
9246 goto out_free_ca;
9247
9248 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9249 if (percpu_counter_init(&ca->cpustat[i], 0))
9250 goto out_free_counters;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009251
Bharata B Rao934352f2008-11-10 20:41:13 +05309252 if (cgrp->parent)
9253 ca->parent = cgroup_ca(cgrp->parent);
9254
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009255 return &ca->css;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309256
9257out_free_counters:
9258 while (--i >= 0)
9259 percpu_counter_destroy(&ca->cpustat[i]);
9260 free_percpu(ca->cpuusage);
9261out_free_ca:
9262 kfree(ca);
9263out:
9264 return ERR_PTR(-ENOMEM);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009265}
9266
9267/* destroy an existing cpu accounting group */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009268static void
Dhaval Giani32cd7562008-02-29 10:02:43 +05309269cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009270{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309271 struct cpuacct *ca = cgroup_ca(cgrp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309272 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009273
Bharata B Raoef12fef2009-03-31 10:02:22 +05309274 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9275 percpu_counter_destroy(&ca->cpustat[i]);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009276 free_percpu(ca->cpuusage);
9277 kfree(ca);
9278}
9279
Ken Chen720f5492008-12-15 22:02:01 -08009280static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9281{
Rusty Russellb36128c2009-02-20 16:29:08 +09009282 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08009283 u64 data;
9284
9285#ifndef CONFIG_64BIT
9286 /*
9287 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
9288 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009289 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009290 data = *cpuusage;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009291 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009292#else
9293 data = *cpuusage;
9294#endif
9295
9296 return data;
9297}
9298
9299static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9300{
Rusty Russellb36128c2009-02-20 16:29:08 +09009301 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08009302
9303#ifndef CONFIG_64BIT
9304 /*
9305 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
9306 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009307 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009308 *cpuusage = val;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009309 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009310#else
9311 *cpuusage = val;
9312#endif
9313}
9314
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009315/* return total cpu usage (in nanoseconds) of a group */
Dhaval Giani32cd7562008-02-29 10:02:43 +05309316static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009317{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309318 struct cpuacct *ca = cgroup_ca(cgrp);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009319 u64 totalcpuusage = 0;
9320 int i;
9321
Ken Chen720f5492008-12-15 22:02:01 -08009322 for_each_present_cpu(i)
9323 totalcpuusage += cpuacct_cpuusage_read(ca, i);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009324
9325 return totalcpuusage;
9326}
9327
Dhaval Giani0297b802008-02-29 10:02:44 +05309328static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
9329 u64 reset)
9330{
9331 struct cpuacct *ca = cgroup_ca(cgrp);
9332 int err = 0;
9333 int i;
9334
9335 if (reset) {
9336 err = -EINVAL;
9337 goto out;
9338 }
9339
Ken Chen720f5492008-12-15 22:02:01 -08009340 for_each_present_cpu(i)
9341 cpuacct_cpuusage_write(ca, i, 0);
Dhaval Giani0297b802008-02-29 10:02:44 +05309342
Dhaval Giani0297b802008-02-29 10:02:44 +05309343out:
9344 return err;
9345}
9346
Ken Chene9515c32008-12-15 22:04:15 -08009347static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
9348 struct seq_file *m)
9349{
9350 struct cpuacct *ca = cgroup_ca(cgroup);
9351 u64 percpu;
9352 int i;
9353
9354 for_each_present_cpu(i) {
9355 percpu = cpuacct_cpuusage_read(ca, i);
9356 seq_printf(m, "%llu ", (unsigned long long) percpu);
9357 }
9358 seq_printf(m, "\n");
9359 return 0;
9360}
9361
Bharata B Raoef12fef2009-03-31 10:02:22 +05309362static const char *cpuacct_stat_desc[] = {
9363 [CPUACCT_STAT_USER] = "user",
9364 [CPUACCT_STAT_SYSTEM] = "system",
9365};
9366
9367static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9368 struct cgroup_map_cb *cb)
9369{
9370 struct cpuacct *ca = cgroup_ca(cgrp);
9371 int i;
9372
9373 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
9374 s64 val = percpu_counter_read(&ca->cpustat[i]);
9375 val = cputime64_to_clock_t(val);
9376 cb->fill(cb, cpuacct_stat_desc[i], val);
9377 }
9378 return 0;
9379}
9380
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009381static struct cftype files[] = {
9382 {
9383 .name = "usage",
Paul Menagef4c753b2008-04-29 00:59:56 -07009384 .read_u64 = cpuusage_read,
9385 .write_u64 = cpuusage_write,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009386 },
Ken Chene9515c32008-12-15 22:04:15 -08009387 {
9388 .name = "usage_percpu",
9389 .read_seq_string = cpuacct_percpu_seq_read,
9390 },
Bharata B Raoef12fef2009-03-31 10:02:22 +05309391 {
9392 .name = "stat",
9393 .read_map = cpuacct_stats_show,
9394 },
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009395};
9396
Dhaval Giani32cd7562008-02-29 10:02:43 +05309397static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009398{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309399 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009400}
9401
9402/*
9403 * charge this task's execution time to its accounting group.
9404 *
9405 * called with rq->lock held.
9406 */
9407static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9408{
9409 struct cpuacct *ca;
Bharata B Rao934352f2008-11-10 20:41:13 +05309410 int cpu;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009411
Li Zefanc40c6f82009-02-26 15:40:15 +08009412 if (unlikely(!cpuacct_subsys.active))
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009413 return;
9414
Bharata B Rao934352f2008-11-10 20:41:13 +05309415 cpu = task_cpu(tsk);
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309416
9417 rcu_read_lock();
9418
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009419 ca = task_ca(tsk);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009420
Bharata B Rao934352f2008-11-10 20:41:13 +05309421 for (; ca; ca = ca->parent) {
Rusty Russellb36128c2009-02-20 16:29:08 +09009422 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009423 *cpuusage += cputime;
9424 }
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309425
9426 rcu_read_unlock();
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009427}
9428
Bharata B Raoef12fef2009-03-31 10:02:22 +05309429/*
Anton Blanchardfa535a72010-02-02 14:46:13 -08009430 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
9431 * in cputime_t units. As a result, cpuacct_update_stats calls
9432 * percpu_counter_add with values large enough to always overflow the
9433 * per cpu batch limit causing bad SMP scalability.
9434 *
9435 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9436 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9437 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
9438 */
9439#ifdef CONFIG_SMP
9440#define CPUACCT_BATCH \
9441 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9442#else
9443#define CPUACCT_BATCH 0
9444#endif
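/*
 * Numeric sketch (HZ=1000 and nanosecond-granularity cputime are assumed
 * for the example): cputime_one_jiffy is then 1000000, so with a
 * percpu_counter_batch of 32 the effective batch becomes
 * min(32 * 1000000, INT_MAX) = 32000000, keeping the per-cpu counters
 * from folding into the global count on every tick.
 */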
9445
9446/*
Bharata B Raoef12fef2009-03-31 10:02:22 +05309447 * Charge the system/user time to the task's accounting group.
9448 */
9449static void cpuacct_update_stats(struct task_struct *tsk,
9450 enum cpuacct_stat_index idx, cputime_t val)
9451{
9452 struct cpuacct *ca;
Anton Blanchardfa535a72010-02-02 14:46:13 -08009453 int batch = CPUACCT_BATCH;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309454
9455 if (unlikely(!cpuacct_subsys.active))
9456 return;
9457
9458 rcu_read_lock();
9459 ca = task_ca(tsk);
9460
9461 do {
Anton Blanchardfa535a72010-02-02 14:46:13 -08009462 __percpu_counter_add(&ca->cpustat[idx], val, batch);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309463 ca = ca->parent;
9464 } while (ca);
9465 rcu_read_unlock();
9466}
9467
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009468struct cgroup_subsys cpuacct_subsys = {
9469 .name = "cpuacct",
9470 .create = cpuacct_create,
9471 .destroy = cpuacct_destroy,
9472 .populate = cpuacct_populate,
9473 .subsys_id = cpuacct_subsys_id,
9474};
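/*
 * Userspace view, for illustration (the mount point is an assumption;
 * the file names come from the cftype table above):
 *
 *	# cat /cgroup/mygroup/cpuacct.usage		total ns across all cpus
 *	# cat /cgroup/mygroup/cpuacct.usage_percpu	one value per cpu
 *	# cat /cgroup/mygroup/cpuacct.stat		user/system in clock ticks
 *
 * Writing 0 to cpuacct.usage resets the per-cpu counters, per
 * cpuusage_write() above; any other value returns -EINVAL.
 */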
9475#endif /* CONFIG_CGROUP_CPUACCT */
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009476