/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15	Work begun on replacing all interactivity tuning with a
 *		fair scheduling design by Con Kolivas.
 *  2007-05-05	Load balancing (smp-nice) and other improvements
 *		by Peter Williams
 *  2007-05-06	Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01	Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29	RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *		Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
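
/*
 * Worked example (assuming the usual MAX_RT_PRIO == 100 and MAX_PRIO == 140):
 *   nice -20 -> static_prio 100 -> user prio  0
 *   nice   0 -> static_prio 120 -> user prio 20
 *   nice +19 -> static_prio 139 -> user prio 39
 * so MAX_USER_PRIO works out to 40.
 */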

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)
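/*
 * Note: DEF_TIMESLICE is expressed in jiffies, so it scales with HZ:
 * e.g. 100 jiffies at HZ == 1000, 25 jiffies at HZ == 250; 100ms either way.
 */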

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};
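/*
 * Example: with the default sysctls further down in this file
 * (sysctl_sched_rt_period == 1000000us, sysctl_sched_rt_runtime == 950000us),
 * rt_period is 1s and rt_runtime is 0.95s, i.e. realtime tasks may consume at
 * most 95% of each one-second period.
 */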

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	ktime_t now;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		unsigned long delta;
		ktime_t soft, hard;

		if (hrtimer_active(&rt_b->rt_period_timer))
			break;

		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
				HRTIMER_MODE_ABS_PINNED, 0);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
};

/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so neither an entity's weight nor a task group's
 * shares value should be too large.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	2
#define MAX_SHARES	(1UL << (18 + SCHED_LOAD_RESOLUTION))

static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif

/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	atomic_t rto_count;
	struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks in
 * ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned char nohz_balance_kick;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_at_tick;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	unsigned long avg_load_per_task;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct task_struct *wake_list;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);


static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      rcu_read_lock_held() || \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification with
 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
 * task it moves into the cgroup. Therefore by holding either of those locks,
 * we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&p->pi_lock) ||
			lockdep_is_held(&task_rq(p)->lock));
	tg = container_of(css, struct task_group, css);

	return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
	p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static void update_rq_clock_task(struct rq *rq, s64 delta);

static void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked - Returns true if the current cpu runqueue is locked
 * @cpu: the processor in question.
 *
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
	NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; sched_feat_names[i]; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; sched_feat_names[i]; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg)
				sysctl_sched_features &= ~(1UL << i);
			else
				sysctl_sched_features |= (1UL << i);
			break;
		}
	}

	if (!sched_feat_names[i])
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
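/*
 * Example: sched_feat(HRTICK) tests the HRTICK feature bit (used below in
 * hrtick_enabled()).  With CONFIG_SCHED_DEBUG the bits can be flipped at run
 * time by writing e.g. "HRTICK" or "NO_HRTICK" to the debugfs file
 * "sched_features" created above.
 */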

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001162
Ingo Molnar1b9f19c2007-07-09 18:51:59 +02001163/*
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001164 * resched_task - mark a task 'to be rescheduled now'.
1165 *
1166 * On UP this means the setting of the need_resched flag, on SMP it
1167 * might also involve a cross-CPU call to trigger the scheduler on
1168 * the target CPU.
1169 */
1170#ifdef CONFIG_SMP
1171
1172#ifndef tsk_is_polling
1173#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1174#endif
1175
Peter Zijlstra31656512008-07-18 18:01:23 +02001176static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001177{
1178 int cpu;
1179
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001180 assert_raw_spin_locked(&task_rq(p)->lock);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001181
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001182 if (test_tsk_need_resched(p))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001183 return;
1184
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001185 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001186
1187 cpu = task_cpu(p);
1188 if (cpu == smp_processor_id())
1189 return;
1190
1191 /* NEED_RESCHED must be visible before we test polling */
1192 smp_mb();
1193 if (!tsk_is_polling(p))
1194 smp_send_reschedule(cpu);
1195}
1196
1197static void resched_cpu(int cpu)
1198{
1199 struct rq *rq = cpu_rq(cpu);
1200 unsigned long flags;
1201
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001202 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001203 return;
1204 resched_task(cpu_curr(cpu));
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001205 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001206}
Thomas Gleixner06d83082008-03-22 09:20:24 +01001207
1208#ifdef CONFIG_NO_HZ
1209/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07001210 * In the semi idle case, use the nearest busy cpu for migrating timers
1211 * from an idle cpu. This is good for power-savings.
1212 *
1213 * We don't do similar optimization for completely idle system, as
1214 * selecting an idle cpu will add more delays to the timers than intended
1215 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
1216 */
1217int get_nohz_timer_target(void)
1218{
1219 int cpu = smp_processor_id();
1220 int i;
1221 struct sched_domain *sd;
1222
Peter Zijlstra057f3fa2011-04-18 11:24:34 +02001223 rcu_read_lock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07001224 for_each_domain(cpu, sd) {
Peter Zijlstra057f3fa2011-04-18 11:24:34 +02001225 for_each_cpu(i, sched_domain_span(sd)) {
1226 if (!idle_cpu(i)) {
1227 cpu = i;
1228 goto unlock;
1229 }
1230 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07001231 }
Peter Zijlstra057f3fa2011-04-18 11:24:34 +02001232unlock:
1233 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07001234 return cpu;
1235}
1236/*
Thomas Gleixner06d83082008-03-22 09:20:24 +01001237 * When add_timer_on() enqueues a timer into the timer wheel of an
1238 * idle CPU then this timer might expire before the next timer event
1239 * which is scheduled to wake up that CPU. In case of a completely
1240 * idle system the next event might even be infinite time into the
1241 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1242 * leaves the inner idle loop so the newly added timer is taken into
1243 * account when the CPU goes back to idle and evaluates the timer
1244 * wheel for the next timer event.
1245 */
1246void wake_up_idle_cpu(int cpu)
1247{
1248 struct rq *rq = cpu_rq(cpu);
1249
1250 if (cpu == smp_processor_id())
1251 return;
1252
1253 /*
1254 * This is safe, as this function is called with the timer
1255 * wheel base lock of (cpu) held. When the CPU is on the way
1256 * to idle and has not yet set rq->curr to idle then it will
1257 * be serialized on the timer wheel base lock and take the new
1258 * timer into account automatically.
1259 */
1260 if (rq->curr != rq->idle)
1261 return;
1262
1263 /*
 1264	 * We can set TIF_RESCHED on the idle task of the other CPU
 1265	 * locklessly. The worst case is that the other CPU runs the
 1266	 * idle task through an additional NOOP schedule().
1267 */
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001268 set_tsk_need_resched(rq->idle);
Thomas Gleixner06d83082008-03-22 09:20:24 +01001269
1270 /* NEED_RESCHED must be visible before we test polling */
1271 smp_mb();
1272 if (!tsk_is_polling(rq->idle))
1273 smp_send_reschedule(cpu);
1274}
Mike Galbraith39c0cbe2010-03-11 17:17:13 +01001275
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001276#endif /* CONFIG_NO_HZ */
Thomas Gleixner06d83082008-03-22 09:20:24 +01001277
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001278static u64 sched_avg_period(void)
1279{
1280 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1281}
1282
1283static void sched_avg_update(struct rq *rq)
1284{
1285 s64 period = sched_avg_period();
1286
1287 while ((s64)(rq->clock - rq->age_stamp) > period) {
Will Deacon0d98bb22010-05-24 12:11:43 -07001288 /*
1289 * Inline assembly required to prevent the compiler
1290 * optimising this loop into a divmod call.
1291 * See __iter_div_u64_rem() for another example of this.
1292 */
1293 asm("" : "+rm" (rq->age_stamp));
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001294 rq->age_stamp += period;
1295 rq->rt_avg /= 2;
1296 }
1297}
1298
1299static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1300{
1301 rq->rt_avg += rt_delta;
1302 sched_avg_update(rq);
1303}
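/*
 * Worked example for the two helpers above (illustrative only; the 500ms
 * figure assumes sysctl_sched_time_avg keeps its usual 1000ms default,
 * which is an assumption, not something stated here):
 *
 *	period = 1000 * NSEC_PER_MSEC / 2 = 500000000ns
 *
 * Every full period that has elapsed since rq->age_stamp halves rq->rt_avg,
 * so RT runtime accumulated via sched_rt_avg_update() decays geometrically
 * and contributions older than a few periods are effectively forgotten.
 */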
1304
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001305#else /* !CONFIG_SMP */
Peter Zijlstra31656512008-07-18 18:01:23 +02001306static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001307{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001308 assert_raw_spin_locked(&task_rq(p)->lock);
Peter Zijlstra31656512008-07-18 18:01:23 +02001309 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001310}
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001311
1312static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1313{
1314}
Suresh Siddhada2b71e2010-08-23 13:42:51 -07001315
1316static void sched_avg_update(struct rq *rq)
1317{
1318}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001319#endif /* CONFIG_SMP */
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001320
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001321#if BITS_PER_LONG == 32
1322# define WMULT_CONST (~0UL)
1323#else
1324# define WMULT_CONST (1UL << 32)
1325#endif
1326
1327#define WMULT_SHIFT 32
1328
Ingo Molnar194081e2007-08-09 11:16:51 +02001329/*
1330 * Shift right and round:
1331 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001332#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
Ingo Molnar194081e2007-08-09 11:16:51 +02001333
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001334/*
1335 * delta *= weight / lw
1336 */
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001337static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001338calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1339 struct load_weight *lw)
1340{
1341 u64 tmp;
1342
Nikhil Raoc8b28112011-05-18 14:37:48 -07001343 /*
1344 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
1345 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
1346 * 2^SCHED_LOAD_RESOLUTION.
1347 */
1348 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
1349 tmp = (u64)delta_exec * scale_load_down(weight);
1350 else
1351 tmp = (u64)delta_exec;
Stephan Baerwolfdb670da2011-05-11 18:03:29 +02001352
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001353 if (!lw->inv_weight) {
Nikhil Raoc8b28112011-05-18 14:37:48 -07001354 unsigned long w = scale_load_down(lw->weight);
1355
1356 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001357 lw->inv_weight = 1;
Nikhil Raoc8b28112011-05-18 14:37:48 -07001358 else if (unlikely(!w))
1359 lw->inv_weight = WMULT_CONST;
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001360 else
Nikhil Raoc8b28112011-05-18 14:37:48 -07001361 lw->inv_weight = WMULT_CONST / w;
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001362 }
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001363
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001364 /*
1365 * Check whether we'd overflow the 64-bit multiplication:
1366 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001367 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001368 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001369 WMULT_SHIFT/2);
1370 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001371 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001372
Ingo Molnarecf691d2007-08-02 17:41:40 +02001373 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001374}
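/*
 * Worked example for calc_delta_mine(), assuming a config where
 * SCHED_LOAD_RESOLUTION is 0 so that scale_load_down() is a no-op (an
 * assumption; on other configs the weights are shifted down first):
 * a nice-0 task (weight 1024) against a load_weight of 2048, with
 * delta_exec = 4000000ns.
 *
 *	inv_weight = 2^32 / 2048               = 2097152
 *	tmp        = 4000000 * 1024            = 4096000000  (<= WMULT_CONST)
 *	result     = SRR(tmp * inv_weight, 32) ~= 2000000
 *
 * i.e. delta_exec * weight / lw->weight, computed with a multiply and a
 * rounded shift instead of a 64-bit division.
 */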
1375
Ingo Molnar10919852007-10-15 17:00:04 +02001376static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001377{
1378 lw->weight += inc;
Ingo Molnare89996a2008-03-14 23:48:28 +01001379 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001380}
1381
Ingo Molnar10919852007-10-15 17:00:04 +02001382static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001383{
1384 lw->weight -= dec;
Ingo Molnare89996a2008-03-14 23:48:28 +01001385 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001386}
1387
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001388static inline void update_load_set(struct load_weight *lw, unsigned long w)
1389{
1390 lw->weight = w;
1391 lw->inv_weight = 0;
1392}
1393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001395 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 1396 * of tasks with abnormal "nice" values across CPUs, the contribution that
1397 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001398 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001399 * scaled version of the new time slice allocation that they receive on time
1400 * slice expiry etc.
1401 */
1402
Peter Zijlstracce7ade2009-01-15 14:53:37 +01001403#define WEIGHT_IDLEPRIO 3
1404#define WMULT_IDLEPRIO 1431655765
Ingo Molnardd41f592007-07-09 18:51:59 +02001405
1406/*
1407 * Nice levels are multiplicative, with a gentle 10% change for every
1408 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1409 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1410 * that remained on nice 0.
1411 *
1412 * The "10% effect" is relative and cumulative: from _any_ nice level,
1413 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001414 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1415 * If a task goes up by ~10% and another task goes down by ~10% then
1416 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001417 */
1418static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001419 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1420 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1421 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1422 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1423 /* 0 */ 1024, 820, 655, 526, 423,
1424 /* 5 */ 335, 272, 215, 172, 137,
1425 /* 10 */ 110, 87, 70, 56, 45,
1426 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001427};
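/*
 * Worked example of the "10% effect" using the table above: two CPU-bound
 * tasks on one runqueue, both at nice 0, each get 1024/(1024+1024) = 50%
 * of the CPU. Renice one of them to +1 (weight 820) and the split becomes
 * 1024/1844 ~= 55% versus 820/1844 ~= 45%, i.e. roughly a 10% swing per
 * nice level, with adjacent weights kept in a ~1.25 ratio (1024/820).
 */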
1428
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001429/*
1430 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1431 *
1432 * In cases where the weight does not change often, we can use the
 1433 * precalculated inverse to speed up arithmetic by turning divisions
1434 * into multiplications:
1435 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001436static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001437 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1438 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1439 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1440 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1441 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1442 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1443 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1444 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001445};
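/*
 * Sanity check of the relationship between the two tables (illustrative):
 *
 *	prio_to_wmult[i] ~= 2^32 / prio_to_weight[i]
 *
 * e.g. 2^32 / 1024 = 4194304 (the nice-0 entry) and 2^32 / 88761 ~= 48388
 * (the nice -20 entry), which is what lets calc_delta_mine() replace a
 * division by the weight with a multiplication and a 32-bit shift.
 */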
Peter Williams2dd73a42006-06-27 02:54:34 -07001446
Bharata B Raoef12fef2009-03-31 10:02:22 +05301447/* Time spent by the tasks of the cpu accounting group executing in ... */
1448enum cpuacct_stat_index {
1449 CPUACCT_STAT_USER, /* ... user mode */
1450 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1451
1452 CPUACCT_STAT_NSTATS,
1453};
1454
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001455#ifdef CONFIG_CGROUP_CPUACCT
1456static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
Bharata B Raoef12fef2009-03-31 10:02:22 +05301457static void cpuacct_update_stats(struct task_struct *tsk,
1458 enum cpuacct_stat_index idx, cputime_t val);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001459#else
1460static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
Bharata B Raoef12fef2009-03-31 10:02:22 +05301461static inline void cpuacct_update_stats(struct task_struct *tsk,
1462 enum cpuacct_stat_index idx, cputime_t val) {}
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001463#endif
1464
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001465static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1466{
1467 update_load_add(&rq->load, load);
1468}
1469
1470static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1471{
1472 update_load_sub(&rq->load, load);
1473}
1474
Ingo Molnar7940ca32008-08-19 13:40:47 +02001475#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
Peter Zijlstraeb755802008-08-19 12:33:05 +02001476typedef int (*tg_visitor)(struct task_group *, void *);
1477
1478/*
1479 * Iterate the full tree, calling @down when first entering a node and @up when
1480 * leaving it for the final time.
1481 */
1482static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1483{
1484 struct task_group *parent, *child;
1485 int ret;
1486
1487 rcu_read_lock();
1488 parent = &root_task_group;
1489down:
1490 ret = (*down)(parent, data);
1491 if (ret)
1492 goto out_unlock;
1493 list_for_each_entry_rcu(child, &parent->children, siblings) {
1494 parent = child;
1495 goto down;
1496
1497up:
1498 continue;
1499 }
1500 ret = (*up)(parent, data);
1501 if (ret)
1502 goto out_unlock;
1503
1504 child = parent;
1505 parent = parent->parent;
1506 if (parent)
1507 goto up;
1508out_unlock:
1509 rcu_read_unlock();
1510
1511 return ret;
1512}
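/*
 * For illustration only: a recursive formulation equivalent to the
 * goto-based walk above. It is a sketch, not part of this file's API;
 * the caller is assumed to hold rcu_read_lock(), and the iterative form
 * above is presumably preferred to keep kernel stack usage bounded.
 */
static int __maybe_unused walk_tg_tree_recursive(struct task_group *tg,
				tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *child;
	int ret;

	/* @down is called when first entering a node... */
	ret = (*down)(tg, data);
	if (ret)
		return ret;
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		ret = walk_tg_tree_recursive(child, down, up, data);
		if (ret)
			return ret;
	}
	/* ...and @up when leaving it for the final time. */
	return (*up)(tg, data);
}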
1513
1514static int tg_nop(struct task_group *tg, void *data)
1515{
1516 return 0;
1517}
1518#endif
1519
Gregory Haskinse7693a32008-01-25 21:08:09 +01001520#ifdef CONFIG_SMP
Peter Zijlstraf5f08f32009-09-10 13:35:28 +02001521/* Used instead of source_load when we know the type == 0 */
1522static unsigned long weighted_cpuload(const int cpu)
1523{
1524 return cpu_rq(cpu)->load.weight;
1525}
1526
1527/*
1528 * Return a low guess at the load of a migration-source cpu weighted
1529 * according to the scheduling class and "nice" value.
1530 *
1531 * We want to under-estimate the load of migration sources, to
1532 * balance conservatively.
1533 */
1534static unsigned long source_load(int cpu, int type)
1535{
1536 struct rq *rq = cpu_rq(cpu);
1537 unsigned long total = weighted_cpuload(cpu);
1538
1539 if (type == 0 || !sched_feat(LB_BIAS))
1540 return total;
1541
1542 return min(rq->cpu_load[type-1], total);
1543}
1544
1545/*
1546 * Return a high guess at the load of a migration-target cpu weighted
1547 * according to the scheduling class and "nice" value.
1548 */
1549static unsigned long target_load(int cpu, int type)
1550{
1551 struct rq *rq = cpu_rq(cpu);
1552 unsigned long total = weighted_cpuload(cpu);
1553
1554 if (type == 0 || !sched_feat(LB_BIAS))
1555 return total;
1556
1557 return max(rq->cpu_load[type-1], total);
1558}
1559
Peter Zijlstraae154be2009-09-10 14:40:57 +02001560static unsigned long power_of(int cpu)
1561{
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02001562 return cpu_rq(cpu)->cpu_power;
Peter Zijlstraae154be2009-09-10 14:40:57 +02001563}
1564
Gregory Haskinse7693a32008-01-25 21:08:09 +01001565static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001566
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001567static unsigned long cpu_avg_load_per_task(int cpu)
1568{
1569 struct rq *rq = cpu_rq(cpu);
Ingo Molnaraf6d5962008-11-29 20:45:15 +01001570 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001571
Steven Rostedt4cd42622008-11-26 21:04:24 -05001572 if (nr_running)
1573 rq->avg_load_per_task = rq->load.weight / nr_running;
Balbir Singha2d47772008-11-12 16:19:00 +05301574 else
1575 rq->avg_load_per_task = 0;
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001576
1577 return rq->avg_load_per_task;
1578}
1579
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001580#ifdef CONFIG_FAIR_GROUP_SCHED
1581
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001582/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001583 * Compute the cpu's hierarchical load factor for each task group.
1584 * This needs to be done in a top-down fashion because the load of a child
1585 * group is a fraction of its parents load.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001586 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001587static int tg_load_down(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001588{
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001589 unsigned long load;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001590 long cpu = (long)data;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001591
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001592 if (!tg->parent) {
1593 load = cpu_rq(cpu)->load.weight;
1594 } else {
1595 load = tg->parent->cfs_rq[cpu]->h_load;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001596 load *= tg->se[cpu]->load.weight;
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001597 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1598 }
1599
1600 tg->cfs_rq[cpu]->h_load = load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001601
Peter Zijlstraeb755802008-08-19 12:33:05 +02001602 return 0;
Peter Zijlstra4d8d5952008-06-27 13:41:19 +02001603}
1604
Peter Zijlstraeb755802008-08-19 12:33:05 +02001605static void update_h_load(long cpu)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001606{
Peter Zijlstraeb755802008-08-19 12:33:05 +02001607 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001608}
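/*
 * Worked example for tg_load_down() with made-up numbers (illustrative):
 * suppose cpu_rq(cpu)->load.weight is 3072, group A's sched entity on that
 * cpu has weight 1024, and A's parent cfs_rq has total weight 3072. Then:
 *
 *	h_load(root) = 3072
 *	h_load(A)    = 3072 * 1024 / (3072 + 1) ~= 1023
 *
 * so group A's hierarchy is charged roughly a third of the cpu's load,
 * matching its share of the parent's weight; the "+ 1" merely avoids a
 * division by zero when the parent cfs_rq weight is 0.
 */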
1609
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001610#endif
1611
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001612#ifdef CONFIG_PREEMPT
1613
Peter Zijlstrab78bb862009-09-15 14:23:18 +02001614static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1615
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001616/*
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001617 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1618 * way at the expense of forcing extra atomic operations in all
1619 * invocations. This assures that the double_lock is acquired using the
1620 * same underlying policy as the spinlock_t on this architecture, which
1621 * reduces latency compared to the unfair variant below. However, it
1622 * also adds more overhead and therefore may reduce throughput.
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001623 */
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001624static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1625 __releases(this_rq->lock)
1626 __acquires(busiest->lock)
1627 __acquires(this_rq->lock)
1628{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001629 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001630 double_rq_lock(this_rq, busiest);
1631
1632 return 1;
1633}
1634
1635#else
1636/*
1637 * Unfair double_lock_balance: Optimizes throughput at the expense of
1638 * latency by eliminating extra atomic operations when the locks are
1639 * already in proper order on entry. This favors lower cpu-ids and will
1640 * grant the double lock to lower cpus over higher ids under contention,
1641 * regardless of entry order into the function.
1642 */
1643static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001644 __releases(this_rq->lock)
1645 __acquires(busiest->lock)
1646 __acquires(this_rq->lock)
1647{
1648 int ret = 0;
1649
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001650 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001651 if (busiest < this_rq) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001652 raw_spin_unlock(&this_rq->lock);
1653 raw_spin_lock(&busiest->lock);
1654 raw_spin_lock_nested(&this_rq->lock,
1655 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001656 ret = 1;
1657 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001658 raw_spin_lock_nested(&busiest->lock,
1659 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001660 }
1661 return ret;
1662}
1663
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001664#endif /* CONFIG_PREEMPT */
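/*
 * Why the unfair variant above cannot deadlock (illustrative walk-through):
 * once a trylock fails, runqueue locks are always taken in ascending rq
 * address order. If CPU A holds rq2->lock and wants rq1->lock while CPU B
 * holds rq1->lock and wants rq2->lock, A (whose busiest < this_rq) drops
 * rq2->lock and re-acquires rq1 then rq2, so the classic ABBA cycle is
 * broken; B simply waits on rq2->lock and then proceeds. The cost is that
 * lower-addressed (typically lower cpu-id) runqueues win under contention.
 */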
1665
1666/*
1667 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1668 */
1669static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1670{
1671 if (unlikely(!irqs_disabled())) {
 1672		/* printk() doesn't work well under rq->lock */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001673 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001674 BUG_ON(1);
1675 }
1676
1677 return _double_lock_balance(this_rq, busiest);
1678}
1679
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001680static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1681 __releases(busiest->lock)
1682{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001683 raw_spin_unlock(&busiest->lock);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001684 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1685}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001686
1687/*
1688 * double_rq_lock - safely lock two runqueues
1689 *
1690 * Note this does not disable interrupts like task_rq_lock,
1691 * you need to do so manually before calling.
1692 */
1693static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1694 __acquires(rq1->lock)
1695 __acquires(rq2->lock)
1696{
1697 BUG_ON(!irqs_disabled());
1698 if (rq1 == rq2) {
1699 raw_spin_lock(&rq1->lock);
1700 __acquire(rq2->lock); /* Fake it out ;) */
1701 } else {
1702 if (rq1 < rq2) {
1703 raw_spin_lock(&rq1->lock);
1704 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1705 } else {
1706 raw_spin_lock(&rq2->lock);
1707 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1708 }
1709 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001710}
1711
1712/*
1713 * double_rq_unlock - safely unlock two runqueues
1714 *
1715 * Note this does not restore interrupts like task_rq_unlock,
1716 * you need to do so manually after calling.
1717 */
1718static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1719 __releases(rq1->lock)
1720 __releases(rq2->lock)
1721{
1722 raw_spin_unlock(&rq1->lock);
1723 if (rq1 != rq2)
1724 raw_spin_unlock(&rq2->lock);
1725 else
1726 __release(rq2->lock);
1727}
1728
Mike Galbraithd95f4122011-02-01 09:50:51 -05001729#else /* CONFIG_SMP */
1730
1731/*
1732 * double_rq_lock - safely lock two runqueues
1733 *
1734 * Note this does not disable interrupts like task_rq_lock,
1735 * you need to do so manually before calling.
1736 */
1737static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1738 __acquires(rq1->lock)
1739 __acquires(rq2->lock)
1740{
1741 BUG_ON(!irqs_disabled());
1742 BUG_ON(rq1 != rq2);
1743 raw_spin_lock(&rq1->lock);
1744 __acquire(rq2->lock); /* Fake it out ;) */
1745}
1746
1747/*
1748 * double_rq_unlock - safely unlock two runqueues
1749 *
1750 * Note this does not restore interrupts like task_rq_unlock,
1751 * you need to do so manually after calling.
1752 */
1753static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1754 __releases(rq1->lock)
1755 __releases(rq2->lock)
1756{
1757 BUG_ON(rq1 != rq2);
1758 raw_spin_unlock(&rq1->lock);
1759 __release(rq2->lock);
1760}
1761
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001762#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001763
Peter Zijlstra74f51872010-04-22 21:50:19 +02001764static void calc_load_account_idle(struct rq *this_rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01001765static void update_sysctl(void);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01001766static int get_update_sysctl_factor(void);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07001767static void update_cpu_load(struct rq *this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001768
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001769static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1770{
1771 set_task_rq(p, cpu);
1772#ifdef CONFIG_SMP
1773 /*
1774 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
 1775	 * successfully executed on another CPU. We must ensure that updates of
1776 * per-task data have been completed by this moment.
1777 */
1778 smp_wmb();
1779 task_thread_info(p)->cpu = cpu;
1780#endif
1781}
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001782
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001783static const struct sched_class rt_sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02001784
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001785#define sched_class_highest (&stop_sched_class)
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001786#define for_each_class(class) \
1787 for (class = sched_class_highest; class; class = class->next)
Ingo Molnardd41f592007-07-09 18:51:59 +02001788
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001789#include "sched_stats.h"
1790
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001791static void inc_nr_running(struct rq *rq)
Ingo Molnar6363ca52008-05-29 11:28:57 +02001792{
1793 rq->nr_running++;
Ingo Molnar6363ca52008-05-29 11:28:57 +02001794}
1795
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001796static void dec_nr_running(struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001797{
1798 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001799}
1800
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001801static void set_load_weight(struct task_struct *p)
1802{
Nikhil Raof05998d2011-05-18 10:09:38 -07001803 int prio = p->static_prio - MAX_RT_PRIO;
1804 struct load_weight *load = &p->se.load;
1805
Ingo Molnardd41f592007-07-09 18:51:59 +02001806 /*
1807 * SCHED_IDLE tasks get minimal weight:
1808 */
1809 if (p->policy == SCHED_IDLE) {
Nikhil Raoc8b28112011-05-18 14:37:48 -07001810 load->weight = scale_load(WEIGHT_IDLEPRIO);
Nikhil Raof05998d2011-05-18 10:09:38 -07001811 load->inv_weight = WMULT_IDLEPRIO;
Ingo Molnardd41f592007-07-09 18:51:59 +02001812 return;
1813 }
1814
Nikhil Raoc8b28112011-05-18 14:37:48 -07001815 load->weight = scale_load(prio_to_weight[prio]);
Nikhil Raof05998d2011-05-18 10:09:38 -07001816 load->inv_weight = prio_to_wmult[prio];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001817}
1818
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001819static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001820{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001821 update_rq_clock(rq);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001822 sched_info_queued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001823 p->sched_class->enqueue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001824}
1825
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001826static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnardd41f592007-07-09 18:51:59 +02001827{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001828 update_rq_clock(rq);
Ankita Garg46ac22b2008-07-01 14:30:06 +05301829 sched_info_dequeued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001830 p->sched_class->dequeue_task(rq, p, flags);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001831}
1832
1833/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001834 * activate_task - move a task to the runqueue.
1835 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001836static void activate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001837{
1838 if (task_contributes_to_load(p))
1839 rq->nr_uninterruptible--;
1840
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001841 enqueue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001842 inc_nr_running(rq);
1843}
1844
1845/*
1846 * deactivate_task - remove a task from the runqueue.
1847 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001848static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001849{
1850 if (task_contributes_to_load(p))
1851 rq->nr_uninterruptible++;
1852
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001853 dequeue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001854 dec_nr_running(rq);
1855}
1856
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001857#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1858
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001859/*
1860 * There are no locks covering percpu hardirq/softirq time.
 1861 * They are only modified in account_system_vtime, on the corresponding CPU
 1862 * with interrupts disabled. So, writes are safe.
 1863 * They are read and saved off onto struct rq in update_rq_clock().
 1864 * This may result in another CPU reading this CPU's irq time and can
 1865 * race with irq/account_system_vtime on this CPU. We would either get the old
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001866 * or the new value, with a side effect of accounting a slice of irq time to the wrong
 1867 * task when an irq is in progress while we read rq->clock. That is a worthy
1868 * compromise in place of having locks on each irq in account_system_time.
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001869 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001870static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1871static DEFINE_PER_CPU(u64, cpu_softirq_time);
1872
1873static DEFINE_PER_CPU(u64, irq_start_time);
1874static int sched_clock_irqtime;
1875
1876void enable_sched_clock_irqtime(void)
1877{
1878 sched_clock_irqtime = 1;
1879}
1880
1881void disable_sched_clock_irqtime(void)
1882{
1883 sched_clock_irqtime = 0;
1884}
1885
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001886#ifndef CONFIG_64BIT
1887static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1888
1889static inline void irq_time_write_begin(void)
1890{
1891 __this_cpu_inc(irq_time_seq.sequence);
1892 smp_wmb();
1893}
1894
1895static inline void irq_time_write_end(void)
1896{
1897 smp_wmb();
1898 __this_cpu_inc(irq_time_seq.sequence);
1899}
1900
1901static inline u64 irq_time_read(int cpu)
1902{
1903 u64 irq_time;
1904 unsigned seq;
1905
1906 do {
1907 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1908 irq_time = per_cpu(cpu_softirq_time, cpu) +
1909 per_cpu(cpu_hardirq_time, cpu);
1910 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1911
1912 return irq_time;
1913}
1914#else /* CONFIG_64BIT */
1915static inline void irq_time_write_begin(void)
1916{
1917}
1918
1919static inline void irq_time_write_end(void)
1920{
1921}
1922
1923static inline u64 irq_time_read(int cpu)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001924{
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001925 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1926}
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001927#endif /* CONFIG_64BIT */
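/*
 * Why the 32-bit variant above needs the seqcount (illustrative): a u64
 * load is two 32-bit loads there, so without it a reader could pair the
 * new low half of cpu_hardirq_time with the stale high half (a torn read).
 * The writer makes irq_time_seq odd for the duration of the update and
 * even again afterwards; read_seqcount_begin() waits for an even value and
 * read_seqcount_retry() detects any writer activity in between, e.g.:
 *
 *	do {
 *		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
 *		sum = softirq time + hardirq time;	(two 64-bit reads)
 *	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
 *
 * which is exactly the loop in irq_time_read().
 */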
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001928
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001929/*
1930 * Called before incrementing preempt_count on {soft,}irq_enter
1931 * and before decrementing preempt_count on {soft,}irq_exit.
1932 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001933void account_system_vtime(struct task_struct *curr)
1934{
1935 unsigned long flags;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001936 s64 delta;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001937 int cpu;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001938
1939 if (!sched_clock_irqtime)
1940 return;
1941
1942 local_irq_save(flags);
1943
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001944 cpu = smp_processor_id();
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001945 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1946 __this_cpu_add(irq_start_time, delta);
1947
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001948 irq_time_write_begin();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001949 /*
1950 * We do not account for softirq time from ksoftirqd here.
 1951	 * We want to continue accounting softirq time to the ksoftirqd thread
 1952	 * in that case, so as not to confuse the scheduler with a special task
 1953	 * that does not consume any time but still wants to run.
1954 */
1955 if (hardirq_count())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001956 __this_cpu_add(cpu_hardirq_time, delta);
Venkatesh Pallipadi4dd53d82010-12-21 17:09:00 -08001957 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001958 __this_cpu_add(cpu_softirq_time, delta);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001959
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001960 irq_time_write_end();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001961 local_irq_restore(flags);
1962}
Ingo Molnarb7dadc32010-10-18 20:00:37 +02001963EXPORT_SYMBOL_GPL(account_system_vtime);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001964
Glauber Costae6e66852011-07-11 15:28:17 -04001965#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1966
1967#ifdef CONFIG_PARAVIRT
1968static inline u64 steal_ticks(u64 steal)
1969{
1970 if (unlikely(steal > NSEC_PER_SEC))
1971 return div_u64(steal, TICK_NSEC);
1972
1973 return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
1974}
1975#endif
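/*
 * Worked example for steal_ticks(), assuming HZ=250 so TICK_NSEC is about
 * 4000000ns (an assumption; the exact value depends on the config):
 *
 *	steal = 10000000ns                     -> __iter_div_u64_rem() -> 2 ticks
 *	steal = 3000000000ns (> NSEC_PER_SEC)  -> div_u64()            -> 750 ticks
 *
 * i.e. small backlogs are converted by cheap repeated subtraction, and a
 * full 64-bit division is only paid for the rare multi-second case.
 */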
1976
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001977static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001978{
Glauber Costa095c0aa2011-07-11 15:28:18 -04001979/*
 1980 * In theory, the compiler should just see 0 here, and optimize out the call
1981 * to sched_rt_avg_update. But I don't trust it...
1982 */
1983#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
1984 s64 steal = 0, irq_delta = 0;
1985#endif
1986#ifdef CONFIG_IRQ_TIME_ACCOUNTING
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001987 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001988
1989 /*
1990 * Since irq_time is only updated on {soft,}irq_exit, we might run into
1991 * this case when a previous update_rq_clock() happened inside a
1992 * {soft,}irq region.
1993 *
1994 * When this happens, we stop ->clock_task and only update the
1995 * prev_irq_time stamp to account for the part that fit, so that a next
1996 * update will consume the rest. This ensures ->clock_task is
1997 * monotonic.
1998 *
 1999	 * It does, however, cause some slight misattribution of {soft,}irq
2000 * time, a more accurate solution would be to update the irq_time using
2001 * the current rq->clock timestamp, except that would require using
2002 * atomic ops.
2003 */
2004 if (irq_delta > delta)
2005 irq_delta = delta;
2006
2007 rq->prev_irq_time += irq_delta;
2008 delta -= irq_delta;
Glauber Costa095c0aa2011-07-11 15:28:18 -04002009#endif
2010#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
2011 if (static_branch((&paravirt_steal_rq_enabled))) {
2012 u64 st;
2013
2014 steal = paravirt_steal_clock(cpu_of(rq));
2015 steal -= rq->prev_steal_time_rq;
2016
2017 if (unlikely(steal > delta))
2018 steal = delta;
2019
2020 st = steal_ticks(steal);
2021 steal = st * TICK_NSEC;
2022
2023 rq->prev_steal_time_rq += steal;
2024
2025 delta -= steal;
2026 }
2027#endif
2028
Peter Zijlstrafe44d622010-12-09 14:15:34 +01002029 rq->clock_task += delta;
2030
Glauber Costa095c0aa2011-07-11 15:28:18 -04002031#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
2032 if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
2033 sched_rt_avg_update(rq, irq_delta + steal);
2034#endif
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07002035}
2036
Glauber Costa095c0aa2011-07-11 15:28:18 -04002037#ifdef CONFIG_IRQ_TIME_ACCOUNTING
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08002038static int irqtime_account_hi_update(void)
2039{
2040 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2041 unsigned long flags;
2042 u64 latest_ns;
2043 int ret = 0;
2044
2045 local_irq_save(flags);
2046 latest_ns = this_cpu_read(cpu_hardirq_time);
2047 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
2048 ret = 1;
2049 local_irq_restore(flags);
2050 return ret;
2051}
2052
2053static int irqtime_account_si_update(void)
2054{
2055 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2056 unsigned long flags;
2057 u64 latest_ns;
2058 int ret = 0;
2059
2060 local_irq_save(flags);
2061 latest_ns = this_cpu_read(cpu_softirq_time);
2062 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
2063 ret = 1;
2064 local_irq_restore(flags);
2065 return ret;
2066}
2067
Peter Zijlstrafe44d622010-12-09 14:15:34 +01002068#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07002069
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08002070#define sched_clock_irqtime (0)
2071
Glauber Costa095c0aa2011-07-11 15:28:18 -04002072#endif
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07002073
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002074#include "sched_idletask.c"
2075#include "sched_fair.c"
2076#include "sched_rt.c"
Mike Galbraith5091faa2010-11-30 14:18:03 +01002077#include "sched_autogroup.c"
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002078#include "sched_stoptask.c"
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002079#ifdef CONFIG_SCHED_DEBUG
2080# include "sched_debug.c"
2081#endif
2082
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002083void sched_set_stop_task(int cpu, struct task_struct *stop)
2084{
2085 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2086 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2087
2088 if (stop) {
2089 /*
 2090		 * Make it appear like a SCHED_FIFO task; it's something
2091 * userspace knows about and won't get confused about.
2092 *
2093 * Also, it will make PI more or less work without too
2094 * much confusion -- but then, stop work should not
2095 * rely on PI working anyway.
2096 */
2097 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2098
2099 stop->sched_class = &stop_sched_class;
2100 }
2101
2102 cpu_rq(cpu)->stop = stop;
2103
2104 if (old_stop) {
2105 /*
2106 * Reset it back to a normal scheduling class so that
2107 * it can die in pieces.
2108 */
2109 old_stop->sched_class = &rt_sched_class;
2110 }
2111}
2112
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002113/*
Ingo Molnardd41f592007-07-09 18:51:59 +02002114 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02002115 */
Ingo Molnar14531182007-07-09 18:51:59 +02002116static inline int __normal_prio(struct task_struct *p)
2117{
Ingo Molnardd41f592007-07-09 18:51:59 +02002118 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02002119}
2120
2121/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07002122 * Calculate the expected normal priority: i.e. priority
2123 * without taking RT-inheritance into account. Might be
2124 * boosted by interactivity modifiers. Changes upon fork,
2125 * setprio syscalls, and whenever the interactivity
2126 * estimator recalculates.
2127 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002128static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002129{
2130 int prio;
2131
Ingo Molnare05606d2007-07-09 18:51:59 +02002132 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07002133 prio = MAX_RT_PRIO-1 - p->rt_priority;
2134 else
2135 prio = __normal_prio(p);
2136 return prio;
2137}
2138
2139/*
2140 * Calculate the current priority, i.e. the priority
2141 * taken into account by the scheduler. This value might
2142 * be boosted by RT tasks, or might be boosted by
2143 * interactivity modifiers. Will be RT if the task got
2144 * RT-boosted. If not then it returns p->normal_prio.
2145 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002146static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002147{
2148 p->normal_prio = normal_prio(p);
2149 /*
2150 * If we are RT tasks or we were boosted to RT priority,
2151 * keep the priority unchanged. Otherwise, update priority
2152 * to the normal priority:
2153 */
2154 if (!rt_prio(p->prio))
2155 return p->normal_prio;
2156 return p->prio;
2157}
2158
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159/**
2160 * task_curr - is this task currently executing on a CPU?
2161 * @p: the task in question.
2162 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002163inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164{
2165 return cpu_curr(task_cpu(p)) == p;
2166}
2167
Steven Rostedtcb469842008-01-25 21:08:22 +01002168static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2169 const struct sched_class *prev_class,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002170 int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01002171{
2172 if (prev_class != p->sched_class) {
2173 if (prev_class->switched_from)
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002174 prev_class->switched_from(rq, p);
2175 p->sched_class->switched_to(rq, p);
2176 } else if (oldprio != p->prio)
2177 p->sched_class->prio_changed(rq, p, oldprio);
Steven Rostedtcb469842008-01-25 21:08:22 +01002178}
2179
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002180static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2181{
2182 const struct sched_class *class;
2183
2184 if (p->sched_class == rq->curr->sched_class) {
2185 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2186 } else {
2187 for_each_class(class) {
2188 if (class == rq->curr->sched_class)
2189 break;
2190 if (class == p->sched_class) {
2191 resched_task(rq->curr);
2192 break;
2193 }
2194 }
2195 }
2196
2197 /*
2198 * A queue event has occurred, and we're going to schedule. In
 2199	 * this case, we can save a useless back-to-back clock update.
2200 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002201 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002202 rq->skip_clock_update = 1;
2203}
2204
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205#ifdef CONFIG_SMP
Ingo Molnarcc367732007-10-15 17:00:18 +02002206/*
2207 * Is this task likely cache-hot:
2208 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002209static int
Ingo Molnarcc367732007-10-15 17:00:18 +02002210task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2211{
2212 s64 delta;
2213
Peter Zijlstrae6c8fba2009-12-16 18:04:33 +01002214 if (p->sched_class != &fair_sched_class)
2215 return 0;
2216
Nikhil Raoef8002f2010-10-13 12:09:35 -07002217 if (unlikely(p->policy == SCHED_IDLE))
2218 return 0;
2219
Ingo Molnarf540a602008-03-15 17:10:34 +01002220 /*
2221 * Buddy candidates are cache hot:
2222 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002223 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
Peter Zijlstra47932412008-11-04 21:25:09 +01002224 (&p->se == cfs_rq_of(&p->se)->next ||
2225 &p->se == cfs_rq_of(&p->se)->last))
Ingo Molnarf540a602008-03-15 17:10:34 +01002226 return 1;
2227
Ingo Molnar6bc16652007-10-15 17:00:18 +02002228 if (sysctl_sched_migration_cost == -1)
2229 return 1;
2230 if (sysctl_sched_migration_cost == 0)
2231 return 0;
2232
Ingo Molnarcc367732007-10-15 17:00:18 +02002233 delta = now - p->se.exec_start;
2234
2235 return delta < (s64)sysctl_sched_migration_cost;
2236}
2237
Ingo Molnardd41f592007-07-09 18:51:59 +02002238void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02002239{
Peter Zijlstrae2912002009-12-16 18:04:36 +01002240#ifdef CONFIG_SCHED_DEBUG
2241 /*
2242 * We should never call set_task_cpu() on a blocked task,
2243 * ttwu() will sort out the placement.
2244 */
Peter Zijlstra077614e2009-12-17 13:16:31 +01002245 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2246 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002247
2248#ifdef CONFIG_LOCKDEP
Peter Zijlstra6c6c54e2011-06-03 17:37:07 +02002249 /*
2250 * The caller should hold either p->pi_lock or rq->lock, when changing
2251 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2252 *
2253 * sched_move_task() holds both and thus holding either pins the cgroup,
2254 * see set_task_rq().
2255 *
2256 * Furthermore, all task_rq users should acquire both locks, see
2257 * task_rq_lock().
2258 */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002259 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2260 lockdep_is_held(&task_rq(p)->lock)));
2261#endif
Peter Zijlstrae2912002009-12-16 18:04:36 +01002262#endif
2263
Mathieu Desnoyersde1d7282009-05-05 16:49:59 +08002264 trace_sched_migrate_task(p, new_cpu);
Peter Zijlstracbc34ed2008-12-10 08:08:22 +01002265
Peter Zijlstra0c697742009-12-22 15:43:19 +01002266 if (task_cpu(p) != new_cpu) {
2267 p->se.nr_migrations++;
2268 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2269 }
Ingo Molnardd41f592007-07-09 18:51:59 +02002270
2271 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02002272}
2273
Tejun Heo969c7922010-05-06 18:49:21 +02002274struct migration_arg {
Ingo Molnar36c8b582006-07-03 00:25:41 -07002275 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 int dest_cpu;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002277};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Tejun Heo969c7922010-05-06 18:49:21 +02002279static int migration_cpu_stop(void *data);
2280
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 * wait_task_inactive - wait for a thread to unschedule.
2283 *
Roland McGrath85ba2d82008-07-25 19:45:58 -07002284 * If @match_state is nonzero, it's the @p->state value just checked and
2285 * not expected to change. If it changes, i.e. @p might have woken up,
2286 * then return zero. When we succeed in waiting for @p to be off its CPU,
2287 * we return a positive number (its total switch count). If a second call
2288 * a short while later returns the same number, the caller can be sure that
2289 * @p has remained unscheduled the whole time.
2290 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 * The caller must ensure that the task *will* unschedule sometime soon,
2292 * else this function might spin for a *long* time. This function can't
2293 * be called with interrupts off, or it may introduce deadlock with
2294 * smp_call_function() if an IPI is sent by the same process we are
2295 * waiting to become inactive.
2296 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002297unsigned long wait_task_inactive(struct task_struct *p, long match_state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298{
2299 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002300 int running, on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002301 unsigned long ncsw;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002302 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Andi Kleen3a5c3592007-10-15 17:00:14 +02002304 for (;;) {
2305 /*
2306 * We do the initial early heuristics without holding
2307 * any task-queue locks at all. We'll only try to get
2308 * the runqueue lock when things look like they will
2309 * work out!
2310 */
2311 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002312
Andi Kleen3a5c3592007-10-15 17:00:14 +02002313 /*
2314 * If the task is actively running on another CPU
2315 * still, just relax and busy-wait without holding
2316 * any locks.
2317 *
2318 * NOTE! Since we don't hold any locks, it's not
2319 * even sure that "rq" stays as the right runqueue!
2320 * But we don't care, since "task_running()" will
2321 * return false if the runqueue has changed and p
2322 * is actually now running somewhere else!
2323 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002324 while (task_running(rq, p)) {
2325 if (match_state && unlikely(p->state != match_state))
2326 return 0;
Andi Kleen3a5c3592007-10-15 17:00:14 +02002327 cpu_relax();
Roland McGrath85ba2d82008-07-25 19:45:58 -07002328 }
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002329
Andi Kleen3a5c3592007-10-15 17:00:14 +02002330 /*
2331 * Ok, time to look more closely! We need the rq
2332 * lock now, to be *sure*. If we're wrong, we'll
2333 * just go back and repeat.
2334 */
2335 rq = task_rq_lock(p, &flags);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002336 trace_sched_wait_task(p);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002337 running = task_running(rq, p);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002338 on_rq = p->on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002339 ncsw = 0;
Oleg Nesterovf31e11d2008-08-20 16:54:44 -07002340 if (!match_state || p->state == match_state)
Oleg Nesterov93dcf552008-08-20 16:54:44 -07002341 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002342 task_rq_unlock(rq, p, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002343
Andi Kleen3a5c3592007-10-15 17:00:14 +02002344 /*
Roland McGrath85ba2d82008-07-25 19:45:58 -07002345 * If it changed from the expected state, bail out now.
2346 */
2347 if (unlikely(!ncsw))
2348 break;
2349
2350 /*
Andi Kleen3a5c3592007-10-15 17:00:14 +02002351 * Was it really running after all now that we
2352 * checked with the proper locks actually held?
2353 *
2354 * Oops. Go back and try again..
2355 */
2356 if (unlikely(running)) {
2357 cpu_relax();
2358 continue;
2359 }
2360
2361 /*
2362 * It's not enough that it's not actively running,
2363 * it must be off the runqueue _entirely_, and not
2364 * preempted!
2365 *
Luis Henriques80dd99b2009-03-16 19:58:09 +00002366 * So if it was still runnable (but just not actively
Andi Kleen3a5c3592007-10-15 17:00:14 +02002367 * running right now), it's preempted, and we should
2368 * yield - it could be a while.
2369 */
2370 if (unlikely(on_rq)) {
Thomas Gleixner8eb90c32011-02-23 23:52:21 +00002371 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
2372
2373 set_current_state(TASK_UNINTERRUPTIBLE);
2374 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002375 continue;
2376 }
2377
2378 /*
2379 * Ahh, all good. It wasn't running, and it wasn't
2380 * runnable, which means that it will never become
2381 * running in the future either. We're all done!
2382 */
2383 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 }
Roland McGrath85ba2d82008-07-25 19:45:58 -07002385
2386 return ncsw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387}
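/*
 * Illustrative use of the ncsw protocol documented above; this is a sketch
 * with a hypothetical helper, not a call site from this file:
 *
 *	ncsw = wait_task_inactive(p, TASK_TRACED);
 *	if (!ncsw)
 *		return -ESRCH;				(@p changed state under us)
 *	inspect_task(p);				(hypothetical helper)
 *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		return -EAGAIN;				(@p ran in between, retry)
 *
 * A second call returning the same switch count proves @p stayed off the
 * CPU for the whole interval.
 */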
2388
2389/***
2390 * kick_process - kick a running thread to enter/exit the kernel
2391 * @p: the to-be-kicked thread
2392 *
2393 * Cause a process which is running on another CPU to enter
2394 * kernel-mode, without any delay. (to get signals handled.)
2395 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002396 * NOTE: this function doesn't have to take the runqueue lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 * because all it wants to ensure is that the remote task enters
2398 * the kernel. If the IPI races and the task has been migrated
2399 * to another CPU then no harm is done and the purpose has been
2400 * achieved as well.
2401 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002402void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403{
2404 int cpu;
2405
2406 preempt_disable();
2407 cpu = task_cpu(p);
2408 if ((cpu != smp_processor_id()) && task_curr(p))
2409 smp_send_reschedule(cpu);
2410 preempt_enable();
2411}
Rusty Russellb43e3522009-06-12 22:27:00 -06002412EXPORT_SYMBOL_GPL(kick_process);
Nick Piggin476d1392005-06-25 14:57:29 -07002413#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002415#ifdef CONFIG_SMP
Oleg Nesterov30da6882010-03-15 10:10:19 +01002416/*
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002417 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
Oleg Nesterov30da6882010-03-15 10:10:19 +01002418 */
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002419static int select_fallback_rq(int cpu, struct task_struct *p)
2420{
2421 int dest_cpu;
2422 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2423
2424 /* Look for allowed, online CPU in same node. */
2425 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2426 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2427 return dest_cpu;
2428
2429 /* Any allowed, online CPU? */
2430 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2431 if (dest_cpu < nr_cpu_ids)
2432 return dest_cpu;
2433
2434 /* No more Mr. Nice Guy. */
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01002435 dest_cpu = cpuset_cpus_allowed_fallback(p);
2436 /*
2437 * Don't tell them about moving exiting tasks or
2438 * kernel threads (both mm NULL), since they never
2439 * leave kernel.
2440 */
2441 if (p->mm && printk_ratelimit()) {
2442 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2443 task_pid_nr(p), p->comm, cpu);
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002444 }
2445
2446 return dest_cpu;
2447}
2448
Peter Zijlstrae2912002009-12-16 18:04:36 +01002449/*
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002450 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
Peter Zijlstrae2912002009-12-16 18:04:36 +01002451 */
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002452static inline
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002453int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002454{
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002455 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002456
2457 /*
2458 * In order not to call set_task_cpu() on a blocking task we need
2459 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2460 * cpu.
2461 *
2462 * Since this is common to all placement strategies, this lives here.
2463 *
2464 * [ this allows ->select_task() to simply return task_cpu(p) and
2465 * not worry about this generic constraint ]
2466 */
2467 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
Peter Zijlstra70f11202009-12-20 17:36:27 +01002468 !cpu_online(cpu)))
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002469 cpu = select_fallback_rq(task_cpu(p), p);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002470
2471 return cpu;
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002472}
Mike Galbraith09a40af2010-04-15 07:29:59 +02002473
2474static void update_avg(u64 *avg, u64 sample)
2475{
2476 s64 diff = sample - *avg;
2477 *avg += diff >> 3;
2478}
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002479#endif
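/*
 * update_avg() is a 1/8-weight exponential moving average, used for
 * rq->avg_idle in the wakeup path: avg += (sample - avg) / 8. Worked
 * example starting from avg = 0 with repeated samples of 800:
 *
 *	800 -> 100, 800 -> 187, 800 -> 263, ...
 *
 * so the average converges towards the sample value while smoothing out
 * short-lived spikes.
 */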
2480
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002481static void
Peter Zijlstrab84cb5d2011-04-05 17:23:55 +02002482ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
Tejun Heo9ed38112009-12-03 15:08:03 +09002483{
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002484#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrab84cb5d2011-04-05 17:23:55 +02002485 struct rq *rq = this_rq();
Tejun Heo9ed38112009-12-03 15:08:03 +09002486
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002487#ifdef CONFIG_SMP
2488 int this_cpu = smp_processor_id();
Tejun Heo9ed38112009-12-03 15:08:03 +09002489
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002490 if (cpu == this_cpu) {
2491 schedstat_inc(rq, ttwu_local);
2492 schedstat_inc(p, se.statistics.nr_wakeups_local);
2493 } else {
2494 struct sched_domain *sd;
2495
2496 schedstat_inc(p, se.statistics.nr_wakeups_remote);
Peter Zijlstra057f3fa2011-04-18 11:24:34 +02002497 rcu_read_lock();
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002498 for_each_domain(this_cpu, sd) {
2499 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2500 schedstat_inc(sd, ttwu_wake_remote);
2501 break;
2502 }
2503 }
Peter Zijlstra057f3fa2011-04-18 11:24:34 +02002504 rcu_read_unlock();
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002505 }
Peter Zijlstraf339b9d2011-05-31 10:49:20 +02002506
2507 if (wake_flags & WF_MIGRATED)
2508 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2509
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002510#endif /* CONFIG_SMP */
2511
2512 schedstat_inc(rq, ttwu_count);
2513 schedstat_inc(p, se.statistics.nr_wakeups);
2514
2515 if (wake_flags & WF_SYNC)
2516 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2517
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002518#endif /* CONFIG_SCHEDSTATS */
Tejun Heo9ed38112009-12-03 15:08:03 +09002519}
2520
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002521static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
Tejun Heo9ed38112009-12-03 15:08:03 +09002522{
Tejun Heo9ed38112009-12-03 15:08:03 +09002523 activate_task(rq, p, en_flags);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002524 p->on_rq = 1;
Peter Zijlstrac2f71152011-04-13 13:28:56 +02002525
2526 /* if a worker is waking up, notify workqueue */
2527 if (p->flags & PF_WQ_WORKER)
2528 wq_worker_waking_up(p, cpu_of(rq));
Tejun Heo9ed38112009-12-03 15:08:03 +09002529}
2530
Peter Zijlstra23f41ee2011-04-05 17:23:56 +02002531/*
2532 * Mark the task runnable and perform wakeup-preemption.
2533 */
Peter Zijlstra89363382011-04-05 17:23:42 +02002534static void
Peter Zijlstra23f41ee2011-04-05 17:23:56 +02002535ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Tejun Heo9ed38112009-12-03 15:08:03 +09002536{
Peter Zijlstra89363382011-04-05 17:23:42 +02002537 trace_sched_wakeup(p, true);
Tejun Heo9ed38112009-12-03 15:08:03 +09002538 check_preempt_curr(rq, p, wake_flags);
2539
2540 p->state = TASK_RUNNING;
2541#ifdef CONFIG_SMP
2542 if (p->sched_class->task_woken)
2543 p->sched_class->task_woken(rq, p);
2544
2545 if (unlikely(rq->idle_stamp)) {
2546 u64 delta = rq->clock - rq->idle_stamp;
2547 u64 max = 2*sysctl_sched_migration_cost;
2548
2549 if (delta > max)
2550 rq->avg_idle = max;
2551 else
2552 update_avg(&rq->avg_idle, delta);
2553 rq->idle_stamp = 0;
2554 }
2555#endif
2556}
2557
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002558static void
2559ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
2560{
2561#ifdef CONFIG_SMP
2562 if (p->sched_contributes_to_load)
2563 rq->nr_uninterruptible--;
2564#endif
2565
2566 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
2567 ttwu_do_wakeup(rq, p, wake_flags);
2568}
2569
2570/*
 2571 * Called in case the task @p isn't fully descheduled from its runqueue;
 2572 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 2573 * since all we need to do is flip p->state to TASK_RUNNING, as
 2574 * the task is still ->on_rq.
2575 */
2576static int ttwu_remote(struct task_struct *p, int wake_flags)
2577{
2578 struct rq *rq;
2579 int ret = 0;
2580
2581 rq = __task_rq_lock(p);
2582 if (p->on_rq) {
2583 ttwu_do_wakeup(rq, p, wake_flags);
2584 ret = 1;
2585 }
2586 __task_rq_unlock(rq);
2587
2588 return ret;
2589}
2590
Peter Zijlstra317f3942011-04-05 17:23:58 +02002591#ifdef CONFIG_SMP
2592static void sched_ttwu_pending(void)
2593{
2594 struct rq *rq = this_rq();
2595 struct task_struct *list = xchg(&rq->wake_list, NULL);
2596
2597 if (!list)
2598 return;
2599
2600 raw_spin_lock(&rq->lock);
2601
2602 while (list) {
2603 struct task_struct *p = list;
2604 list = list->wake_entry;
2605 ttwu_do_activate(rq, p, 0);
2606 }
2607
2608 raw_spin_unlock(&rq->lock);
2609}
2610
2611void scheduler_ipi(void)
2612{
2613 sched_ttwu_pending();
2614}
2615
2616static void ttwu_queue_remote(struct task_struct *p, int cpu)
2617{
2618 struct rq *rq = cpu_rq(cpu);
2619 struct task_struct *next = rq->wake_list;
2620
2621 for (;;) {
2622 struct task_struct *old = next;
2623
2624 p->wake_entry = next;
2625 next = cmpxchg(&rq->wake_list, old, p);
2626 if (next == old)
2627 break;
2628 }
2629
2630 if (!next)
2631 smp_send_reschedule(cpu);
2632}
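/*
 * [Editorial sketch, not part of sched.c] ttwu_queue_remote() above pushes
 * the task onto rq->wake_list with a lock-free cmpxchg loop; only the
 * caller that finds the list empty sends the rescheduling IPI, and
 * sched_ttwu_pending() detaches the whole list at once with xchg(). A
 * minimal user-space model of that push/drain pattern is sketched below,
 * assuming C11 atomics; struct node, wake_list, push() and drain() are
 * invented names for the illustration.
 */
#include <stdatomic.h>
#include <stdio.h>

struct node {
        struct node *next;
        int id;
};

static _Atomic(struct node *) wake_list;

/* Returns 1 if the caller must notify the consumer (the "IPI"). */
static int push(struct node *n)
{
        struct node *old = atomic_load(&wake_list);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&wake_list, &old, n));

        return old == NULL;
}

static void drain(void)
{
        /* grab the whole list in one atomic exchange, like xchg() above */
        struct node *n = atomic_exchange(&wake_list, NULL);

        while (n) {
                struct node *next = n->next;

                printf("waking %d\n", n->id);
                n = next;
        }
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };
        int need_ipi = push(&a);

        need_ipi |= push(&b);   /* second push sees a non-empty list */
        if (need_ipi)
                drain();        /* stands in for scheduler_ipi()     */
        return 0;
}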
Peter Zijlstrad6aa8f82011-05-26 14:21:33 +02002633
2634#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2635static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
2636{
2637 struct rq *rq;
2638 int ret = 0;
2639
2640 rq = __task_rq_lock(p);
2641 if (p->on_cpu) {
2642 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2643 ttwu_do_wakeup(rq, p, wake_flags);
2644 ret = 1;
2645 }
2646 __task_rq_unlock(rq);
2647
2648 return ret;
2649
2650}
2651#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2652#endif /* CONFIG_SMP */
Peter Zijlstra317f3942011-04-05 17:23:58 +02002653
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002654static void ttwu_queue(struct task_struct *p, int cpu)
2655{
2656 struct rq *rq = cpu_rq(cpu);
2657
Daniel Hellstrom17d9f312011-05-20 04:01:10 +00002658#if defined(CONFIG_SMP)
Peter Zijlstra317f3942011-04-05 17:23:58 +02002659 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
Peter Zijlstraf01114c2011-05-31 12:26:55 +02002660 sched_clock_cpu(cpu); /* sync clocks x-cpu */
Peter Zijlstra317f3942011-04-05 17:23:58 +02002661 ttwu_queue_remote(p, cpu);
2662 return;
2663 }
2664#endif
2665
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002666 raw_spin_lock(&rq->lock);
2667 ttwu_do_activate(rq, p, 0);
2668 raw_spin_unlock(&rq->lock);
Tejun Heo9ed38112009-12-03 15:08:03 +09002669}
2670
2671/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 * try_to_wake_up - wake up a thread
Tejun Heo9ed38112009-12-03 15:08:03 +09002673 * @p: the thread to be awakened
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 * @state: the mask of task states that can be woken
Tejun Heo9ed38112009-12-03 15:08:03 +09002675 * @wake_flags: wake modifier flags (WF_*)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 *
2677 * Put it on the run-queue if it's not already there. The "current"
2678 * thread is always on the run-queue (except when the actual
2679 * re-schedule is in progress), and as such you're allowed to do
2680 * the simpler "current->state = TASK_RUNNING" to mark yourself
2681 * runnable without the overhead of this.
2682 *
Tejun Heo9ed38112009-12-03 15:08:03 +09002683 * Returns %true if @p was woken up, %false if it was already running
2684 * or @state didn't match @p's state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 */
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02002686static int
2687try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 unsigned long flags;
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002690 int cpu, success = 0;
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02002691
Linus Torvalds04e2f172008-02-23 18:05:03 -08002692 smp_wmb();
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002693 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002694 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 goto out;
2696
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002697 success = 1; /* we're going to change ->state */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 cpu = task_cpu(p);
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002699
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002700 if (p->on_rq && ttwu_remote(p, wake_flags))
2701 goto stat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702
2703#ifdef CONFIG_SMP
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002704 /*
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002705 * If the owning (remote) cpu is still in the middle of schedule() with
2706 * this task as prev, wait until it's done referencing the task.
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002707 */
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02002708 while (p->on_cpu) {
2709#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2710 /*
Peter Zijlstrad6aa8f82011-05-26 14:21:33 +02002711 * In case the architecture enables interrupts in
2712 * context_switch(), we cannot busy wait, since that
2713 * would lead to deadlocks when an interrupt hits and
2714 * tries to wake up @prev. So bail and do a complete
2715 * remote wakeup.
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02002716 */
Peter Zijlstrad6aa8f82011-05-26 14:21:33 +02002717 if (ttwu_activate_remote(p, wake_flags))
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002718 goto stat;
Peter Zijlstrad6aa8f82011-05-26 14:21:33 +02002719#else
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02002720 cpu_relax();
Peter Zijlstrad6aa8f82011-05-26 14:21:33 +02002721#endif
Peter Zijlstracc87f762010-03-26 12:22:14 +01002722 }
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02002723 /*
2724 * Pairs with the smp_wmb() in finish_lock_switch().
2725 */
2726 smp_rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727
Peter Zijlstraa8e4f2e2011-04-05 17:23:49 +02002728 p->sched_contributes_to_load = !!task_contributes_to_load(p);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002729 p->state = TASK_WAKING;
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002730
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02002731 if (p->sched_class->task_waking)
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02002732 p->sched_class->task_waking(p);
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002733
Peter Zijlstra7608dec2011-04-05 17:23:46 +02002734 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
Peter Zijlstraf339b9d2011-05-31 10:49:20 +02002735 if (task_cpu(p) != cpu) {
2736 wake_flags |= WF_MIGRATED;
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002737 set_task_cpu(p, cpu);
Peter Zijlstraf339b9d2011-05-31 10:49:20 +02002738 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02002741 ttwu_queue(p, cpu);
2742stat:
Peter Zijlstrab84cb5d2011-04-05 17:23:55 +02002743 ttwu_stat(p, cpu, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744out:
Peter Zijlstra013fdb82011-04-05 17:23:45 +02002745 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746
2747 return success;
2748}
2749
David Howells50fa6102009-04-28 15:01:38 +01002750/**
Tejun Heo21aa9af2010-06-08 21:40:37 +02002751 * try_to_wake_up_local - try to wake up a local task with rq lock held
2752 * @p: the thread to be awakened
2753 *
Peter Zijlstra2acca552011-04-05 17:23:50 +02002754 * Put @p on the run-queue if it's not already there. The caller must
Tejun Heo21aa9af2010-06-08 21:40:37 +02002755 * ensure that this_rq() is locked, @p is bound to this_rq() and not
Peter Zijlstra2acca552011-04-05 17:23:50 +02002756 * the current task.
Tejun Heo21aa9af2010-06-08 21:40:37 +02002757 */
2758static void try_to_wake_up_local(struct task_struct *p)
2759{
2760 struct rq *rq = task_rq(p);
Tejun Heo21aa9af2010-06-08 21:40:37 +02002761
2762 BUG_ON(rq != this_rq());
2763 BUG_ON(p == current);
2764 lockdep_assert_held(&rq->lock);
2765
Peter Zijlstra2acca552011-04-05 17:23:50 +02002766 if (!raw_spin_trylock(&p->pi_lock)) {
2767 raw_spin_unlock(&rq->lock);
2768 raw_spin_lock(&p->pi_lock);
2769 raw_spin_lock(&rq->lock);
Tejun Heo21aa9af2010-06-08 21:40:37 +02002770 }
Peter Zijlstra2acca552011-04-05 17:23:50 +02002771
Tejun Heo21aa9af2010-06-08 21:40:37 +02002772 if (!(p->state & TASK_NORMAL))
Peter Zijlstra2acca552011-04-05 17:23:50 +02002773 goto out;
Tejun Heo21aa9af2010-06-08 21:40:37 +02002774
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002775 if (!p->on_rq)
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02002776 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2777
Peter Zijlstra23f41ee2011-04-05 17:23:56 +02002778 ttwu_do_wakeup(rq, p, 0);
Peter Zijlstrab84cb5d2011-04-05 17:23:55 +02002779 ttwu_stat(p, smp_processor_id(), 0);
Peter Zijlstra2acca552011-04-05 17:23:50 +02002780out:
2781 raw_spin_unlock(&p->pi_lock);
Tejun Heo21aa9af2010-06-08 21:40:37 +02002782}
2783
2784/**
David Howells50fa6102009-04-28 15:01:38 +01002785 * wake_up_process - Wake up a specific process
2786 * @p: The process to be woken up.
2787 *
2788 * Attempt to wake up the nominated process and move it to the set of runnable
2789 * processes. Returns 1 if the process was woken up, 0 if it was already
2790 * running.
2791 *
2792 * It may be assumed that this function implies a write memory barrier before
2793 * changing the task state if and only if any tasks are woken up.
2794 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002795int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05002797 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799EXPORT_SYMBOL(wake_up_process);
2800
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002801int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802{
2803 return try_to_wake_up(p, state, 0);
2804}
2805
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806/*
2807 * Perform scheduler related setup for a newly forked process p.
2808 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02002809 *
2810 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002812static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813{
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002814 p->on_rq = 0;
2815
2816 p->se.on_rq = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02002817 p->se.exec_start = 0;
2818 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02002819 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002820 p->se.nr_migrations = 0;
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002821 p->se.vruntime = 0;
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002822 INIT_LIST_HEAD(&p->se.group_node);
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002823
2824#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03002825 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002826#endif
Nick Piggin476d1392005-06-25 14:57:29 -07002827
Peter Zijlstrafa717062008-01-25 21:08:27 +01002828 INIT_LIST_HEAD(&p->rt.run_list);
Nick Piggin476d1392005-06-25 14:57:29 -07002829
Avi Kivitye107be32007-07-26 13:40:43 +02002830#ifdef CONFIG_PREEMPT_NOTIFIERS
2831 INIT_HLIST_HEAD(&p->preempt_notifiers);
2832#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002833}
2834
2835/*
2836 * fork()/clone()-time setup:
2837 */
Samir Bellabes3e51e3e2011-05-11 18:18:05 +02002838void sched_fork(struct task_struct *p)
Ingo Molnardd41f592007-07-09 18:51:59 +02002839{
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002840 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002841 int cpu = get_cpu();
2842
2843 __sched_fork(p);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002844 /*
Peter Zijlstra0017d732010-03-24 18:34:10 +01002845 * We mark the process as running here. This guarantees that
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002846 * nobody will actually run it, and a signal or other external
2847 * event cannot wake it up and insert it on the runqueue either.
2848 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002849 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02002850
Ingo Molnarb29739f2006-06-27 02:54:51 -07002851 /*
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002852 * Revert to default priority/policy on fork if requested.
2853 */
2854 if (unlikely(p->sched_reset_on_fork)) {
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002855 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002856 p->policy = SCHED_NORMAL;
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002857 p->normal_prio = p->static_prio;
2858 }
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002859
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002860 if (PRIO_TO_NICE(p->static_prio) < 0) {
2861 p->static_prio = NICE_TO_PRIO(0);
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002862 p->normal_prio = p->static_prio;
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002863 set_load_weight(p);
2864 }
2865
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002866 /*
2867 * We don't need the reset flag anymore after the fork. It has
2868 * fulfilled its duty:
2869 */
2870 p->sched_reset_on_fork = 0;
2871 }
Lennart Poetteringca94c442009-06-15 17:17:47 +02002872
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002873 /*
2874 * Make sure we do not leak PI boosting priority to the child.
2875 */
2876 p->prio = current->normal_prio;
2877
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02002878 if (!rt_prio(p->prio))
2879 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07002880
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002881 if (p->sched_class->task_fork)
2882 p->sched_class->task_fork(p);
2883
Peter Zijlstra86951592010-06-22 11:44:53 +02002884 /*
2885 * The child is not yet in the pid-hash so no cgroup attach races,
2886 * and the cgroup is pinned to this child because cgroup_fork()
2887 * runs before sched_fork().
2888 *
2889 * Silence PROVE_RCU.
2890 */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002891 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002892 set_task_cpu(p, cpu);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002893 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002894
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002895#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02002896 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002897 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898#endif
Peter Zijlstra3ca7a442011-04-05 17:23:40 +02002899#if defined(CONFIG_SMP)
2900 p->on_cpu = 0;
Nick Piggin4866cde2005-06-25 14:57:23 -07002901#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902#ifdef CONFIG_PREEMPT
Nick Piggin4866cde2005-06-25 14:57:23 -07002903 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08002904 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905#endif
Dario Faggioli806c09a2010-11-30 19:51:33 +01002906#ifdef CONFIG_SMP
Gregory Haskins917b6272008-12-29 09:39:53 -05002907 plist_node_init(&p->pushable_tasks, MAX_PRIO);
Dario Faggioli806c09a2010-11-30 19:51:33 +01002908#endif
Gregory Haskins917b6272008-12-29 09:39:53 -05002909
Nick Piggin476d1392005-06-25 14:57:29 -07002910 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911}
2912
2913/*
2914 * wake_up_new_task - wake up a newly created task for the first time.
2915 *
2916 * This function will do some initial scheduler statistics housekeeping
2917 * that must be done for every newly created context, then puts the task
2918 * on the runqueue and wakes it.
2919 */
Samir Bellabes3e51e3e2011-05-11 18:18:05 +02002920void wake_up_new_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921{
2922 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002923 struct rq *rq;
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002924
Peter Zijlstraab2515c2011-04-05 17:23:52 +02002925 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002926#ifdef CONFIG_SMP
2927 /*
2928 * Fork balancing, do it here and not earlier because:
2929 * - cpus_allowed can change in the fork path
2930 * - any previously selected cpu might disappear through hotplug
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002931 */
Peter Zijlstraab2515c2011-04-05 17:23:52 +02002932 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002933#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934
Peter Zijlstraab2515c2011-04-05 17:23:52 +02002935 rq = __task_rq_lock(p);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002936 activate_task(rq, p, 0);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02002937 p->on_rq = 1;
Peter Zijlstra89363382011-04-05 17:23:42 +02002938 trace_sched_wakeup_new(p, true);
Peter Zijlstraa7558e02009-09-14 20:02:34 +02002939 check_preempt_curr(rq, p, WF_FORK);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002940#ifdef CONFIG_SMP
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002941 if (p->sched_class->task_woken)
2942 p->sched_class->task_woken(rq, p);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002943#endif
Peter Zijlstra0122ec52011-04-05 17:23:51 +02002944 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945}
2946
Avi Kivitye107be32007-07-26 13:40:43 +02002947#ifdef CONFIG_PREEMPT_NOTIFIERS
2948
2949/**
Luis Henriques80dd99b2009-03-16 19:58:09 +00002950 * preempt_notifier_register - tell me when current is being preempted & rescheduled
Randy Dunlap421cee22007-07-31 00:37:50 -07002951 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02002952 */
2953void preempt_notifier_register(struct preempt_notifier *notifier)
2954{
2955 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2956}
2957EXPORT_SYMBOL_GPL(preempt_notifier_register);
2958
2959/**
2960 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07002961 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02002962 *
2963 * This is safe to call from within a preemption notifier.
2964 */
2965void preempt_notifier_unregister(struct preempt_notifier *notifier)
2966{
2967 hlist_del(&notifier->link);
2968}
2969EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2970
2971static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2972{
2973 struct preempt_notifier *notifier;
2974 struct hlist_node *node;
2975
2976 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2977 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2978}
2979
2980static void
2981fire_sched_out_preempt_notifiers(struct task_struct *curr,
2982 struct task_struct *next)
2983{
2984 struct preempt_notifier *notifier;
2985 struct hlist_node *node;
2986
2987 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2988 notifier->ops->sched_out(notifier, next);
2989}
2990
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002991#else /* !CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002992
2993static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2994{
2995}
2996
2997static void
2998fire_sched_out_preempt_notifiers(struct task_struct *curr,
2999 struct task_struct *next)
3000{
3001}
3002
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02003003#endif /* CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02003004
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005/**
Nick Piggin4866cde2005-06-25 14:57:23 -07003006 * prepare_task_switch - prepare to switch tasks
3007 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07003008 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07003009 * @next: the task we are going to switch to.
3010 *
3011 * This is called with the rq lock held and interrupts off. It must
3012 * be paired with a subsequent finish_task_switch after the context
3013 * switch.
3014 *
3015 * prepare_task_switch sets up locking and calls architecture specific
3016 * hooks.
3017 */
Avi Kivitye107be32007-07-26 13:40:43 +02003018static inline void
3019prepare_task_switch(struct rq *rq, struct task_struct *prev,
3020 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07003021{
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003022 sched_info_switch(prev, next);
3023 perf_event_task_sched_out(prev, next);
Avi Kivitye107be32007-07-26 13:40:43 +02003024 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07003025 prepare_lock_switch(rq, next);
3026 prepare_arch_switch(next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003027 trace_sched_switch(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07003028}
3029
3030/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04003032 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 * @prev: the thread we just switched away from.
3034 *
Nick Piggin4866cde2005-06-25 14:57:23 -07003035 * finish_task_switch must be called after the context switch, paired
3036 * with a prepare_task_switch call before the context switch.
3037 * finish_task_switch will reconcile locking set up by prepare_task_switch,
3038 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 *
3040 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01003041 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 * with the lock held can cause deadlocks; see schedule() for
3043 * details.)
3044 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02003045static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046 __releases(rq->lock)
3047{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07003049 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050
3051 rq->prev_mm = NULL;
3052
3053 /*
3054 * A task struct has one reference for the use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07003055 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07003056 * schedule one last time. The schedule call will never return, and
3057 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07003058 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 * still held, otherwise prev could be scheduled on another cpu, die
3060 * there before we look at prev->state, and then the reference would
3061 * be dropped twice.
3062 * Manfred Spraul <manfred@colorfullife.com>
3063 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07003064 prev_state = prev->state;
Nick Piggin4866cde2005-06-25 14:57:23 -07003065 finish_arch_switch(prev);
Jamie Iles8381f652010-01-08 15:27:33 +00003066#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3067 local_irq_disable();
3068#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Peter Zijlstra49f47432009-12-27 11:51:52 +01003069 perf_event_task_sched_in(current);
Jamie Iles8381f652010-01-08 15:27:33 +00003070#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3071 local_irq_enable();
3072#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Nick Piggin4866cde2005-06-25 14:57:23 -07003073 finish_lock_switch(rq, prev);
Steven Rostedte8fa1362008-01-25 21:08:05 +01003074
Avi Kivitye107be32007-07-26 13:40:43 +02003075 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 if (mm)
3077 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07003078 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08003079 /*
3080 * Remove function-return probe instances associated with this
3081 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02003082 */
bibo maoc6fd91f2006-03-26 01:38:20 -08003083 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08003085 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086}
3087
Gregory Haskins3f029d32009-07-29 11:08:47 -04003088#ifdef CONFIG_SMP
3089
3090/* assumes rq->lock is held */
3091static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
3092{
3093 if (prev->sched_class->pre_schedule)
3094 prev->sched_class->pre_schedule(rq, prev);
3095}
3096
3097/* rq->lock is NOT held, but preemption is disabled */
3098static inline void post_schedule(struct rq *rq)
3099{
3100 if (rq->post_schedule) {
3101 unsigned long flags;
3102
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003103 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04003104 if (rq->curr->sched_class->post_schedule)
3105 rq->curr->sched_class->post_schedule(rq);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003106 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04003107
3108 rq->post_schedule = 0;
3109 }
3110}
3111
3112#else
3113
3114static inline void pre_schedule(struct rq *rq, struct task_struct *p)
3115{
3116}
3117
3118static inline void post_schedule(struct rq *rq)
3119{
3120}
3121
3122#endif
3123
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124/**
3125 * schedule_tail - first thing a freshly forked thread must call.
3126 * @prev: the thread we just switched away from.
3127 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07003128asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 __releases(rq->lock)
3130{
Ingo Molnar70b97a72006-07-03 00:25:42 -07003131 struct rq *rq = this_rq();
3132
Nick Piggin4866cde2005-06-25 14:57:23 -07003133 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04003134
Gregory Haskins3f029d32009-07-29 11:08:47 -04003135 /*
3136 * FIXME: do we need to worry about rq being invalidated by the
3137 * task_switch?
3138 */
3139 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04003140
Nick Piggin4866cde2005-06-25 14:57:23 -07003141#ifdef __ARCH_WANT_UNLOCKED_CTXSW
3142 /* In this case, finish_task_switch does not reenable preemption */
3143 preempt_enable();
3144#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07003146 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147}
3148
3149/*
3150 * context_switch - switch to the new MM and the new
3151 * thread's register state.
3152 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003153static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07003154context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07003155 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156{
Ingo Molnardd41f592007-07-09 18:51:59 +02003157 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158
Avi Kivitye107be32007-07-26 13:40:43 +02003159 prepare_task_switch(rq, prev, next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003160
Ingo Molnardd41f592007-07-09 18:51:59 +02003161 mm = next->mm;
3162 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01003163 /*
3164 * For paravirt, this is coupled with an exit in switch_to to
3165 * combine the page table reload and the switch backend into
3166 * one hypercall.
3167 */
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -08003168 arch_start_context_switch(prev);
Zachary Amsden9226d122007-02-13 13:26:21 +01003169
Heiko Carstens31915ab2010-09-16 14:42:25 +02003170 if (!mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171 next->active_mm = oldmm;
3172 atomic_inc(&oldmm->mm_count);
3173 enter_lazy_tlb(oldmm, next);
3174 } else
3175 switch_mm(oldmm, mm, next);
3176
Heiko Carstens31915ab2010-09-16 14:42:25 +02003177 if (!prev->mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 rq->prev_mm = oldmm;
3180 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07003181 /*
3182 * Since the runqueue lock will be released by the next
3183 * task (which is an invalid locking op but in the case
3184 * of the scheduler it's an obvious special-case), we
3185 * do an early lockdep release here:
3186 */
3187#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07003188 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07003189#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190
3191 /* Here we just switch the register state and the stack. */
3192 switch_to(prev, next, prev);
3193
Ingo Molnardd41f592007-07-09 18:51:59 +02003194 barrier();
3195 /*
3196 * this_rq must be evaluated again because prev may have moved
3197 * CPUs since it called schedule(), thus the 'rq' on its stack
3198 * frame will be invalid.
3199 */
3200 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201}
3202
3203/*
3204 * nr_running, nr_uninterruptible and nr_context_switches:
3205 *
3206 * externally visible scheduler statistics: current number of runnable
3207 * threads, current number of uninterruptible-sleeping threads, total
3208 * number of context switches performed since bootup.
3209 */
3210unsigned long nr_running(void)
3211{
3212 unsigned long i, sum = 0;
3213
3214 for_each_online_cpu(i)
3215 sum += cpu_rq(i)->nr_running;
3216
3217 return sum;
3218}
3219
3220unsigned long nr_uninterruptible(void)
3221{
3222 unsigned long i, sum = 0;
3223
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003224 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 sum += cpu_rq(i)->nr_uninterruptible;
3226
3227 /*
3228 * Since we read the counters lockless, it might be slightly
3229 * inaccurate. Do not allow it to go below zero though:
3230 */
3231 if (unlikely((long)sum < 0))
3232 sum = 0;
3233
3234 return sum;
3235}
3236
3237unsigned long long nr_context_switches(void)
3238{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07003239 int i;
3240 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003242 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 sum += cpu_rq(i)->nr_switches;
3244
3245 return sum;
3246}
3247
3248unsigned long nr_iowait(void)
3249{
3250 unsigned long i, sum = 0;
3251
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003252 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 sum += atomic_read(&cpu_rq(i)->nr_iowait);
3254
3255 return sum;
3256}
3257
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003258unsigned long nr_iowait_cpu(int cpu)
Arjan van de Ven69d25872009-09-21 17:04:08 -07003259{
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003260 struct rq *this = cpu_rq(cpu);
Arjan van de Ven69d25872009-09-21 17:04:08 -07003261 return atomic_read(&this->nr_iowait);
3262}
3263
3264unsigned long this_cpu_load(void)
3265{
3266 struct rq *this = this_rq();
3267 return this->cpu_load[0];
3268}
3269
3270
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003271/* Variables and functions for calc_load */
3272static atomic_long_t calc_load_tasks;
3273static unsigned long calc_load_update;
3274unsigned long avenrun[3];
3275EXPORT_SYMBOL(avenrun);
3276
Peter Zijlstra74f51872010-04-22 21:50:19 +02003277static long calc_load_fold_active(struct rq *this_rq)
3278{
3279 long nr_active, delta = 0;
3280
3281 nr_active = this_rq->nr_running;
3282 nr_active += (long) this_rq->nr_uninterruptible;
3283
3284 if (nr_active != this_rq->calc_load_active) {
3285 delta = nr_active - this_rq->calc_load_active;
3286 this_rq->calc_load_active = nr_active;
3287 }
3288
3289 return delta;
3290}
3291
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003292static unsigned long
3293calc_load(unsigned long load, unsigned long exp, unsigned long active)
3294{
3295 load *= exp;
3296 load += active * (FIXED_1 - exp);
3297 load += 1UL << (FSHIFT - 1);
3298 return load >> FSHIFT;
3299}
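/*
 * [Editorial sketch, not part of sched.c] calc_load() above is one step of
 * an exponentially weighted moving average in fixed point. The stand-alone
 * program below replays one minute of LOAD_FREQ (5 s) updates on an
 * initially idle machine with three runnable tasks; FSHIFT, FIXED_1 and
 * EXP_1 mirror the kernel's usual 11-bit fixed-point constants and should
 * be treated as assumptions of the example.
 */
#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)
#define EXP_1   1884            /* ~ FIXED_1 * exp(-5s/60s) */

static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        load += 1UL << (FSHIFT - 1);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long avg = 0;                  /* machine was idle        */
        unsigned long active = 3 * FIXED_1;     /* 3 tasks, already scaled */
        int i;

        for (i = 0; i < 12; i++)                /* 12 x 5 s = one minute   */
                avg = calc_load(avg, EXP_1, active);

        /* after one minute the 1-min average has climbed to about 1.9 */
        printf("1-min load: %lu.%02lu\n",
               avg >> FSHIFT, ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
}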
3300
Peter Zijlstra74f51872010-04-22 21:50:19 +02003301#ifdef CONFIG_NO_HZ
3302/*
3303 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3304 *
3305 * When making the ILB scale, we should try to pull this in as well.
3306 */
3307static atomic_long_t calc_load_tasks_idle;
3308
3309static void calc_load_account_idle(struct rq *this_rq)
3310{
3311 long delta;
3312
3313 delta = calc_load_fold_active(this_rq);
3314 if (delta)
3315 atomic_long_add(delta, &calc_load_tasks_idle);
3316}
3317
3318static long calc_load_fold_idle(void)
3319{
3320 long delta = 0;
3321
3322 /*
3323 * It's got a race, we don't care...
3324 */
3325 if (atomic_long_read(&calc_load_tasks_idle))
3326 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3327
3328 return delta;
3329}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003330
3331/**
3332 * fixed_power_int - compute: x^n, in O(log n) time
3333 *
3334 * @x: base of the power
3335 * @frac_bits: fractional bits of @x
3336 * @n: power to raise @x to.
3337 *
3338 * By exploiting the relation between the definition of the natural power
3339 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
3340 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3341 * (where: n_i \elem {0, 1}, the binary vector representing n),
3342 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3343 * of course trivially computable in O(log_2 n), the length of our binary
3344 * vector.
3345 */
3346static unsigned long
3347fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3348{
3349 unsigned long result = 1UL << frac_bits;
3350
3351 if (n) for (;;) {
3352 if (n & 1) {
3353 result *= x;
3354 result += 1UL << (frac_bits - 1);
3355 result >>= frac_bits;
3356 }
3357 n >>= 1;
3358 if (!n)
3359 break;
3360 x *= x;
3361 x += 1UL << (frac_bits - 1);
3362 x >>= frac_bits;
3363 }
3364
3365 return result;
3366}
3367
3368/*
3369 * a1 = a0 * e + a * (1 - e)
3370 *
3371 * a2 = a1 * e + a * (1 - e)
3372 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3373 * = a0 * e^2 + a * (1 - e) * (1 + e)
3374 *
3375 * a3 = a2 * e + a * (1 - e)
3376 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3377 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3378 *
3379 * ...
3380 *
3381 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3382 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3383 * = a0 * e^n + a * (1 - e^n)
3384 *
3385 * [1] application of the geometric series:
3386 *
3387 * n 1 - x^(n+1)
3388 * S_n := \Sum x^i = -------------
3389 * i=0 1 - x
3390 */
3391static unsigned long
3392calc_load_n(unsigned long load, unsigned long exp,
3393 unsigned long active, unsigned int n)
3394{
3395
3396 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3397}
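/*
 * [Editorial sketch, not part of sched.c] The point of calc_load_n() and
 * fixed_power_int() above: folding n missed LOAD_FREQ periods in one step
 * gives (up to rounding) the same answer as applying calc_load() n times.
 * The program below checks that for a load of 2.00 decaying while idle;
 * the fixed-point constants mirror the kernel's usual values and are
 * assumptions of the example.
 */
#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)
#define EXP_1   1884

static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        load += 1UL << (FSHIFT - 1);
        return load >> FSHIFT;
}

static unsigned long fixed_power_int(unsigned long x, unsigned int frac_bits,
                                     unsigned int n)
{
        unsigned long result = 1UL << frac_bits;

        while (n) {
                if (n & 1) {
                        result *= x;
                        result += 1UL << (frac_bits - 1);
                        result >>= frac_bits;
                }
                n >>= 1;
                if (!n)
                        break;
                x *= x;
                x += 1UL << (frac_bits - 1);
                x >>= frac_bits;
        }
        return result;
}

int main(void)
{
        unsigned long load = 2 * FIXED_1, iterated = load;
        unsigned int n = 12, i;

        for (i = 0; i < n; i++)                 /* the tick-by-tick way */
                iterated = calc_load(iterated, EXP_1, 0);

        printf("one shot: %lu, iterated: %lu\n",
               calc_load(load, fixed_power_int(EXP_1, FSHIFT, n), 0),
               iterated);
        return 0;
}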
3398
3399/*
3400 * NO_HZ can leave us missing all per-cpu ticks calling
3401 * calc_load_account_active(), but since an idle CPU folds its delta into
3402 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3403 * in the pending idle delta if our idle period crossed a load cycle boundary.
3404 *
3405 * Once we've updated the global active value, we need to apply the exponential
3406 * weights adjusted to the number of cycles missed.
3407 */
3408static void calc_global_nohz(unsigned long ticks)
3409{
3410 long delta, active, n;
3411
3412 if (time_before(jiffies, calc_load_update))
3413 return;
3414
3415 /*
3416 * If we crossed a calc_load_update boundary, make sure to fold
3417 * any pending idle changes, the respective CPUs might have
3418 * missed the tick driven calc_load_account_active() update
3419 * due to NO_HZ.
3420 */
3421 delta = calc_load_fold_idle();
3422 if (delta)
3423 atomic_long_add(delta, &calc_load_tasks);
3424
3425 /*
3426 * If we were idle for multiple load cycles, apply them.
3427 */
3428 if (ticks >= LOAD_FREQ) {
3429 n = ticks / LOAD_FREQ;
3430
3431 active = atomic_long_read(&calc_load_tasks);
3432 active = active > 0 ? active * FIXED_1 : 0;
3433
3434 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3435 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3436 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3437
3438 calc_load_update += n * LOAD_FREQ;
3439 }
3440
3441 /*
3442 * It's possible the remainder of the above division also crosses
3443 * a LOAD_FREQ period, the regular check in calc_global_load()
3444 * which comes after this will take care of that.
3445 *
3446 * Consider us being 11 ticks before a cycle completion, and us
3447 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3448 * age us 4 cycles, and the test in calc_global_load() will
3449 * pick up the final one.
3450 */
3451}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003452#else
3453static void calc_load_account_idle(struct rq *this_rq)
3454{
3455}
3456
3457static inline long calc_load_fold_idle(void)
3458{
3459 return 0;
3460}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003461
3462static void calc_global_nohz(unsigned long ticks)
3463{
3464}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003465#endif
3466
Thomas Gleixner2d024942009-05-02 20:08:52 +02003467/**
3468 * get_avenrun - get the load average array
3469 * @loads: pointer to dest load array
3470 * @offset: offset to add
3471 * @shift: shift count to shift the result left
3472 *
3473 * These values are estimates at best, so no need for locking.
3474 */
3475void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3476{
3477 loads[0] = (avenrun[0] + offset) << shift;
3478 loads[1] = (avenrun[1] + offset) << shift;
3479 loads[2] = (avenrun[2] + offset) << shift;
3480}
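/*
 * [Editorial sketch, not part of sched.c] A reader such as /proc/loadavg
 * conventionally calls get_avenrun(loads, FIXED_1/200, 0) so that the
 * truncation below rounds to the nearest hundredth, then splits each
 * fixed-point value into "X.YY". The macros and constants here follow
 * that convention and are assumptions of the sketch, not definitions
 * from this file.
 */
#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)

#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
        /* pretend these came back from get_avenrun(loads, FIXED_1/200, 0) */
        unsigned long loads[3] = {
                3 * FIXED_1 / 2 + FIXED_1 / 200,        /* prints 1.50 */
                FIXED_1 / 4 + FIXED_1 / 200,            /* prints 0.25 */
                FIXED_1 / 200,                          /* prints 0.00 */
        };

        printf("%lu.%02lu %lu.%02lu %lu.%02lu\n",
               LOAD_INT(loads[0]), LOAD_FRAC(loads[0]),
               LOAD_INT(loads[1]), LOAD_FRAC(loads[1]),
               LOAD_INT(loads[2]), LOAD_FRAC(loads[2]));
        return 0;
}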
3481
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003482/*
3483 * calc_load - update the avenrun load estimates 10 ticks after the
3484 * CPUs have updated calc_load_tasks.
3485 */
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003486void calc_global_load(unsigned long ticks)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003487{
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003488 long active;
3489
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003490 calc_global_nohz(ticks);
3491
3492 if (time_before(jiffies, calc_load_update + 10))
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003493 return;
3494
3495 active = atomic_long_read(&calc_load_tasks);
3496 active = active > 0 ? active * FIXED_1 : 0;
3497
3498 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3499 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3500 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3501
3502 calc_load_update += LOAD_FREQ;
3503}
3504
3505/*
Peter Zijlstra74f51872010-04-22 21:50:19 +02003506 * Called from update_cpu_load() to periodically update this CPU's
3507 * active count.
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003508 */
3509static void calc_load_account_active(struct rq *this_rq)
3510{
Peter Zijlstra74f51872010-04-22 21:50:19 +02003511 long delta;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003512
Peter Zijlstra74f51872010-04-22 21:50:19 +02003513 if (time_before(jiffies, this_rq->calc_load_update))
3514 return;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003515
Peter Zijlstra74f51872010-04-22 21:50:19 +02003516 delta = calc_load_fold_active(this_rq);
3517 delta += calc_load_fold_idle();
3518 if (delta)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003519 atomic_long_add(delta, &calc_load_tasks);
Peter Zijlstra74f51872010-04-22 21:50:19 +02003520
3521 this_rq->calc_load_update += LOAD_FREQ;
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08003522}
3523
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524/*
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003525 * The exact cpuload at various idx values, calculated at every tick would be
3526 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3527 *
3528 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3529 * on nth tick when cpu may be busy, then we have:
3530 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3531 * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3532 *
3533 * decay_load_missed() below does efficient calculation of
3534 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3535 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3536 *
3537 * The calculation is approximated on a 128 point scale.
3538 * degrade_zero_ticks is the number of ticks after which load at any
3539 * particular idx is approximated to be zero.
3540 * degrade_factor is a precomputed table, a row for each load idx.
3541 * Each column corresponds to degradation factor for a power of two ticks,
3542 * based on 128 point scale.
3543 * Example:
3544 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3545 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3546 *
3547 * With this power of 2 load factors, we can degrade the load n times
3548 * by looking at 1 bits in n and doing as many mult/shift instead of
3549 * n mult/shifts needed by the exact degradation.
3550 */
3551#define DEGRADE_SHIFT 7
3552static const unsigned char
3553 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3554static const unsigned char
3555 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3556 {0, 0, 0, 0, 0, 0, 0, 0},
3557 {64, 32, 8, 0, 0, 0, 0, 0},
3558 {96, 72, 40, 12, 1, 0, 0},
3559 {112, 98, 75, 43, 15, 1, 0},
3560 {120, 112, 98, 76, 45, 16, 2} };
3561
3562/*
3563 * Update cpu_load for any missed ticks due to tickless idle. The backlog
3564 * accrues while the CPU is idle, so we just decay the old load without
3565 * adding any new load.
3566 */
3567static unsigned long
3568decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3569{
3570 int j = 0;
3571
3572 if (!missed_updates)
3573 return load;
3574
3575 if (missed_updates >= degrade_zero_ticks[idx])
3576 return 0;
3577
3578 if (idx == 1)
3579 return load >> missed_updates;
3580
3581 while (missed_updates) {
3582 if (missed_updates % 2)
3583 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3584
3585 missed_updates >>= 1;
3586 j++;
3587 }
3588 return load;
3589}
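/*
 * [Editorial sketch, not part of sched.c] decay_load_missed() applies the
 * per-tick factor (2^idx - 1)/2^idx for 'missed' ticks using one
 * multiply/shift per set bit of 'missed'. The program below redoes the
 * idx = 2 case (factor 3/4, table row {96, 72, 40, 12, 1, 0, 0} quoted in
 * the comment above) for 8 missed ticks and compares it with decaying
 * tick by tick; small differences are rounding only.
 */
#include <stdio.h>

#define DEGRADE_SHIFT 7

/* (3/4)^(2^j) on a 128-point scale, as in the idx = 2 row above */
static const unsigned char degrade_idx2[DEGRADE_SHIFT + 1] =
        {96, 72, 40, 12, 1, 0, 0, 0};

static unsigned long decay_missed(unsigned long load, unsigned long missed)
{
        int j = 0;

        while (missed) {
                if (missed & 1)
                        load = (load * degrade_idx2[j]) >> DEGRADE_SHIFT;
                missed >>= 1;
                j++;
        }
        return load;
}

int main(void)
{
        unsigned long load = 1024, exact = load, i, missed = 8;

        for (i = 0; i < missed; i++)    /* the slow way: 3/4 per tick */
                exact = exact * 3 / 4;

        printf("fast: %lu, tick-by-tick: %lu\n",
               decay_missed(load, missed), exact);
        return 0;
}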
3590
3591/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003592 * Update rq->cpu_load[] statistics. This function is usually called every
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003593 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3594 * every tick. We fix it up based on jiffies.
Ingo Molnar48f24c42006-07-03 00:25:40 -07003595 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003596static void update_cpu_load(struct rq *this_rq)
Ingo Molnar48f24c42006-07-03 00:25:40 -07003597{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02003598 unsigned long this_load = this_rq->load.weight;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003599 unsigned long curr_jiffies = jiffies;
3600 unsigned long pending_updates;
Ingo Molnardd41f592007-07-09 18:51:59 +02003601 int i, scale;
3602
3603 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02003604
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003605 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3606 if (curr_jiffies == this_rq->last_load_update_tick)
3607 return;
3608
3609 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3610 this_rq->last_load_update_tick = curr_jiffies;
3611
Ingo Molnardd41f592007-07-09 18:51:59 +02003612 /* Update our load: */
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003613 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3614 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
Ingo Molnardd41f592007-07-09 18:51:59 +02003615 unsigned long old_load, new_load;
3616
3617 /* scale is effectively 1 << i now, and >> i divides by scale */
3618
3619 old_load = this_rq->cpu_load[i];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003620 old_load = decay_load_missed(old_load, pending_updates - 1, i);
Ingo Molnardd41f592007-07-09 18:51:59 +02003621 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02003622 /*
3623 * Round up the averaging division if load is increasing. This
3624 * prevents us from getting stuck on 9 if the load is 10, for
3625 * example.
3626 */
3627 if (new_load > old_load)
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003628 new_load += scale - 1;
3629
3630 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
Ingo Molnardd41f592007-07-09 18:51:59 +02003631 }
Suresh Siddhada2b71e2010-08-23 13:42:51 -07003632
3633 sched_avg_update(this_rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003634}
3635
3636static void update_cpu_load_active(struct rq *this_rq)
3637{
3638 update_cpu_load(this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003639
Peter Zijlstra74f51872010-04-22 21:50:19 +02003640 calc_load_account_active(this_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07003641}
3642
Ingo Molnardd41f592007-07-09 18:51:59 +02003643#ifdef CONFIG_SMP
3644
Ingo Molnar48f24c42006-07-03 00:25:40 -07003645/*
Peter Zijlstra38022902009-12-16 18:04:37 +01003646 * sched_exec - execve() is a valuable balancing opportunity, because at
3647 * this point the task has the smallest effective memory and cache footprint.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 */
Peter Zijlstra38022902009-12-16 18:04:37 +01003649void sched_exec(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650{
Peter Zijlstra38022902009-12-16 18:04:37 +01003651 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652 unsigned long flags;
Peter Zijlstra0017d732010-03-24 18:34:10 +01003653 int dest_cpu;
Peter Zijlstra38022902009-12-16 18:04:37 +01003654
Peter Zijlstra8f42ced2011-04-05 17:23:53 +02003655 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003656 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
Peter Zijlstra0017d732010-03-24 18:34:10 +01003657 if (dest_cpu == smp_processor_id())
3658 goto unlock;
Peter Zijlstra38022902009-12-16 18:04:37 +01003659
Peter Zijlstra8f42ced2011-04-05 17:23:53 +02003660 if (likely(cpu_active(dest_cpu))) {
Tejun Heo969c7922010-05-06 18:49:21 +02003661 struct migration_arg arg = { p, dest_cpu };
Ingo Molnar36c8b582006-07-03 00:25:41 -07003662
Peter Zijlstra8f42ced2011-04-05 17:23:53 +02003663 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3664 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665 return;
3666 }
Peter Zijlstra0017d732010-03-24 18:34:10 +01003667unlock:
Peter Zijlstra8f42ced2011-04-05 17:23:53 +02003668 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669}
3670
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671#endif
3672
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673DEFINE_PER_CPU(struct kernel_stat, kstat);
3674
3675EXPORT_PER_CPU_SYMBOL(kstat);
3676
3677/*
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003678 * Return any ns on the sched_clock that have not yet been accounted in
Frank Mayharf06febc2008-09-12 09:54:39 -07003679 * @p in case that task is currently running.
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003680 *
3681 * Called with task_rq_lock() held on @rq.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 */
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003683static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3684{
3685 u64 ns = 0;
3686
3687 if (task_current(rq, p)) {
3688 update_rq_clock(rq);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07003689 ns = rq->clock_task - p->se.exec_start;
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003690 if ((s64)ns < 0)
3691 ns = 0;
3692 }
3693
3694 return ns;
3695}
3696
Frank Mayharbb34d922008-09-12 09:54:39 -07003697unsigned long long task_delta_exec(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 unsigned long flags;
Ingo Molnar41b86e92007-07-09 18:51:58 +02003700 struct rq *rq;
Frank Mayharbb34d922008-09-12 09:54:39 -07003701 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003702
Ingo Molnar41b86e92007-07-09 18:51:58 +02003703 rq = task_rq_lock(p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003704 ns = do_task_delta_exec(p, rq);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003705 task_rq_unlock(rq, p, &flags);
Ingo Molnar15084872008-09-30 08:28:17 +02003706
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003707 return ns;
3708}
Frank Mayharf06febc2008-09-12 09:54:39 -07003709
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003710/*
3711 * Return accounted runtime for the task.
3712 * In case the task is currently running, return the runtime plus current's
3713 * pending runtime that has not been accounted yet.
3714 */
3715unsigned long long task_sched_runtime(struct task_struct *p)
3716{
3717 unsigned long flags;
3718 struct rq *rq;
3719 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003720
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003721 rq = task_rq_lock(p, &flags);
3722 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003723 task_rq_unlock(rq, p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003724
3725 return ns;
3726}
3727
3728/*
3729 * Return sum_exec_runtime for the thread group.
3730 * In case the task is currently running, return the sum plus current's
3731 * pending runtime that has not been accounted yet.
3732 *
3733 * Note that the thread group might have other running tasks as well,
3734 * so the return value does not include pending runtime that other
3735 * running tasks might have.
3736 */
3737unsigned long long thread_group_sched_runtime(struct task_struct *p)
3738{
3739 struct task_cputime totals;
3740 unsigned long flags;
3741 struct rq *rq;
3742 u64 ns;
3743
3744 rq = task_rq_lock(p, &flags);
3745 thread_group_cputime(p, &totals);
3746 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003747 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748
3749 return ns;
3750}
3751
3752/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753 * Account user cpu time to a process.
3754 * @p: the process that the cpu time gets accounted to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 * @cputime: the cpu time spent in user space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003756 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003758void account_user_time(struct task_struct *p, cputime_t cputime,
3759 cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760{
3761 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3762 cputime64_t tmp;
3763
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003764 /* Add user time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003766 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003767 account_group_user_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768
3769 /* Add user time to cpustat. */
3770 tmp = cputime_to_cputime64(cputime);
3771 if (TASK_NICE(p) > 0)
3772 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3773 else
3774 cpustat->user = cputime64_add(cpustat->user, tmp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05303775
3776 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
Jonathan Lim49b5cf32008-07-25 01:48:40 -07003777 /* Account for user time used */
3778 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779}
3780
3781/*
Laurent Vivier94886b82007-10-15 17:00:19 +02003782 * Account guest cpu time to a process.
3783 * @p: the process that the cpu time gets accounted to
3784 * @cputime: the cpu time spent in virtual machine since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003785 * @cputime_scaled: cputime scaled by cpu frequency
Laurent Vivier94886b82007-10-15 17:00:19 +02003786 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003787static void account_guest_time(struct task_struct *p, cputime_t cputime,
3788 cputime_t cputime_scaled)
Laurent Vivier94886b82007-10-15 17:00:19 +02003789{
3790 cputime64_t tmp;
3791 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3792
3793 tmp = cputime_to_cputime64(cputime);
3794
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003795 /* Add guest time to process. */
Laurent Vivier94886b82007-10-15 17:00:19 +02003796 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003797 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003798 account_group_user_time(p, cputime);
Laurent Vivier94886b82007-10-15 17:00:19 +02003799 p->gtime = cputime_add(p->gtime, cputime);
3800
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003801 /* Add guest time to cpustat. */
Ryota Ozakice0e7b22009-10-24 01:20:10 +09003802 if (TASK_NICE(p) > 0) {
3803 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3804 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3805 } else {
3806 cpustat->user = cputime64_add(cpustat->user, tmp);
3807 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3808 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003809}
3810
3811/*
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003812 * Account system cpu time to a process and desired cpustat field
3813 * @p: the process that the cpu time gets accounted to
3814 * @cputime: the cpu time spent in kernel space since the last update
3815 * @cputime_scaled: cputime scaled by cpu frequency
3816 * @target_cputime64: pointer to cpustat field that has to be updated
3817 */
3818static inline
3819void __account_system_time(struct task_struct *p, cputime_t cputime,
3820 cputime_t cputime_scaled, cputime64_t *target_cputime64)
3821{
3822 cputime64_t tmp = cputime_to_cputime64(cputime);
3823
3824 /* Add system time to process. */
3825 p->stime = cputime_add(p->stime, cputime);
3826 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3827 account_group_system_time(p, cputime);
3828
3829 /* Add system time to cpustat. */
3830 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
3831 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3832
3833 /* Account for system time used */
3834 acct_update_integrals(p);
3835}
3836
3837/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 * Account system cpu time to a process.
3839 * @p: the process that the cpu time gets accounted to
3840 * @hardirq_offset: the offset to subtract from hardirq_count()
3841 * @cputime: the cpu time spent in kernel space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003842 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843 */
3844void account_system_time(struct task_struct *p, int hardirq_offset,
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003845 cputime_t cputime, cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846{
3847 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003848 cputime64_t *target_cputime64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003850 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003851 account_guest_time(p, cputime, cputime_scaled);
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003852 return;
3853 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003854
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855 if (hardirq_count() - hardirq_offset)
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003856 target_cputime64 = &cpustat->irq;
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07003857 else if (in_serving_softirq())
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003858 target_cputime64 = &cpustat->softirq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 else
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003860 target_cputime64 = &cpustat->system;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003861
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003862 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863}
3864
3865/*
3866 * Account for involuntary wait time.
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003867 * @cputime: the cpu time spent in involuntary wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003868 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003869void account_steal_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003872 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3873
3874 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875}
3876
Christoph Lameter7835b982006-12-10 02:20:22 -08003877/*
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003878 * Account for idle time.
3879 * @cputime: the cpu time spent in idle wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003881void account_idle_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882{
3883 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003884 cputime64_t cputime64 = cputime_to_cputime64(cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 struct rq *rq = this_rq();
3886
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003887 if (atomic_read(&rq->nr_iowait) > 0)
3888 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3889 else
3890 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
Christoph Lameter7835b982006-12-10 02:20:22 -08003891}
3892
Glauber Costae6e66852011-07-11 15:28:17 -04003893static __always_inline bool steal_account_process_tick(void)
3894{
3895#ifdef CONFIG_PARAVIRT
3896 if (static_branch(&paravirt_steal_enabled)) {
3897 u64 steal, st = 0;
3898
3899 steal = paravirt_steal_clock(smp_processor_id());
3900 steal -= this_rq()->prev_steal_time;
3901
3902 st = steal_ticks(steal);
3903 this_rq()->prev_steal_time += st * TICK_NSEC;
3904
3905 account_steal_time(st);
3906 return st;
3907 }
3908#endif
3909 return false;
3910}
3911
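/*
 * Worked example for steal_account_process_tick() above (hypothetical
 * numbers, assuming steal_ticks() is a plain ns-to-ticks division):
 * if paravirt_steal_clock() has advanced by 2.5 * TICK_NSEC since
 * prev_steal_time, steal_ticks() yields 2, two ticks are folded into
 * cpustat->steal via account_steal_time(), prev_steal_time advances by
 * 2 * TICK_NSEC, and the remaining half tick is carried over to the
 * next sample.
 */
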
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003912#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3913
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003914#ifdef CONFIG_IRQ_TIME_ACCOUNTING
3915/*
3916 * Account a tick to a process and cpustat
3917 * @p: the process that the cpu time gets accounted to
3918 * @user_tick: is the tick from userspace
3919 * @rq: the pointer to rq
3920 *
3921 * Tick demultiplexing follows the order
3922 * - pending hardirq update
3923 * - pending softirq update
3924 * - user_time
3925 * - idle_time
3926 * - system time
3927 * - check for guest_time
3928 * - else account as system_time
3929 *
 3930 * The check for hardirq is done for both system and user time, as no
 3931 * timer interrupt fires while we are in a hardirq and hence we may never
 3932 * get an opportunity to update it from system-time context alone.
 3933 * p->stime and friends are updated only on system time, not on irq or
 3934 * softirq time, as those no longer count in the task's exec_runtime.
3935 */
3936static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3937 struct rq *rq)
3938{
3939 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3940 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
3941 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3942
Glauber Costae6e66852011-07-11 15:28:17 -04003943 if (steal_account_process_tick())
3944 return;
3945
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003946 if (irqtime_account_hi_update()) {
3947 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3948 } else if (irqtime_account_si_update()) {
3949 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
Venkatesh Pallipadi414bee92010-12-21 17:09:04 -08003950 } else if (this_cpu_ksoftirqd() == p) {
3951 /*
 3952 * ksoftirqd time does not get accounted in cpu_softirq_time,
 3953 * so we have to handle it separately here.
 3954 * p->stime also needs to be updated for ksoftirqd.
3955 */
3956 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3957 &cpustat->softirq);
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003958 } else if (user_tick) {
3959 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3960 } else if (p == rq->idle) {
3961 account_idle_time(cputime_one_jiffy);
3962 } else if (p->flags & PF_VCPU) { /* System time or guest time */
3963 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
3964 } else {
3965 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3966 &cpustat->system);
3967 }
3968}
3969
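/*
 * Concrete walk through irqtime_account_process_tick() above: suppose
 * the tick lands while this CPU's ksoftirqd is current and no new
 * hardirq/softirq time has accumulated.  irqtime_account_hi_update()
 * and irqtime_account_si_update() both return false, the
 * this_cpu_ksoftirqd() check matches, and the jiffy is charged to
 * ksoftirqd's stime and to cpustat->softirq via __account_system_time().
 */
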
3970static void irqtime_account_idle_ticks(int ticks)
3971{
3972 int i;
3973 struct rq *rq = this_rq();
3974
3975 for (i = 0; i < ticks; i++)
3976 irqtime_account_process_tick(current, 0, rq);
3977}
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003978#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003979static void irqtime_account_idle_ticks(int ticks) {}
3980static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3981 struct rq *rq) {}
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003982#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003983
3984/*
3985 * Account a single tick of cpu time.
3986 * @p: the process that the cpu time gets accounted to
3987 * @user_tick: indicates if the tick is a user or a system tick
3988 */
3989void account_process_tick(struct task_struct *p, int user_tick)
3990{
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003991 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003992 struct rq *rq = this_rq();
3993
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003994 if (sched_clock_irqtime) {
3995 irqtime_account_process_tick(p, user_tick, rq);
3996 return;
3997 }
3998
Glauber Costae6e66852011-07-11 15:28:17 -04003999 if (steal_account_process_tick())
4000 return;
4001
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01004002 if (user_tick)
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02004003 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
Eric Dumazetf5f293a2009-04-29 14:44:49 +02004004 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02004005 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01004006 one_jiffy_scaled);
4007 else
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02004008 account_idle_time(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01004009}
4010
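/*
 * Sketch of the usual caller of account_process_tick() above
 * (simplified; the real call sits in the timer code):
 *
 *	update_process_times(user_mode(get_irq_regs()))
 *		-> account_process_tick(current, user_tick);
 *
 * so user_tick reflects whether the tick interrupted user space, which
 * is what steers the jiffy into utime, stime or idle time above.
 */
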
4011/*
4012 * Account multiple ticks of steal time.
 4013 * @ticks: number of stolen ticks
4015 */
4016void account_steal_ticks(unsigned long ticks)
4017{
4018 account_steal_time(jiffies_to_cputime(ticks));
4019}
4020
4021/*
4022 * Account multiple ticks of idle time.
 4023 * @ticks: number of ticks spent idle
4024 */
4025void account_idle_ticks(unsigned long ticks)
4026{
 4027{
4028 if (sched_clock_irqtime) {
4029 irqtime_account_idle_ticks(ticks);
4030 return;
4031 }
4032
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01004033 account_idle_time(jiffies_to_cputime(ticks));
4034}
4035
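/*
 * Example use of account_idle_ticks() above (a rough sketch of what the
 * nohz code does when the periodic tick is restarted): the ticks that
 * were skipped while idle are accounted in one batch, e.g.
 *
 *	account_idle_ticks(jiffies - idle_entry_jiffies);
 *
 * where idle_entry_jiffies is a hypothetical snapshot taken when the
 * CPU went idle.
 */
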
4036#endif
4037
Christoph Lameter7835b982006-12-10 02:20:22 -08004038/*
Balbir Singh49048622008-09-05 18:12:23 +02004039 * Use precise platform statistics if available:
4040 */
4041#ifdef CONFIG_VIRT_CPU_ACCOUNTING
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09004042void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02004043{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09004044 *ut = p->utime;
4045 *st = p->stime;
Balbir Singh49048622008-09-05 18:12:23 +02004046}
4047
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09004048void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02004049{
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09004050 struct task_cputime cputime;
4051
4052 thread_group_cputime(p, &cputime);
4053
4054 *ut = cputime.utime;
4055 *st = cputime.stime;
Balbir Singh49048622008-09-05 18:12:23 +02004056}
4057#else
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09004058
4059#ifndef nsecs_to_cputime
Hidetoshi Setob7b20df92009-11-26 14:49:27 +09004060# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09004061#endif
4062
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09004063void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02004064{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09004065 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
Balbir Singh49048622008-09-05 18:12:23 +02004066
4067 /*
4068 * Use CFS's precise accounting:
4069 */
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09004070 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
Balbir Singh49048622008-09-05 18:12:23 +02004071
4072 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02004073 u64 temp = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02004074
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02004075 temp *= utime;
Balbir Singh49048622008-09-05 18:12:23 +02004076 do_div(temp, total);
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09004077 utime = (cputime_t)temp;
4078 } else
4079 utime = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02004080
4081 /*
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09004082 * Compare with previous values, to keep monotonicity:
Balbir Singh49048622008-09-05 18:12:23 +02004083 */
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09004084 p->prev_utime = max(p->prev_utime, utime);
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09004085 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
Balbir Singh49048622008-09-05 18:12:23 +02004086
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09004087 *ut = p->prev_utime;
4088 *st = p->prev_stime;
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09004089}
Balbir Singh49048622008-09-05 18:12:23 +02004090
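/*
 * Worked example for the scaling above (hypothetical numbers): with
 * p->utime = 2 ticks, p->stime = 6 ticks (total = 8) and
 * p->se.sum_exec_runtime corresponding to rtime = 12 ticks, we get
 * utime = 12 * 2 / 8 = 3, prev_utime = max(prev_utime, 3) and
 * prev_stime = max(prev_stime, 12 - 3) = 9; the precise 12 ticks of
 * CFS runtime are split in the sampled 2:6 utime/stime ratio while
 * both reported values stay monotonic.
 */
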
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09004091/*
4092 * Must be called with siglock held.
4093 */
4094void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4095{
4096 struct signal_struct *sig = p->signal;
4097 struct task_cputime cputime;
4098 cputime_t rtime, utime, total;
4099
4100 thread_group_cputime(p, &cputime);
4101
4102 total = cputime_add(cputime.utime, cputime.stime);
4103 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
4104
4105 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02004106 u64 temp = rtime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09004107
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02004108 temp *= cputime.utime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09004109 do_div(temp, total);
4110 utime = (cputime_t)temp;
4111 } else
4112 utime = rtime;
4113
4114 sig->prev_utime = max(sig->prev_utime, utime);
4115 sig->prev_stime = max(sig->prev_stime,
4116 cputime_sub(rtime, sig->prev_utime));
4117
4118 *ut = sig->prev_utime;
4119 *st = sig->prev_stime;
Balbir Singh49048622008-09-05 18:12:23 +02004120}
4121#endif
4122
Balbir Singh49048622008-09-05 18:12:23 +02004123/*
Christoph Lameter7835b982006-12-10 02:20:22 -08004124 * This function gets called by the timer code, with HZ frequency.
4125 * We call it with interrupts disabled.
Christoph Lameter7835b982006-12-10 02:20:22 -08004126 */
4127void scheduler_tick(void)
4128{
Christoph Lameter7835b982006-12-10 02:20:22 -08004129 int cpu = smp_processor_id();
4130 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02004131 struct task_struct *curr = rq->curr;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02004132
4133 sched_clock_tick();
Christoph Lameter7835b982006-12-10 02:20:22 -08004134
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004135 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02004136 update_rq_clock(rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07004137 update_cpu_load_active(rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01004138 curr->sched_class->task_tick(rq, curr, 0);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004139 raw_spin_unlock(&rq->lock);
Ingo Molnardd41f592007-07-09 18:51:59 +02004140
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02004141 perf_event_task_tick();
Peter Zijlstrae220d2d2009-05-23 18:28:55 +02004142
Christoph Lametere418e1c2006-12-10 02:20:23 -08004143#ifdef CONFIG_SMP
Ingo Molnardd41f592007-07-09 18:51:59 +02004144 rq->idle_at_tick = idle_cpu(cpu);
4145 trigger_load_balance(rq, cpu);
Christoph Lametere418e1c2006-12-10 02:20:23 -08004146#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147}
4148
Lai Jiangshan132380a2009-04-02 14:18:25 +08004149notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004150{
4151 if (in_lock_functions(addr)) {
4152 addr = CALLER_ADDR2;
4153 if (in_lock_functions(addr))
4154 addr = CALLER_ADDR3;
4155 }
4156 return addr;
4157}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158
Steven Rostedt7e49fcc2009-01-22 19:01:40 -05004159#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
4160 defined(CONFIG_PREEMPT_TRACER))
4161
Srinivasa Ds43627582008-02-23 15:24:04 -08004162void __kprobes add_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004164#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165 /*
4166 * Underflow?
4167 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07004168 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4169 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004170#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171 preempt_count() += val;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004172#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173 /*
4174 * Spinlock count overflowing soon?
4175 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08004176 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4177 PREEMPT_MASK - 10);
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004178#endif
4179 if (preempt_count() == val)
4180 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181}
4182EXPORT_SYMBOL(add_preempt_count);
4183
Srinivasa Ds43627582008-02-23 15:24:04 -08004184void __kprobes sub_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004186#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187 /*
4188 * Underflow?
4189 */
Ingo Molnar01e3eb82009-01-12 13:00:50 +01004190 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07004191 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 /*
4193 * Is the spinlock portion underflowing?
4194 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07004195 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4196 !(preempt_count() & PREEMPT_MASK)))
4197 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004198#endif
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07004199
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02004200 if (preempt_count() == val)
4201 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 preempt_count() -= val;
4203}
4204EXPORT_SYMBOL(sub_preempt_count);
4205
4206#endif
4207
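/*
 * Illustrative pairing for the helpers above (with CONFIG_DEBUG_PREEMPT,
 * preempt_disable()/preempt_enable() roughly boil down to these calls;
 * some_lock is a placeholder):
 *
 *	preempt_disable();		// add_preempt_count(1): 0 -> 1
 *	spin_lock(&some_lock);		// add_preempt_count(1): 1 -> 2
 *	...
 *	spin_unlock(&some_lock);	// sub_preempt_count(1): 2 -> 1
 *	preempt_enable();		// sub_preempt_count(1): 1 -> 0,
 *					// may now reschedule
 *
 * The PREEMPT_MASK underflow check catches, for example, an unlock
 * without a matching lock.
 */
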
4208/*
Ingo Molnardd41f592007-07-09 18:51:59 +02004209 * Print scheduling while atomic bug:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210 */
Ingo Molnardd41f592007-07-09 18:51:59 +02004211static noinline void __schedule_bug(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212{
Satyam Sharma838225b2007-10-24 18:23:50 +02004213 struct pt_regs *regs = get_irq_regs();
4214
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004215 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4216 prev->comm, prev->pid, preempt_count());
Satyam Sharma838225b2007-10-24 18:23:50 +02004217
Ingo Molnardd41f592007-07-09 18:51:59 +02004218 debug_show_held_locks(prev);
Arjan van de Vene21f5b12008-05-23 09:05:58 -07004219 print_modules();
Ingo Molnardd41f592007-07-09 18:51:59 +02004220 if (irqs_disabled())
4221 print_irqtrace_events(prev);
Satyam Sharma838225b2007-10-24 18:23:50 +02004222
4223 if (regs)
4224 show_regs(regs);
4225 else
4226 dump_stack();
Ingo Molnardd41f592007-07-09 18:51:59 +02004227}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228
Ingo Molnardd41f592007-07-09 18:51:59 +02004229/*
4230 * Various schedule()-time debugging checks and statistics:
4231 */
4232static inline void schedule_debug(struct task_struct *prev)
4233{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 /*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004235 * Test if we are atomic. Since do_exit() needs to call into
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 * schedule() atomically, we ignore that path for now.
4237 * Otherwise, whine if we are scheduling when we should not be.
4238 */
Roel Kluin3f33a7c2008-05-13 23:44:11 +02004239 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
Ingo Molnardd41f592007-07-09 18:51:59 +02004240 __schedule_bug(prev);
4241
Linus Torvalds1da177e2005-04-16 15:20:36 -07004242 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4243
Ingo Molnar2d723762007-10-15 17:00:12 +02004244 schedstat_inc(this_rq(), sched_count);
Ingo Molnardd41f592007-07-09 18:51:59 +02004245}
4246
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004247static void put_prev_task(struct rq *rq, struct task_struct *prev)
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004248{
Mike Galbraith61eadef2011-04-29 08:36:50 +02004249 if (prev->on_rq || rq->skip_clock_update < 0)
Mike Galbraitha64692a2010-03-11 17:16:20 +01004250 update_rq_clock(rq);
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004251 prev->sched_class->put_prev_task(rq, prev);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004252}
4253
Ingo Molnardd41f592007-07-09 18:51:59 +02004254/*
4255 * Pick up the highest-prio task:
4256 */
4257static inline struct task_struct *
Wang Chenb67802e2009-03-02 13:55:26 +08004258pick_next_task(struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02004259{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02004260 const struct sched_class *class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004261 struct task_struct *p;
4262
4263 /*
4264 * Optimization: we know that if all tasks are in
4265 * the fair class we can call that function directly:
4266 */
4267 if (likely(rq->nr_running == rq->cfs.nr_running)) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004268 p = fair_sched_class.pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004269 if (likely(p))
4270 return p;
4271 }
4272
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004273 for_each_class(class) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004274 p = class->pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004275 if (p)
4276 return p;
Ingo Molnardd41f592007-07-09 18:51:59 +02004277 }
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004278
4279 BUG(); /* the idle class will always have a runnable task */
Ingo Molnardd41f592007-07-09 18:51:59 +02004280}
4281
4282/*
4283 * schedule() is the main scheduler function.
4284 */
Peter Zijlstraff743342009-03-13 12:21:26 +01004285asmlinkage void __sched schedule(void)
Ingo Molnardd41f592007-07-09 18:51:59 +02004286{
4287 struct task_struct *prev, *next;
Harvey Harrison67ca7bd2008-02-15 09:56:36 -08004288 unsigned long *switch_count;
Ingo Molnardd41f592007-07-09 18:51:59 +02004289 struct rq *rq;
Peter Zijlstra31656512008-07-18 18:01:23 +02004290 int cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02004291
Peter Zijlstraff743342009-03-13 12:21:26 +01004292need_resched:
4293 preempt_disable();
Ingo Molnardd41f592007-07-09 18:51:59 +02004294 cpu = smp_processor_id();
4295 rq = cpu_rq(cpu);
Paul E. McKenney25502a62010-04-01 17:37:01 -07004296 rcu_note_context_switch(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02004297 prev = rq->curr;
Ingo Molnardd41f592007-07-09 18:51:59 +02004298
Ingo Molnardd41f592007-07-09 18:51:59 +02004299 schedule_debug(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300
Peter Zijlstra31656512008-07-18 18:01:23 +02004301 if (sched_feat(HRTICK))
Mike Galbraithf333fdc2008-05-12 21:20:55 +02004302 hrtick_clear(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004303
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004304 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004306 switch_count = &prev->nivcsw;
Ingo Molnardd41f592007-07-09 18:51:59 +02004307 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
Tejun Heo21aa9af2010-06-08 21:40:37 +02004308 if (unlikely(signal_pending_state(prev->state, prev))) {
Ingo Molnardd41f592007-07-09 18:51:59 +02004309 prev->state = TASK_RUNNING;
Tejun Heo21aa9af2010-06-08 21:40:37 +02004310 } else {
Peter Zijlstra2acca552011-04-05 17:23:50 +02004311 deactivate_task(rq, prev, DEQUEUE_SLEEP);
4312 prev->on_rq = 0;
4313
Tejun Heo21aa9af2010-06-08 21:40:37 +02004314 /*
Peter Zijlstra2acca552011-04-05 17:23:50 +02004315 * If a worker went to sleep, notify and ask workqueue
4316 * whether it wants to wake up a task to maintain
4317 * concurrency.
Tejun Heo21aa9af2010-06-08 21:40:37 +02004318 */
4319 if (prev->flags & PF_WQ_WORKER) {
4320 struct task_struct *to_wakeup;
4321
4322 to_wakeup = wq_worker_sleeping(prev, cpu);
4323 if (to_wakeup)
4324 try_to_wake_up_local(to_wakeup);
4325 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004326
Linus Torvalds6631e632011-04-13 08:08:20 -07004327 /*
Peter Zijlstra2acca552011-04-05 17:23:50 +02004328 * If we are going to sleep and we have plugged IO
4329 * queued, make sure to submit it to avoid deadlocks.
Linus Torvalds6631e632011-04-13 08:08:20 -07004330 */
4331 if (blk_needs_flush_plug(prev)) {
4332 raw_spin_unlock(&rq->lock);
Jens Axboea237c1c2011-04-16 13:27:55 +02004333 blk_schedule_flush_plug(prev);
Linus Torvalds6631e632011-04-13 08:08:20 -07004334 raw_spin_lock(&rq->lock);
4335 }
Tejun Heo21aa9af2010-06-08 21:40:37 +02004336 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004337 switch_count = &prev->nvcsw;
4338 }
4339
Gregory Haskins3f029d32009-07-29 11:08:47 -04004340 pre_schedule(rq, prev);
Steven Rostedtf65eda42008-01-25 21:08:07 +01004341
Ingo Molnardd41f592007-07-09 18:51:59 +02004342 if (unlikely(!rq->nr_running))
4343 idle_balance(cpu, rq);
4344
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004345 put_prev_task(rq, prev);
Wang Chenb67802e2009-03-02 13:55:26 +08004346 next = pick_next_task(rq);
Mike Galbraithf26f9af2010-12-08 11:05:42 +01004347 clear_tsk_need_resched(prev);
4348 rq->skip_clock_update = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 if (likely(prev != next)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351 rq->nr_switches++;
4352 rq->curr = next;
4353 ++*switch_count;
4354
Ingo Molnardd41f592007-07-09 18:51:59 +02004355 context_switch(rq, prev, next); /* unlocks the rq */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004356 /*
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004357 * The context switch has flipped the stack from under us
4358 * and restored the local variables which were saved when
4359 * this task called schedule() in the past. prev == current
4360 * is still correct, but it can be moved to another cpu/rq.
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004361 */
4362 cpu = smp_processor_id();
4363 rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004365 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366
Gregory Haskins3f029d32009-07-29 11:08:47 -04004367 post_schedule(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 preempt_enable_no_resched();
Peter Zijlstraff743342009-03-13 12:21:26 +01004370 if (need_resched())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 goto need_resched;
4372}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373EXPORT_SYMBOL(schedule);
4374
Frederic Weisbeckerc08f7822009-12-02 20:49:17 +01004375#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004376
4377static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
4378{
4379 bool ret = false;
4380
4381 rcu_read_lock();
4382 if (lock->owner != owner)
4383 goto fail;
4384
4385 /*
 4386 * Ensure we emit the owner->on_cpu dereference _after_ checking that
 4387 * lock->owner still matches owner. If that fails, owner might point
 4388 * to free()d memory; if it still matches, the rcu_read_lock()
 4389 * ensures the memory stays valid.
4390 */
4391 barrier();
4392
4393 ret = owner->on_cpu;
4394fail:
4395 rcu_read_unlock();
4396
4397 return ret;
4398}
4399
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004400/*
4401 * Look out! "owner" is an entirely speculative pointer
4402 * access and not reliable.
4403 */
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004404int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004405{
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004406 if (!sched_feat(OWNER_SPIN))
4407 return 0;
4408
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004409 while (owner_running(lock, owner)) {
4410 if (need_resched())
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004411 return 0;
4412
Gerald Schaefer335d7af2010-11-22 15:47:36 +01004413 arch_mutex_cpu_relax();
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004414 }
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004415
Peter Zijlstrac6eb3dd2011-04-05 17:23:41 +02004416 /*
4417 * If the owner changed to another task there is likely
4418 * heavy contention, stop spinning.
4419 */
4420 if (lock->owner)
4421 return 0;
4422
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004423 return 1;
4424}
4425#endif
4426
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427#ifdef CONFIG_PREEMPT
4428/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004429 * This is the entry point to schedule() for in-kernel preemption off the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004430 * back of preempt_enable(). Kernel preemptions on the return-from-interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431 * path are handled by preempt_schedule_irq() below, which calls schedule() directly.
4432 */
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004433asmlinkage void __sched notrace preempt_schedule(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434{
4435 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004436
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437 /*
4438 * If there is a non-zero preempt_count or interrupts are disabled,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004439 * we do not want to preempt the current task. Just return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 */
Nick Pigginbeed33a2006-10-11 01:21:52 -07004441 if (likely(ti->preempt_count || irqs_disabled()))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442 return;
4443
Andi Kleen3a5c3592007-10-15 17:00:14 +02004444 do {
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004445 add_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004446 schedule();
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004447 sub_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004448
4449 /*
4450 * Check again in case we missed a preemption opportunity
4451 * between schedule and now.
4452 */
4453 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004454 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456EXPORT_SYMBOL(preempt_schedule);
4457
4458/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004459 * This is the entry point to schedule() for kernel preemption
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460 * off of irq context, i.e. the return-from-interrupt path.
 4461 * Note that this is called and returns with irqs disabled; this
 4462 * protects us against recursive calls from irq context.
4463 */
4464asmlinkage void __sched preempt_schedule_irq(void)
4465{
4466 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004467
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004468 /* Catch callers which need to be fixed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469 BUG_ON(ti->preempt_count || !irqs_disabled());
4470
Andi Kleen3a5c3592007-10-15 17:00:14 +02004471 do {
4472 add_preempt_count(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004473 local_irq_enable();
4474 schedule();
4475 local_irq_disable();
Andi Kleen3a5c3592007-10-15 17:00:14 +02004476 sub_preempt_count(PREEMPT_ACTIVE);
4477
4478 /*
4479 * Check again in case we missed a preemption opportunity
4480 * between schedule and now.
4481 */
4482 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004483 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484}
4485
4486#endif /* CONFIG_PREEMPT */
4487
Peter Zijlstra63859d42009-09-15 19:14:42 +02004488int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004489 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490{
Peter Zijlstra63859d42009-09-15 19:14:42 +02004491 return try_to_wake_up(curr->private, mode, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493EXPORT_SYMBOL(default_wake_function);
4494
4495/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004496 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4497 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498 * number) then we wake all the non-exclusive tasks and one exclusive task.
4499 *
4500 * There are circumstances in which we can try to wake a task which has already
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004501 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4503 */
Johannes Weiner78ddb082009-04-14 16:53:05 +02004504static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
Peter Zijlstra63859d42009-09-15 19:14:42 +02004505 int nr_exclusive, int wake_flags, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506{
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004507 wait_queue_t *curr, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004509 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07004510 unsigned flags = curr->flags;
4511
Peter Zijlstra63859d42009-09-15 19:14:42 +02004512 if (curr->func(curr, mode, wake_flags, key) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07004513 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514 break;
4515 }
4516}
4517
4518/**
4519 * __wake_up - wake up threads blocked on a waitqueue.
4520 * @q: the waitqueue
4521 * @mode: which threads
4522 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Martin Waitz67be2dd2005-05-01 08:59:26 -07004523 * @key: is directly passed to the wakeup function
David Howells50fa6102009-04-28 15:01:38 +01004524 *
4525 * It may be assumed that this function implies a write memory barrier before
4526 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004528void __wake_up(wait_queue_head_t *q, unsigned int mode,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004529 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004530{
4531 unsigned long flags;
4532
4533 spin_lock_irqsave(&q->lock, flags);
4534 __wake_up_common(q, mode, nr_exclusive, 0, key);
4535 spin_unlock_irqrestore(&q->lock, flags);
4536}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537EXPORT_SYMBOL(__wake_up);
4538
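/*
 * Typical (hypothetical) driver-side pattern that ends up in __wake_up()
 * above; wake_up() expands to __wake_up(q, TASK_NORMAL, 1, NULL):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_cond;
 *
 *	// waiter
 *	wait_event_interruptible(my_wq, my_cond);
 *
 *	// waker
 *	my_cond = 1;
 *	wake_up(&my_wq);
 */
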
4539/*
4540 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4541 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004542void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543{
4544 __wake_up_common(q, mode, 1, 0, NULL);
4545}
Michal Nazarewicz22c43c82010-05-05 12:53:11 +02004546EXPORT_SYMBOL_GPL(__wake_up_locked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547
Davide Libenzi4ede8162009-03-31 15:24:20 -07004548void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4549{
4550 __wake_up_common(q, mode, 1, 0, key);
4551}
Trond Myklebustbf294b42011-02-21 11:05:41 -08004552EXPORT_SYMBOL_GPL(__wake_up_locked_key);
Davide Libenzi4ede8162009-03-31 15:24:20 -07004553
Linus Torvalds1da177e2005-04-16 15:20:36 -07004554/**
Davide Libenzi4ede8162009-03-31 15:24:20 -07004555 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556 * @q: the waitqueue
4557 * @mode: which threads
4558 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Davide Libenzi4ede8162009-03-31 15:24:20 -07004559 * @key: opaque value to be passed to wakeup targets
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560 *
 4561 * The sync wakeup differs in that the waker knows that it will schedule
 4562 * away soon, so while the target thread will be woken up, it will not
 4563 * be migrated to another CPU - i.e. the two threads are 'synchronized'
4564 * with each other. This can prevent needless bouncing between CPUs.
4565 *
4566 * On UP it can prevent extra preemption.
David Howells50fa6102009-04-28 15:01:38 +01004567 *
4568 * It may be assumed that this function implies a write memory barrier before
4569 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570 */
Davide Libenzi4ede8162009-03-31 15:24:20 -07004571void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4572 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573{
4574 unsigned long flags;
Peter Zijlstra7d478722009-09-14 19:55:44 +02004575 int wake_flags = WF_SYNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576
4577 if (unlikely(!q))
4578 return;
4579
4580 if (unlikely(!nr_exclusive))
Peter Zijlstra7d478722009-09-14 19:55:44 +02004581 wake_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582
4583 spin_lock_irqsave(&q->lock, flags);
Peter Zijlstra7d478722009-09-14 19:55:44 +02004584 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004585 spin_unlock_irqrestore(&q->lock, flags);
4586}
Davide Libenzi4ede8162009-03-31 15:24:20 -07004587EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4588
4589/*
4590 * __wake_up_sync - see __wake_up_sync_key()
4591 */
4592void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4593{
4594 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4595}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4597
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004598/**
4599 * complete: - signals a single thread waiting on this completion
4600 * @x: holds the state of this particular completion
4601 *
4602 * This will wake up a single thread waiting on this completion. Threads will be
4603 * awakened in the same order in which they were queued.
4604 *
4605 * See also complete_all(), wait_for_completion() and related routines.
David Howells50fa6102009-04-28 15:01:38 +01004606 *
4607 * It may be assumed that this function implies a write memory barrier before
4608 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004609 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004610void complete(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004611{
4612 unsigned long flags;
4613
4614 spin_lock_irqsave(&x->wait.lock, flags);
4615 x->done++;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004616 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617 spin_unlock_irqrestore(&x->wait.lock, flags);
4618}
4619EXPORT_SYMBOL(complete);
4620
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004621/**
4622 * complete_all: - signals all threads waiting on this completion
4623 * @x: holds the state of this particular completion
4624 *
4625 * This will wake up all threads waiting on this particular completion event.
David Howells50fa6102009-04-28 15:01:38 +01004626 *
4627 * It may be assumed that this function implies a write memory barrier before
4628 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004629 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004630void complete_all(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004631{
4632 unsigned long flags;
4633
4634 spin_lock_irqsave(&x->wait.lock, flags);
4635 x->done += UINT_MAX/2;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004636 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637 spin_unlock_irqrestore(&x->wait.lock, flags);
4638}
4639EXPORT_SYMBOL(complete_all);
4640
Andi Kleen8cbbe862007-10-15 17:00:14 +02004641static inline long __sched
4642do_wait_for_common(struct completion *x, long timeout, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644 if (!x->done) {
4645 DECLARE_WAITQUEUE(wait, current);
4646
Changli Gaoa93d2f12010-05-07 14:33:26 +08004647 __add_wait_queue_tail_exclusive(&x->wait, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648 do {
Oleg Nesterov94d3d822008-08-20 16:54:41 -07004649 if (signal_pending_state(state, current)) {
Oleg Nesterovea71a542008-06-20 18:32:20 +04004650 timeout = -ERESTARTSYS;
4651 break;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004652 }
4653 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004655 timeout = schedule_timeout(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 spin_lock_irq(&x->wait.lock);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004657 } while (!x->done && timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 __remove_wait_queue(&x->wait, &wait);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004659 if (!x->done)
4660 return timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661 }
4662 x->done--;
Oleg Nesterovea71a542008-06-20 18:32:20 +04004663 return timeout ?: 1;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004664}
4665
4666static long __sched
4667wait_for_common(struct completion *x, long timeout, int state)
4668{
4669 might_sleep();
4670
4671 spin_lock_irq(&x->wait.lock);
4672 timeout = do_wait_for_common(x, timeout, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004674 return timeout;
4675}
4676
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004677/**
4678 * wait_for_completion: - waits for completion of a task
4679 * @x: holds the state of this particular completion
4680 *
4681 * This waits to be signaled for completion of a specific task. It is NOT
4682 * interruptible and there is no timeout.
4683 *
4684 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4685 * and interrupt capability. Also see complete().
4686 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004687void __sched wait_for_completion(struct completion *x)
Andi Kleen8cbbe862007-10-15 17:00:14 +02004688{
4689 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690}
4691EXPORT_SYMBOL(wait_for_completion);
4692
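/*
 * Minimal usage sketch for the completion API above (hypothetical
 * caller, not part of sched.c):
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	start_async_work(&done);	// hypothetical helper that will
 *					// eventually call complete(&done)
 *	wait_for_completion(&done);	// sleeps in TASK_UNINTERRUPTIBLE
 *					// until complete() is called
 */
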
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004693/**
4694 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4695 * @x: holds the state of this particular completion
4696 * @timeout: timeout value in jiffies
4697 *
4698 * This waits for either a completion of a specific task to be signaled or for a
4699 * specified timeout to expire. The timeout is in jiffies. It is not
4700 * interruptible.
4701 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004702unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4704{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004705 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706}
4707EXPORT_SYMBOL(wait_for_completion_timeout);
4708
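/*
 * Interpreting the return value of wait_for_completion_timeout() above:
 * 0 means the timeout expired, otherwise the remaining jiffies (at
 * least 1) are returned, per do_wait_for_common()'s "timeout ?: 1".
 *
 *	unsigned long left = wait_for_completion_timeout(&done, HZ);
 *	if (!left)
 *		return -ETIMEDOUT;	// hypothetical error handling
 */
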
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004709/**
4710 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4711 * @x: holds the state of this particular completion
4712 *
4713 * This waits for completion of a specific task to be signaled. It is
4714 * interruptible.
4715 */
Andi Kleen8cbbe862007-10-15 17:00:14 +02004716int __sched wait_for_completion_interruptible(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717{
Andi Kleen51e97992007-10-18 21:32:55 +02004718 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4719 if (t == -ERESTARTSYS)
4720 return t;
4721 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722}
4723EXPORT_SYMBOL(wait_for_completion_interruptible);
4724
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004725/**
4726 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4727 * @x: holds the state of this particular completion
4728 * @timeout: timeout value in jiffies
4729 *
4730 * This waits for either a completion of a specific task to be signaled or for a
4731 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4732 */
NeilBrown6bf41232011-01-05 12:50:16 +11004733long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734wait_for_completion_interruptible_timeout(struct completion *x,
4735 unsigned long timeout)
4736{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004737 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738}
4739EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4740
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004741/**
4742 * wait_for_completion_killable: - waits for completion of a task (killable)
4743 * @x: holds the state of this particular completion
4744 *
4745 * This waits to be signaled for completion of a specific task. It can be
4746 * interrupted by a kill signal.
4747 */
Matthew Wilcox009e5772007-12-06 12:29:54 -05004748int __sched wait_for_completion_killable(struct completion *x)
4749{
4750 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4751 if (t == -ERESTARTSYS)
4752 return t;
4753 return 0;
4754}
4755EXPORT_SYMBOL(wait_for_completion_killable);
4756
Dave Chinnerbe4de352008-08-15 00:40:44 -07004757/**
Sage Weil0aa12fb2010-05-29 09:12:30 -07004758 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4759 * @x: holds the state of this particular completion
4760 * @timeout: timeout value in jiffies
4761 *
4762 * This waits for either a completion of a specific task to be
4763 * signaled or for a specified timeout to expire. It can be
4764 * interrupted by a kill signal. The timeout is in jiffies.
4765 */
NeilBrown6bf41232011-01-05 12:50:16 +11004766long __sched
Sage Weil0aa12fb2010-05-29 09:12:30 -07004767wait_for_completion_killable_timeout(struct completion *x,
4768 unsigned long timeout)
4769{
4770 return wait_for_common(x, timeout, TASK_KILLABLE);
4771}
4772EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4773
4774/**
Dave Chinnerbe4de352008-08-15 00:40:44 -07004775 * try_wait_for_completion - try to decrement a completion without blocking
4776 * @x: completion structure
4777 *
4778 * Returns: 0 if a decrement cannot be done without blocking
4779 * 1 if a decrement succeeded.
4780 *
4781 * If a completion is being used as a counting completion,
4782 * attempt to decrement the counter without blocking. This
4783 * enables us to avoid waiting if the resource the completion
4784 * is protecting is not available.
4785 */
4786bool try_wait_for_completion(struct completion *x)
4787{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004788 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004789 int ret = 1;
4790
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004791 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004792 if (!x->done)
4793 ret = 0;
4794 else
4795 x->done--;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004796 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004797 return ret;
4798}
4799EXPORT_SYMBOL(try_wait_for_completion);
4800
4801/**
4802 * completion_done - Test to see if a completion has any waiters
4803 * @x: completion structure
4804 *
4805 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4806 * 1 if there are no waiters.
4807 *
4808 */
4809bool completion_done(struct completion *x)
4810{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004811 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004812 int ret = 1;
4813
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004814 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004815 if (!x->done)
4816 ret = 0;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004817 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004818 return ret;
4819}
4820EXPORT_SYMBOL(completion_done);
4821
Andi Kleen8cbbe862007-10-15 17:00:14 +02004822static long __sched
4823sleep_on_common(wait_queue_head_t *q, int state, long timeout)
Ingo Molnar0fec1712007-07-09 18:52:01 +02004824{
4825 unsigned long flags;
4826 wait_queue_t wait;
4827
4828 init_waitqueue_entry(&wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829
Andi Kleen8cbbe862007-10-15 17:00:14 +02004830 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831
Andi Kleen8cbbe862007-10-15 17:00:14 +02004832 spin_lock_irqsave(&q->lock, flags);
4833 __add_wait_queue(q, &wait);
4834 spin_unlock(&q->lock);
4835 timeout = schedule_timeout(timeout);
4836 spin_lock_irq(&q->lock);
4837 __remove_wait_queue(q, &wait);
4838 spin_unlock_irqrestore(&q->lock, flags);
4839
4840 return timeout;
4841}
4842
4843void __sched interruptible_sleep_on(wait_queue_head_t *q)
4844{
4845 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004846}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004847EXPORT_SYMBOL(interruptible_sleep_on);
4848
Ingo Molnar0fec1712007-07-09 18:52:01 +02004849long __sched
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004850interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004851{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004852 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004853}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004854EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4855
Ingo Molnar0fec1712007-07-09 18:52:01 +02004856void __sched sleep_on(wait_queue_head_t *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004858 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860EXPORT_SYMBOL(sleep_on);
4861
Ingo Molnar0fec1712007-07-09 18:52:01 +02004862long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004864 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866EXPORT_SYMBOL(sleep_on_timeout);
4867
Ingo Molnarb29739f2006-06-27 02:54:51 -07004868#ifdef CONFIG_RT_MUTEXES
4869
4870/*
4871 * rt_mutex_setprio - set the current priority of a task
4872 * @p: task
4873 * @prio: prio value (kernel-internal form)
4874 *
4875 * This function changes the 'effective' priority of a task. It does
4876 * not touch ->normal_prio like __setscheduler().
4877 *
4878 * Used by the rt_mutex code to implement priority inheritance logic.
4879 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004880void rt_mutex_setprio(struct task_struct *p, int prio)
Ingo Molnarb29739f2006-06-27 02:54:51 -07004881{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004882 int oldprio, on_rq, running;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004883 struct rq *rq;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004884 const struct sched_class *prev_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004885
4886 BUG_ON(prio < 0 || prio > MAX_PRIO);
4887
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004888 rq = __task_rq_lock(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004889
Steven Rostedta8027072010-09-20 15:13:34 -04004890 trace_sched_pi_setprio(p, prio);
Andrew Mortond5f9f942007-05-08 20:27:06 -07004891 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004892 prev_class = p->sched_class;
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004893 on_rq = p->on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004894 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004895 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004896 dequeue_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004897 if (running)
4898 p->sched_class->put_prev_task(rq, p);
Ingo Molnardd41f592007-07-09 18:51:59 +02004899
4900 if (rt_prio(prio))
4901 p->sched_class = &rt_sched_class;
4902 else
4903 p->sched_class = &fair_sched_class;
4904
Ingo Molnarb29739f2006-06-27 02:54:51 -07004905 p->prio = prio;
4906
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004907 if (running)
4908 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004909 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004910 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004911
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004912 check_class_changed(rq, p, prev_class, oldprio);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004913 __task_rq_unlock(rq);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004914}
4915
4916#endif
4917
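/*
 * Priority-inheritance sketch for rt_mutex_setprio() above (rough,
 * hypothetical scenario): a nice-0 CFS task (prio 120) owns an rt_mutex
 * and an RT task with rt_priority 50 (kernel prio 99 - 50 = 49) blocks
 * on it.  The rt-mutex code calls rt_mutex_setprio(owner, 49): the
 * owner is dequeued, switched to &rt_sched_class at prio 49 and
 * requeued, so it can finish the critical section promptly.  On unlock
 * the boost is undone with rt_mutex_setprio(owner, 120).
 */
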
Ingo Molnar36c8b582006-07-03 00:25:41 -07004918void set_user_nice(struct task_struct *p, long nice)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004919{
Ingo Molnardd41f592007-07-09 18:51:59 +02004920 int old_prio, delta, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004922 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004923
4924 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4925 return;
4926 /*
4927 * We have to be careful, if called from sys_setpriority(),
4928 * the task might be in the middle of scheduling on another CPU.
4929 */
4930 rq = task_rq_lock(p, &flags);
4931 /*
4932 * The RT priorities are set via sched_setscheduler(), but we still
4933 * allow the 'normal' nice value to be set - but as expected
 4934 * it won't have any effect on scheduling until the task is
Ingo Molnardd41f592007-07-09 18:51:59 +02004935 * SCHED_FIFO/SCHED_RR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 */
Ingo Molnare05606d2007-07-09 18:51:59 +02004937 if (task_has_rt_policy(p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 p->static_prio = NICE_TO_PRIO(nice);
4939 goto out_unlock;
4940 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004941 on_rq = p->on_rq;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004942 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004943 dequeue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945 p->static_prio = NICE_TO_PRIO(nice);
Peter Williams2dd73a42006-06-27 02:54:34 -07004946 set_load_weight(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004947 old_prio = p->prio;
4948 p->prio = effective_prio(p);
4949 delta = p->prio - old_prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004950
Ingo Molnardd41f592007-07-09 18:51:59 +02004951 if (on_rq) {
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004952 enqueue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 /*
Andrew Mortond5f9f942007-05-08 20:27:06 -07004954 * If the task increased its priority or is running and
4955 * lowered its priority, then reschedule its CPU:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956 */
Andrew Mortond5f9f942007-05-08 20:27:06 -07004957 if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958 resched_task(rq->curr);
4959 }
4960out_unlock:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004961 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963EXPORT_SYMBOL(set_user_nice);
4964
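/*
 * Worked example for set_user_nice() above: NICE_TO_PRIO(nice) is
 * MAX_RT_PRIO + 20 + nice, so
 *
 *	set_user_nice(p, 10);	// static_prio = 100 + 20 + 10 = 130
 *	set_user_nice(p, -5);	// static_prio = 100 + 20 - 5  = 115
 *
 * and for a non-RT, non-boosted task effective_prio() makes p->prio
 * follow static_prio, which is what drives the load weight and any
 * resched decision above.
 */
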
Matt Mackalle43379f2005-05-01 08:59:00 -07004965/*
4966 * can_nice - check if a task can reduce its nice value
4967 * @p: task
4968 * @nice: nice value
4969 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004970int can_nice(const struct task_struct *p, const int nice)
Matt Mackalle43379f2005-05-01 08:59:00 -07004971{
Matt Mackall024f4742005-08-18 11:24:19 -07004972 /* convert nice value [19,-20] to rlimit style value [1,40] */
4973 int nice_rlim = 20 - nice;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004974
Jiri Slaby78d7d402010-03-05 13:42:54 -08004975 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
Matt Mackalle43379f2005-05-01 08:59:00 -07004976 capable(CAP_SYS_NICE));
4977}
4978
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979#ifdef __ARCH_WANT_SYS_NICE
4980
4981/*
4982 * sys_nice - change the priority of the current process.
4983 * @increment: priority increment
4984 *
4985 * sys_setpriority is a more generic, but much slower function that
4986 * does similar things.
4987 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004988SYSCALL_DEFINE1(nice, int, increment)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989{
Ingo Molnar48f24c42006-07-03 00:25:40 -07004990 long nice, retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991
4992 /*
4993 * Setpriority might change our priority at the same moment.
4994 * We don't have to worry. Conceptually one call occurs first
4995 * and we have a single winner.
4996 */
Matt Mackalle43379f2005-05-01 08:59:00 -07004997 if (increment < -40)
4998 increment = -40;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999 if (increment > 40)
5000 increment = 40;
5001
Américo Wang2b8f8362009-02-16 18:54:21 +08005002 nice = TASK_NICE(current) + increment;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 if (nice < -20)
5004 nice = -20;
5005 if (nice > 19)
5006 nice = 19;
5007
Matt Mackalle43379f2005-05-01 08:59:00 -07005008 if (increment < 0 && !can_nice(current, nice))
5009 return -EPERM;
5010
Linus Torvalds1da177e2005-04-16 15:20:36 -07005011 retval = security_task_setnice(current, nice);
5012 if (retval)
5013 return retval;
5014
5015 set_user_nice(current, nice);
5016 return 0;
5017}
5018
5019#endif
5020
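/*
 * Worked example for sys_nice()/can_nice() above (hypothetical values):
 * with a current nice of 5, nice(10) simply moves to nice 15, clamped
 * to the [-20, 19] range.  nice(-10) targets nice -5; can_nice()
 * converts that to the rlimit-style value 20 - (-5) = 25 and allows it
 * only if RLIMIT_NICE is at least 25 or the task has CAP_SYS_NICE.
 */
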
5021/**
5022 * task_prio - return the priority value of a given task.
5023 * @p: the task in question.
5024 *
5025 * This is the priority value as seen by users in /proc.
5026 * RT tasks are offset by -200. Normal tasks are centered
5027 * around 0, value goes from -16 to +15.
5028 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07005029int task_prio(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030{
5031 return p->prio - MAX_RT_PRIO;
5032}
5033
5034/**
5035 * task_nice - return the nice value of a given task.
5036 * @p: the task in question.
5037 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07005038int task_nice(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039{
5040 return TASK_NICE(p);
5041}
Pavel Roskin150d8be2008-03-05 16:56:37 -05005042EXPORT_SYMBOL(task_nice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043
5044/**
5045 * idle_cpu - is a given cpu idle currently?
5046 * @cpu: the processor in question.
5047 */
5048int idle_cpu(int cpu)
5049{
5050 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
5051}
5052
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053/**
5054 * idle_task - return the idle task for a given cpu.
5055 * @cpu: the processor in question.
5056 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07005057struct task_struct *idle_task(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005058{
5059 return cpu_rq(cpu)->idle;
5060}
5061
5062/**
5063 * find_process_by_pid - find a process with a matching PID value.
5064 * @pid: the pid in question.
5065 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02005066static struct task_struct *find_process_by_pid(pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067{
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07005068 return pid ? find_task_by_vpid(pid) : current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069}
5070
5071/* Actually do priority change: must hold rq lock. */
Ingo Molnardd41f592007-07-09 18:51:59 +02005072static void
5073__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075 p->policy = policy;
5076 p->rt_priority = prio;
Ingo Molnarb29739f2006-06-27 02:54:51 -07005077 p->normal_prio = normal_prio(p);
5078 /* we are holding p->pi_lock already */
5079 p->prio = rt_mutex_getprio(p);
Peter Zijlstraffd44db2009-11-10 20:12:01 +01005080 if (rt_prio(p->prio))
5081 p->sched_class = &rt_sched_class;
5082 else
5083 p->sched_class = &fair_sched_class;
Peter Williams2dd73a42006-06-27 02:54:34 -07005084 set_load_weight(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085}
5086
David Howellsc69e8d92008-11-14 10:39:19 +11005087/*
5088 * check the target process has a UID that matches the current process's
5089 */
5090static bool check_same_owner(struct task_struct *p)
5091{
5092 const struct cred *cred = current_cred(), *pcred;
5093 bool match;
5094
5095 rcu_read_lock();
5096 pcred = __task_cred(p);
Serge E. Hallynb0e77592011-03-23 16:43:24 -07005097 if (cred->user->user_ns == pcred->user->user_ns)
5098 match = (cred->euid == pcred->euid ||
5099 cred->euid == pcred->uid);
5100 else
5101 match = false;
David Howellsc69e8d92008-11-14 10:39:19 +11005102 rcu_read_unlock();
5103 return match;
5104}
5105
Rusty Russell961ccdd2008-06-23 13:55:38 +10005106static int __sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005107 const struct sched_param *param, bool user)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02005109 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110 unsigned long flags;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01005111 const struct sched_class *prev_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005112 struct rq *rq;
Lennart Poetteringca94c442009-06-15 17:17:47 +02005113 int reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114
Steven Rostedt66e53932006-06-27 02:54:44 -07005115 /* may grab non-irq protected spin_locks */
5116 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117recheck:
5118 /* double check policy once rq lock held */
Lennart Poetteringca94c442009-06-15 17:17:47 +02005119 if (policy < 0) {
5120 reset_on_fork = p->sched_reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 policy = oldpolicy = p->policy;
Lennart Poetteringca94c442009-06-15 17:17:47 +02005122 } else {
5123 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
5124 policy &= ~SCHED_RESET_ON_FORK;
5125
5126 if (policy != SCHED_FIFO && policy != SCHED_RR &&
5127 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
5128 policy != SCHED_IDLE)
5129 return -EINVAL;
5130 }
5131
Linus Torvalds1da177e2005-04-16 15:20:36 -07005132 /*
5133 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02005134 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
5135 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005136 */
5137 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005138 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04005139 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02005141 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005142 return -EINVAL;
5143
Olivier Croquette37e4ab32005-06-25 14:57:32 -07005144 /*
5145 * Allow unprivileged RT tasks to decrease priority:
5146 */
Rusty Russell961ccdd2008-06-23 13:55:38 +10005147 if (user && !capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02005148 if (rt_policy(policy)) {
Oleg Nesterova44702e2010-06-11 01:09:44 +02005149 unsigned long rlim_rtprio =
5150 task_rlimit(p, RLIMIT_RTPRIO);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005151
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07005152 /* can't set/change the rt policy */
5153 if (policy != p->policy && !rlim_rtprio)
5154 return -EPERM;
5155
5156 /* can't increase priority */
5157 if (param->sched_priority > p->rt_priority &&
5158 param->sched_priority > rlim_rtprio)
5159 return -EPERM;
5160 }
Darren Hartc02aa732011-02-17 15:37:07 -08005161
Ingo Molnardd41f592007-07-09 18:51:59 +02005162 /*
Darren Hartc02aa732011-02-17 15:37:07 -08005163 * Treat SCHED_IDLE as nice 20. Only allow a switch to
5164 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
Ingo Molnardd41f592007-07-09 18:51:59 +02005165 */
Darren Hartc02aa732011-02-17 15:37:07 -08005166 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
5167 if (!can_nice(p, TASK_NICE(p)))
5168 return -EPERM;
5169 }
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07005170
Olivier Croquette37e4ab32005-06-25 14:57:32 -07005171 /* can't change other user's priorities */
David Howellsc69e8d92008-11-14 10:39:19 +11005172 if (!check_same_owner(p))
Olivier Croquette37e4ab32005-06-25 14:57:32 -07005173 return -EPERM;
Lennart Poetteringca94c442009-06-15 17:17:47 +02005174
5175 /* Normal users shall not reset the sched_reset_on_fork flag */
5176 if (p->sched_reset_on_fork && !reset_on_fork)
5177 return -EPERM;
Olivier Croquette37e4ab32005-06-25 14:57:32 -07005178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005179
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07005180 if (user) {
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09005181 retval = security_task_setscheduler(p);
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07005182 if (retval)
5183 return retval;
5184 }
5185
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07005187 * make sure no PI-waiters arrive (or leave) while we are
5188 * changing the priority of the task:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005189 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005190 * To be able to change p->policy safely, the appropriate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191 * runqueue lock must be held.
5192 */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005193 rq = task_rq_lock(p, &flags);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005194
Peter Zijlstra34f971f2010-09-22 13:53:15 +02005195 /*
5196	 * Changing the policy of the stop threads is a very bad idea
5197 */
5198 if (p == rq->stop) {
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005199 task_rq_unlock(rq, p, &flags);
Peter Zijlstra34f971f2010-09-22 13:53:15 +02005200 return -EINVAL;
5201 }
5202
Dario Faggiolia51e9192011-03-24 14:00:18 +01005203 /*
5204 * If not changing anything there's no need to proceed further:
5205 */
5206 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
5207 param->sched_priority == p->rt_priority))) {
5208
5209 __task_rq_unlock(rq);
5210 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5211 return 0;
5212 }
5213
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005214#ifdef CONFIG_RT_GROUP_SCHED
5215 if (user) {
5216 /*
5217 * Do not allow realtime tasks into groups that have no runtime
5218 * assigned.
5219 */
5220 if (rt_bandwidth_enabled() && rt_policy(policy) &&
Mike Galbraithf4493772011-01-13 04:54:50 +01005221 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5222 !task_group_is_autogroup(task_group(p))) {
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005223 task_rq_unlock(rq, p, &flags);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005224 return -EPERM;
5225 }
5226 }
5227#endif
5228
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229 /* recheck policy now with rq lock held */
5230 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5231 policy = oldpolicy = -1;
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005232 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233 goto recheck;
5234 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02005235 on_rq = p->on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01005236 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005237 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005238 deactivate_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005239 if (running)
5240 p->sched_class->put_prev_task(rq, p);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005241
Lennart Poetteringca94c442009-06-15 17:17:47 +02005242 p->sched_reset_on_fork = reset_on_fork;
5243
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01005245 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02005246 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005247
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005248 if (running)
5249 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005250 if (on_rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02005251 activate_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005252
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005253 check_class_changed(rq, p, prev_class, oldprio);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005254 task_rq_unlock(rq, p, &flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005255
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07005256 rt_mutex_adjust_pi(p);
5257
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 return 0;
5259}
Rusty Russell961ccdd2008-06-23 13:55:38 +10005260
5261/**
5262 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5263 * @p: the task in question.
5264 * @policy: new policy.
5265 * @param: structure containing the new RT priority.
5266 *
5267 * NOTE that the task may be already dead.
5268 */
5269int sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005270 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005271{
5272 return __sched_setscheduler(p, policy, param, true);
5273}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274EXPORT_SYMBOL_GPL(sched_setscheduler);
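
/*
 * Illustrative kernel-side sketch (editorial addition, not part of this
 * file): a driver that wants its kthread to run as a mid-priority FIFO
 * task.  "worker_task" and the priority value 50 are made-up.
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *	int ret;
 *
 *	ret = sched_setscheduler(worker_task, SCHED_FIFO, &sp);
 *	if (ret)
 *		pr_warn("worker not made RT: %d\n", ret);
 */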
5275
Rusty Russell961ccdd2008-06-23 13:55:38 +10005276/**
5277 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5278 * @p: the task in question.
5279 * @policy: new policy.
5280 * @param: structure containing the new RT priority.
5281 *
5282 * Just like sched_setscheduler, only don't bother checking if the
5283 * current context has permission. For example, this is needed in
5284 * stop_machine(): we create temporary high priority worker threads,
5285 * but our caller might not have that capability.
5286 */
5287int sched_setscheduler_nocheck(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005288 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005289{
5290 return __sched_setscheduler(p, policy, param, false);
5291}
5292
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005293static int
5294do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296 struct sched_param lparam;
5297 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005298 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299
5300 if (!param || pid < 0)
5301 return -EINVAL;
5302 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5303 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005304
5305 rcu_read_lock();
5306 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005307 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005308 if (p != NULL)
5309 retval = sched_setscheduler(p, policy, &lparam);
5310 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07005311
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312 return retval;
5313}
5314
5315/**
5316 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5317 * @pid: the pid in question.
5318 * @policy: new policy.
5319 * @param: structure containing the new RT priority.
5320 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005321SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5322 struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005323{
Jason Baronc21761f2006-01-18 17:43:03 -08005324 /* negative values for policy are not valid */
5325 if (policy < 0)
5326 return -EINVAL;
5327
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328 return do_sched_setscheduler(pid, policy, param);
5329}
5330
5331/**
5332 * sys_sched_setparam - set/change the RT priority of a thread
5333 * @pid: the pid in question.
5334 * @param: structure containing the new RT priority.
5335 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005336SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005337{
5338 return do_sched_setscheduler(pid, -1, param);
5339}
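
/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * switching the calling thread to SCHED_FIFO priority 10.  Per the
 * validation in __sched_setscheduler(), RT policies take a priority in
 * 1..MAX_USER_RT_PRIO-1 (1..99), while SCHED_NORMAL/SCHED_BATCH/SCHED_IDLE
 * require a priority of 0.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp = { .sched_priority = 10 };
 *
 *		if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
 *			perror("sched_setscheduler");	// EPERM without CAP_SYS_NICE/RLIMIT_RTPRIO
 *			return 1;
 *		}
 *		printf("now SCHED_FIFO, priority %d\n", sp.sched_priority);
 *		return 0;
 *	}
 */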
5340
5341/**
5342 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5343 * @pid: the pid in question.
5344 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005345SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005346{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005347 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005348 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349
5350 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005351 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005352
5353 retval = -ESRCH;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005354 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005355 p = find_process_by_pid(pid);
5356 if (p) {
5357 retval = security_task_getscheduler(p);
5358 if (!retval)
Lennart Poetteringca94c442009-06-15 17:17:47 +02005359 retval = p->policy
5360 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361 }
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005362 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363 return retval;
5364}
5365
5366/**
Lennart Poetteringca94c442009-06-15 17:17:47 +02005367 * sys_sched_getparam - get the RT priority of a thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368 * @pid: the pid in question.
5369 * @param: structure containing the RT priority.
5370 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005371SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005372{
5373 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005374 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005375 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376
5377 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005378 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005379
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005380 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381 p = find_process_by_pid(pid);
5382 retval = -ESRCH;
5383 if (!p)
5384 goto out_unlock;
5385
5386 retval = security_task_getscheduler(p);
5387 if (retval)
5388 goto out_unlock;
5389
5390 lp.sched_priority = p->rt_priority;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005391 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392
5393 /*
5394 * This one might sleep, we cannot do it with a spinlock held ...
5395 */
5396 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5397
Linus Torvalds1da177e2005-04-16 15:20:36 -07005398 return retval;
5399
5400out_unlock:
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005401 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005402 return retval;
5403}
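
/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * reading the policy and RT priority back.  As the code above shows, the
 * returned policy may have SCHED_RESET_ON_FORK ORed in when that flag is
 * set, so callers comparing against SCHED_* constants should mask it off.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp;
 *		int policy = sched_getscheduler(0);
 *
 *		if (policy == -1 || sched_getparam(0, &sp) == -1) {
 *			perror("sched_getscheduler/sched_getparam");
 *			return 1;
 *		}
 *		printf("policy %d, rt_priority %d\n", policy, sp.sched_priority);
 *		return 0;
 *	}
 */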
5404
Rusty Russell96f874e2008-11-25 02:35:14 +10305405long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005406{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305407 cpumask_var_t cpus_allowed, new_mask;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005408 struct task_struct *p;
5409 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005411 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005412 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413
5414 p = find_process_by_pid(pid);
5415 if (!p) {
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005416 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005417 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418 return -ESRCH;
5419 }
5420
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005421 /* Prevent p going away */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422 get_task_struct(p);
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005423 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305425 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5426 retval = -ENOMEM;
5427 goto out_put_task;
5428 }
5429 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5430 retval = -ENOMEM;
5431 goto out_free_cpus_allowed;
5432 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433 retval = -EPERM;
Serge E. Hallynb0e77592011-03-23 16:43:24 -07005434 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435 goto out_unlock;
5436
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09005437 retval = security_task_setscheduler(p);
David Quigleye7834f82006-06-23 02:03:59 -07005438 if (retval)
5439 goto out_unlock;
5440
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305441 cpuset_cpus_allowed(p, cpus_allowed);
5442 cpumask_and(new_mask, in_mask, cpus_allowed);
Peter Zijlstra49246272010-10-17 21:46:10 +02005443again:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305444 retval = set_cpus_allowed_ptr(p, new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445
Paul Menage8707d8b2007-10-18 23:40:22 -07005446 if (!retval) {
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305447 cpuset_cpus_allowed(p, cpus_allowed);
5448 if (!cpumask_subset(new_mask, cpus_allowed)) {
Paul Menage8707d8b2007-10-18 23:40:22 -07005449 /*
5450 * We must have raced with a concurrent cpuset
5451 * update. Just reset the cpus_allowed to the
5452 * cpuset's cpus_allowed
5453 */
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305454 cpumask_copy(new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07005455 goto again;
5456 }
5457 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458out_unlock:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305459 free_cpumask_var(new_mask);
5460out_free_cpus_allowed:
5461 free_cpumask_var(cpus_allowed);
5462out_put_task:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005463 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005464 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465 return retval;
5466}
5467
5468static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
Rusty Russell96f874e2008-11-25 02:35:14 +10305469 struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470{
Rusty Russell96f874e2008-11-25 02:35:14 +10305471 if (len < cpumask_size())
5472 cpumask_clear(new_mask);
5473 else if (len > cpumask_size())
5474 len = cpumask_size();
5475
Linus Torvalds1da177e2005-04-16 15:20:36 -07005476 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5477}
5478
5479/**
5480 * sys_sched_setaffinity - set the cpu affinity of a process
5481 * @pid: pid of the process
5482 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5483 * @user_mask_ptr: user-space pointer to the new cpu mask
5484 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005485SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5486 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005487{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305488 cpumask_var_t new_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489 int retval;
5490
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305491 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5492 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005493
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305494 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5495 if (retval == 0)
5496 retval = sched_setaffinity(pid, new_mask);
5497 free_cpumask_var(new_mask);
5498 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005499}
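
/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * pinning the calling process to CPU 0 through the glibc wrapper.  As in
 * sched_setaffinity() above, the kernel intersects the requested mask with
 * the task's cpuset before applying it.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);
 *		if (sched_setaffinity(0, sizeof(set), &set) == -1) {
 *			perror("sched_setaffinity");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */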
5500
Rusty Russell96f874e2008-11-25 02:35:14 +10305501long sched_getaffinity(pid_t pid, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005502{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005503 struct task_struct *p;
Thomas Gleixner31605682009-12-08 20:24:16 +00005504 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005505 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005507 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005508 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005509
5510 retval = -ESRCH;
5511 p = find_process_by_pid(pid);
5512 if (!p)
5513 goto out_unlock;
5514
David Quigleye7834f82006-06-23 02:03:59 -07005515 retval = security_task_getscheduler(p);
5516 if (retval)
5517 goto out_unlock;
5518
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005519 raw_spin_lock_irqsave(&p->pi_lock, flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10305520 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
Peter Zijlstra013fdb82011-04-05 17:23:45 +02005521 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005522
5523out_unlock:
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005524 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005525 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005526
Ulrich Drepper9531b622007-08-09 11:16:46 +02005527 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528}
5529
5530/**
5531 * sys_sched_getaffinity - get the cpu affinity of a process
5532 * @pid: pid of the process
5533 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5534 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5535 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005536SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5537 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005538{
5539 int ret;
Rusty Russellf17c8602008-11-25 02:35:11 +10305540 cpumask_var_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005541
Anton Blanchard84fba5e2010-04-06 17:02:19 +10005542 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005543 return -EINVAL;
5544 if (len & (sizeof(unsigned long)-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005545 return -EINVAL;
5546
Rusty Russellf17c8602008-11-25 02:35:11 +10305547 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5548 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549
Rusty Russellf17c8602008-11-25 02:35:11 +10305550 ret = sched_getaffinity(pid, mask);
5551 if (ret == 0) {
KOSAKI Motohiro8bc037f2010-03-17 09:36:58 +09005552 size_t retlen = min_t(size_t, len, cpumask_size());
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005553
5554 if (copy_to_user(user_mask_ptr, mask, retlen))
Rusty Russellf17c8602008-11-25 02:35:11 +10305555 ret = -EFAULT;
5556 else
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005557 ret = retlen;
Rusty Russellf17c8602008-11-25 02:35:11 +10305558 }
5559 free_cpumask_var(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005560
Rusty Russellf17c8602008-11-25 02:35:11 +10305561 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005562}
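
/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * reading the affinity mask back.  Note that the raw syscall returns the
 * number of mask bytes copied (retlen above); the glibc wrapper hides that
 * and simply returns 0 on success.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *		int cpu;
 *
 *		if (sched_getaffinity(0, sizeof(set), &set) == -1) {
 *			perror("sched_getaffinity");
 *			return 1;
 *		}
 *		for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *			if (CPU_ISSET(cpu, &set))
 *				printf("cpu %d allowed\n", cpu);
 *		return 0;
 *	}
 */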
5563
5564/**
5565 * sys_sched_yield - yield the current processor to other threads.
5566 *
Ingo Molnardd41f592007-07-09 18:51:59 +02005567 * This function yields the current CPU to other tasks. If there are no
5568 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005570SYSCALL_DEFINE0(sched_yield)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005571{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005572 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005573
Ingo Molnar2d723762007-10-15 17:00:12 +02005574 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02005575 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576
5577 /*
5578 * Since we are going to call schedule() anyway, there's
5579 * no need to preempt or enable interrupts:
5580 */
5581 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07005582 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Thomas Gleixner9828ea92009-12-03 20:55:53 +01005583 do_raw_spin_unlock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005584 preempt_enable_no_resched();
5585
5586 schedule();
5587
5588 return 0;
5589}
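
/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * a spin loop that yields the CPU while polling a flag set by another
 * thread.  Whether this actually helps depends on what else is runnable.
 *
 *	#include <sched.h>
 *
 *	extern volatile int ready;	// set by another thread
 *
 *	void wait_for_ready(void)
 *	{
 *		while (!ready)
 *			sched_yield();
 *	}
 */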
5590
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005591static inline int should_resched(void)
5592{
5593 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5594}
5595
Andrew Mortone7b38402006-06-30 01:56:00 -07005596static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005597{
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02005598 add_preempt_count(PREEMPT_ACTIVE);
5599 schedule();
5600 sub_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005601}
5602
Herbert Xu02b67cc32008-01-25 21:08:28 +01005603int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005604{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005605 if (should_resched()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005606 __cond_resched();
5607 return 1;
5608 }
5609 return 0;
5610}
Herbert Xu02b67cc32008-01-25 21:08:28 +01005611EXPORT_SYMBOL(_cond_resched);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005612
5613/*
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005614 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005615 * call schedule, and on return reacquire the lock.
5616 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005617 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07005618 * operations here to prevent schedule() from being called twice (once via
5619 * spin_unlock(), once by hand).
5620 */
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005621int __cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005622{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005623 int resched = should_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07005624 int ret = 0;
5625
Peter Zijlstraf607c662009-07-20 19:16:29 +02005626 lockdep_assert_held(lock);
5627
Nick Piggin95c354f2008-01-30 13:31:20 +01005628 if (spin_needbreak(lock) || resched) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629 spin_unlock(lock);
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005630 if (resched)
Nick Piggin95c354f2008-01-30 13:31:20 +01005631 __cond_resched();
5632 else
5633 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07005634 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005635 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005636 }
Jan Kara6df3cec2005-06-13 15:52:32 -07005637 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005638}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005639EXPORT_SYMBOL(__cond_resched_lock);
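
/*
 * Illustrative kernel-side sketch (editorial addition, not part of this
 * file): the usual pattern for doing a long walk under a spinlock without
 * hogging the CPU, using the cond_resched_lock() wrapper from
 * <linux/sched.h>.  "my_lock", "table" and "nr_entries" are made-up, and
 * the caller must be able to tolerate the lock being dropped mid-loop.
 *
 *	int i;
 *
 *	spin_lock(&my_lock);
 *	for (i = 0; i < nr_entries; i++) {
 *		process_entry(&table[i]);
 *		cond_resched_lock(&my_lock);	// may drop and retake my_lock
 *	}
 *	spin_unlock(&my_lock);
 */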
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005641int __sched __cond_resched_softirq(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005642{
5643 BUG_ON(!in_softirq());
5644
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005645 if (should_resched()) {
Thomas Gleixner98d825672007-05-23 13:58:18 -07005646 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647 __cond_resched();
5648 local_bh_disable();
5649 return 1;
5650 }
5651 return 0;
5652}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005653EXPORT_SYMBOL(__cond_resched_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005654
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655/**
5656 * yield - yield the current processor to other threads.
5657 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08005658 * This is a shortcut for kernel-space yielding - it marks the
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659 * thread runnable and calls sys_sched_yield().
5660 */
5661void __sched yield(void)
5662{
5663 set_current_state(TASK_RUNNING);
5664 sys_sched_yield();
5665}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666EXPORT_SYMBOL(yield);
5667
Mike Galbraithd95f4122011-02-01 09:50:51 -05005668/**
5669 * yield_to - yield the current processor to another thread in
5670 * your thread group, or accelerate that thread toward the
5671 * processor it's on.
Randy Dunlap16addf92011-03-18 09:34:53 -07005672 * @p: target task
5673 * @preempt: whether task preemption is allowed or not
Mike Galbraithd95f4122011-02-01 09:50:51 -05005674 *
5675 * It's the caller's job to ensure that the target task struct
5676 * can't go away on us before we can do any checks.
5677 *
5678 * Returns true if we indeed boosted the target task.
5679 */
5680bool __sched yield_to(struct task_struct *p, bool preempt)
5681{
5682 struct task_struct *curr = current;
5683 struct rq *rq, *p_rq;
5684 unsigned long flags;
5685 bool yielded = 0;
5686
5687 local_irq_save(flags);
5688 rq = this_rq();
5689
5690again:
5691 p_rq = task_rq(p);
5692 double_rq_lock(rq, p_rq);
5693 while (task_rq(p) != p_rq) {
5694 double_rq_unlock(rq, p_rq);
5695 goto again;
5696 }
5697
5698 if (!curr->sched_class->yield_to_task)
5699 goto out;
5700
5701 if (curr->sched_class != p->sched_class)
5702 goto out;
5703
5704 if (task_running(p_rq, p) || p->state)
5705 goto out;
5706
5707 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08005708 if (yielded) {
Mike Galbraithd95f4122011-02-01 09:50:51 -05005709 schedstat_inc(rq, yld_count);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08005710 /*
5711 * Make p's CPU reschedule; pick_next_entity takes care of
5712 * fairness.
5713 */
5714 if (preempt && rq != p_rq)
5715 resched_task(p_rq->curr);
5716 }
Mike Galbraithd95f4122011-02-01 09:50:51 -05005717
5718out:
5719 double_rq_unlock(rq, p_rq);
5720 local_irq_restore(flags);
5721
5722 if (yielded)
5723 schedule();
5724
5725 return yielded;
5726}
5727EXPORT_SYMBOL_GPL(yield_to);
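
/*
 * Illustrative kernel-side sketch (editorial addition, not part of this
 * file): the kind of caller yield_to() was added for, e.g. a virtualization
 * host that sees a vCPU spinning on a lock held by another vCPU and donates
 * its timeslice to the holder.  "target" is a task_struct the caller
 * already holds a reference to.
 *
 *	bool yielded;
 *
 *	// donate our slice; "true" allows rescheduling the target's CPU
 *	yielded = yield_to(target, true);
 *	if (!yielded)
 *		cpu_relax();	// nobody suitable to boost; keep spinning politely
 */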
5728
Linus Torvalds1da177e2005-04-16 15:20:36 -07005729/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005730 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07005731 * that process accounting knows that this is a task in IO wait state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005732 */
5733void __sched io_schedule(void)
5734{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005735 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005736
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005737 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005738 atomic_inc(&rq->nr_iowait);
Jens Axboe73c10102011-03-08 13:19:51 +01005739 blk_flush_plug(current);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005740 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005741 schedule();
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005742 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005743 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005744 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005745}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005746EXPORT_SYMBOL(io_schedule);
5747
5748long __sched io_schedule_timeout(long timeout)
5749{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005750 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005751 long ret;
5752
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005753 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005754 atomic_inc(&rq->nr_iowait);
Jens Axboe73c10102011-03-08 13:19:51 +01005755 blk_flush_plug(current);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005756 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005757 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005758 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005759 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005760 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005761 return ret;
5762}
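
/*
 * Illustrative kernel-side sketch (editorial addition, not part of this
 * file): a wait loop that charges the sleep to I/O wait via io_schedule().
 * "wq" and "io_done" are made-up; real callers are typically helpers in mm/
 * and block/ code (e.g. the page-wait paths).
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (io_done)
 *			break;
 *		io_schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */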
5763
5764/**
5765 * sys_sched_get_priority_max - return maximum RT priority.
5766 * @policy: scheduling class.
5767 *
5768 * this syscall returns the maximum rt_priority that can be used
5769 * by a given scheduling class.
5770 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005771SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005772{
5773 int ret = -EINVAL;
5774
5775 switch (policy) {
5776 case SCHED_FIFO:
5777 case SCHED_RR:
5778 ret = MAX_USER_RT_PRIO-1;
5779 break;
5780 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005781 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005782 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005783 ret = 0;
5784 break;
5785 }
5786 return ret;
5787}
5788
5789/**
5790 * sys_sched_get_priority_min - return minimum RT priority.
5791 * @policy: scheduling class.
5792 *
5793 * this syscall returns the minimum rt_priority that can be used
5794 * by a given scheduling class.
5795 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005796SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797{
5798 int ret = -EINVAL;
5799
5800 switch (policy) {
5801 case SCHED_FIFO:
5802 case SCHED_RR:
5803 ret = 1;
5804 break;
5805 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005806 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005807 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005808 ret = 0;
5809 }
5810 return ret;
5811}
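
/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * querying the valid RT priority range instead of hard-coding 1..99.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		printf("SCHED_FIFO priority range: %d..%d\n",
 *		       sched_get_priority_min(SCHED_FIFO),
 *		       sched_get_priority_max(SCHED_FIFO));
 *		return 0;
 *	}
 */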
5812
5813/**
5814 * sys_sched_rr_get_interval - return the default timeslice of a process.
5815 * @pid: pid of the process.
5816 * @interval: userspace pointer to the timeslice value.
5817 *
5818 * this syscall writes the default timeslice value of a given process
5819 * into the user-space timespec buffer. A value of '0' means infinity.
5820 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01005821SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01005822 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005823{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005824 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005825 unsigned int time_slice;
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005826 unsigned long flags;
5827 struct rq *rq;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005828 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005830
5831 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005832 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005833
5834 retval = -ESRCH;
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005835 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005836 p = find_process_by_pid(pid);
5837 if (!p)
5838 goto out_unlock;
5839
5840 retval = security_task_getscheduler(p);
5841 if (retval)
5842 goto out_unlock;
5843
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005844 rq = task_rq_lock(p, &flags);
5845 time_slice = p->sched_class->get_rr_interval(rq, p);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02005846 task_rq_unlock(rq, p, &flags);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005847
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005848 rcu_read_unlock();
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005849 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005851 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005852
Linus Torvalds1da177e2005-04-16 15:20:36 -07005853out_unlock:
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005854 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 return retval;
5856}
5857
Steven Rostedt7c731e02008-05-12 21:20:41 +02005858static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005859
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005860void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005861{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005862 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005863 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005864
Linus Torvalds1da177e2005-04-16 15:20:36 -07005865 state = p->state ? __ffs(p->state) + 1 : 0;
Erik Gilling28d06862010-11-19 18:08:51 -08005866 printk(KERN_INFO "%-15.15s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005867 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02005868#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07005869 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005870 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005871 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005872 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005873#else
5874 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005875 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005876 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005877 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005878#endif
5879#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05005880 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005881#endif
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005882 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
David Rientjesaa47b7e2009-05-04 01:38:05 -07005883 task_pid_nr(p), task_pid_nr(p->real_parent),
5884 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005885
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01005886 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005887}
5888
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005889void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005890{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005891 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005892
Ingo Molnar4bd77322007-07-11 21:21:47 +02005893#if BITS_PER_LONG == 32
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005894 printk(KERN_INFO
5895 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005896#else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005897 printk(KERN_INFO
5898 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005899#endif
5900 read_lock(&tasklist_lock);
5901 do_each_thread(g, p) {
5902 /*
5903 * reset the NMI-timeout, listing all files on a slow
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005904 * console might take a lot of time:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005905 */
5906 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07005907 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005908 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909 } while_each_thread(g, p);
5910
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07005911 touch_all_softlockup_watchdogs();
5912
Ingo Molnardd41f592007-07-09 18:51:59 +02005913#ifdef CONFIG_SCHED_DEBUG
5914 sysrq_sched_debug_show();
5915#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005916 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005917 /*
5918 * Only show locks if all tasks are dumped:
5919 */
Shmulik Ladkani93335a22009-11-25 15:23:41 +02005920 if (!state_filter)
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005921 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005922}
5923
Ingo Molnar1df21052007-07-09 18:51:58 +02005924void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5925{
Ingo Molnardd41f592007-07-09 18:51:59 +02005926 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02005927}
5928
Ingo Molnarf340c0d2005-06-28 16:40:42 +02005929/**
5930 * init_idle - set up an idle thread for a given CPU
5931 * @idle: task in question
5932 * @cpu: cpu the idle task belongs to
5933 *
5934 * NOTE: this function does not set the idle thread's NEED_RESCHED
5935 * flag, to make booting more robust.
5936 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07005937void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005938{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005939 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005940 unsigned long flags;
5941
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005942 raw_spin_lock_irqsave(&rq->lock, flags);
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01005943
Ingo Molnardd41f592007-07-09 18:51:59 +02005944 __sched_fork(idle);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01005945 idle->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02005946 idle->se.exec_start = sched_clock();
5947
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09005948 do_set_cpus_allowed(idle, cpumask_of(cpu));
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005949 /*
5950 * We're having a chicken and egg problem, even though we are
5951 * holding rq->lock, the cpu isn't yet set to this cpu so the
5952 * lockdep check in task_group() will fail.
5953 *
5954 * Similar case to sched_fork(). / Alternatively we could
5955 * use task_rq_lock() here and obtain the other rq->lock.
5956 *
5957 * Silence PROVE_RCU
5958 */
5959 rcu_read_lock();
Ingo Molnardd41f592007-07-09 18:51:59 +02005960 __set_task_cpu(idle, cpu);
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005961 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005962
Linus Torvalds1da177e2005-04-16 15:20:36 -07005963 rq->curr = rq->idle = idle;
Peter Zijlstra3ca7a442011-04-05 17:23:40 +02005964#if defined(CONFIG_SMP)
5965 idle->on_cpu = 1;
Nick Piggin4866cde2005-06-25 14:57:23 -07005966#endif
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005967 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005968
5969 /* Set the preempt count _outside_ the spinlocks! */
Al Viroa1261f52005-11-13 16:06:55 -08005970 task_thread_info(idle)->preempt_count = 0;
Jonathan Corbet625f2a32011-04-22 11:19:10 -06005971
Ingo Molnardd41f592007-07-09 18:51:59 +02005972 /*
5973 * The idle tasks have their own, simple scheduling class:
5974 */
5975 idle->sched_class = &idle_sched_class;
Steven Rostedt868baf02011-02-10 21:26:13 -05005976 ftrace_graph_init_idle_task(idle, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005977}
5978
5979/*
5980 * In a system that switches off the HZ timer nohz_cpu_mask
5981 * indicates which cpus entered this state. This is used
5982 * in the rcu update to wait only for active cpus. For system
5983 * which do not switch off the HZ timer nohz_cpu_mask should
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305984 * always be CPU_BITS_NONE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005985 */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305986cpumask_var_t nohz_cpu_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005987
Ingo Molnar19978ca2007-11-09 22:39:38 +01005988/*
5989 * Increase the granularity value when there are more CPUs,
5990 * because with more CPUs the 'effective latency' as visible
5991 * to users decreases. But the relationship is not linear,
5992 * so pick a second-best guess by going with the log2 of the
5993 * number of CPUs.
5994 *
5995 * This idea comes from the SD scheduler of Con Kolivas:
5996 */
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005997static int get_update_sysctl_factor(void)
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005998{
Mike Galbraith4ca3ef72009-12-10 09:25:53 +01005999 unsigned int cpus = min_t(int, num_online_cpus(), 8);
Christian Ehrhardt1983a922009-11-30 12:16:47 +01006000 unsigned int factor;
6001
6002 switch (sysctl_sched_tunable_scaling) {
6003 case SCHED_TUNABLESCALING_NONE:
6004 factor = 1;
6005 break;
6006 case SCHED_TUNABLESCALING_LINEAR:
6007 factor = cpus;
6008 break;
6009 case SCHED_TUNABLESCALING_LOG:
6010 default:
6011 factor = 1 + ilog2(cpus);
6012 break;
6013 }
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006014
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01006015 return factor;
6016}
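
/*
 * Worked example of the scaling above (editorial addition): with the default
 * SCHED_TUNABLESCALING_LOG policy on a 4-CPU machine, factor = 1 + ilog2(4)
 * = 3, so sysctl_sched_latency becomes 3 * normalized_sysctl_sched_latency.
 * Since cpus is clamped to 8, the factor never exceeds 1 + ilog2(8) = 4.
 */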
6017
6018static void update_sysctl(void)
6019{
6020 unsigned int factor = get_update_sysctl_factor();
6021
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006022#define SET_SYSCTL(name) \
6023 (sysctl_##name = (factor) * normalized_sysctl_##name)
6024 SET_SYSCTL(sched_min_granularity);
6025 SET_SYSCTL(sched_latency);
6026 SET_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006027#undef SET_SYSCTL
6028}
6029
Ingo Molnar19978ca2007-11-09 22:39:38 +01006030static inline void sched_init_granularity(void)
6031{
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006032 update_sysctl();
Ingo Molnar19978ca2007-11-09 22:39:38 +01006033}
6034
Linus Torvalds1da177e2005-04-16 15:20:36 -07006035#ifdef CONFIG_SMP
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09006036void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
6037{
6038 if (p->sched_class && p->sched_class->set_cpus_allowed)
6039 p->sched_class->set_cpus_allowed(p, new_mask);
6040 else {
6041 cpumask_copy(&p->cpus_allowed, new_mask);
6042 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
6043 }
6044}
6045
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046/*
6047 * This is how migration works:
6048 *
Tejun Heo969c7922010-05-06 18:49:21 +02006049 * 1) we invoke migration_cpu_stop() on the target CPU using
6050 * stop_one_cpu().
6051 * 2) stopper starts to run (implicitly forcing the migrated thread
6052 * off the CPU)
6053 * 3) it checks whether the migrated task is still in the wrong runqueue.
6054 * 4) if it's in the wrong runqueue then the migration thread removes
Linus Torvalds1da177e2005-04-16 15:20:36 -07006055 * it and puts it into the right queue.
Tejun Heo969c7922010-05-06 18:49:21 +02006056 * 5) stopper completes and stop_one_cpu() returns and the migration
6057 * is done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006058 */
6059
6060/*
6061 * Change a given task's CPU affinity. Migrate the thread to a
6062 * proper CPU and schedule it away if the CPU it's executing on
6063 * is removed from the allowed bitmask.
6064 *
6065 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006066 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07006067 * call is not atomic; no spinlocks may be held.
6068 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306069int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006070{
6071 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07006072 struct rq *rq;
Tejun Heo969c7922010-05-06 18:49:21 +02006073 unsigned int dest_cpu;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006074 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006075
6076 rq = task_rq_lock(p, &flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01006077
Yong Zhangdb44fc02011-05-09 22:07:05 +08006078 if (cpumask_equal(&p->cpus_allowed, new_mask))
6079 goto out;
6080
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006081 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006082 ret = -EINVAL;
6083 goto out;
6084 }
6085
Yong Zhangdb44fc02011-05-09 22:07:05 +08006086 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
David Rientjes9985b0b2008-06-05 12:57:11 -07006087 ret = -EINVAL;
6088 goto out;
6089 }
6090
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09006091 do_set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01006092
Linus Torvalds1da177e2005-04-16 15:20:36 -07006093 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10306094 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006095 goto out;
6096
Tejun Heo969c7922010-05-06 18:49:21 +02006097 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
Peter Zijlstrabd8e7dd2011-04-05 17:23:59 +02006098 if (p->on_rq) {
Tejun Heo969c7922010-05-06 18:49:21 +02006099 struct migration_arg arg = { p, dest_cpu };
Linus Torvalds1da177e2005-04-16 15:20:36 -07006100 /* Need help from migration thread: drop lock and wait. */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02006101 task_rq_unlock(rq, p, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02006102 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006103 tlb_migrate_finish(p->mm);
6104 return 0;
6105 }
6106out:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02006107 task_rq_unlock(rq, p, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006108
Linus Torvalds1da177e2005-04-16 15:20:36 -07006109 return ret;
6110}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07006111EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
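
/*
 * Illustrative kernel-side sketch (editorial addition, not part of this
 * file): binding a kernel thread to one CPU and later letting it roam
 * again.  "ktask" and "cpu" are made-up.
 *
 *	ret = set_cpus_allowed_ptr(ktask, cpumask_of(cpu));
 *	if (ret)
 *		pr_err("cannot bind to cpu %d: %d\n", cpu, ret);
 *	...
 *	set_cpus_allowed_ptr(ktask, cpu_all_mask);
 */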
Linus Torvalds1da177e2005-04-16 15:20:36 -07006112
6113/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006114 * Move (not current) task off this cpu, onto dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07006115 * this because either it can't run here any more (set_cpus_allowed()
6116 * away from this CPU, or CPU going down), or because we're
6117 * attempting to rebalance this task on exec (sched_exec).
6118 *
6119 * So we race with normal scheduler movements, but that's OK, as long
6120 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07006121 *
6122 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006123 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07006124static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006125{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006126 struct rq *rq_dest, *rq_src;
Peter Zijlstrae2912002009-12-16 18:04:36 +01006127 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006128
Max Krasnyanskye761b772008-07-15 04:43:49 -07006129 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07006130 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006131
6132 rq_src = cpu_rq(src_cpu);
6133 rq_dest = cpu_rq(dest_cpu);
6134
Peter Zijlstra0122ec52011-04-05 17:23:51 +02006135 raw_spin_lock(&p->pi_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006136 double_rq_lock(rq_src, rq_dest);
6137 /* Already moved. */
6138 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07006139 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006140 /* Affinity changed (again). */
Rusty Russell96f874e2008-11-25 02:35:14 +10306141 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07006142 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006143
Peter Zijlstrae2912002009-12-16 18:04:36 +01006144 /*
6145 * If we're not on a rq, the next wake-up will ensure we're
6146 * placed properly.
6147 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02006148 if (p->on_rq) {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02006149 deactivate_task(rq_src, p, 0);
Peter Zijlstrae2912002009-12-16 18:04:36 +01006150 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02006151 activate_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02006152 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006153 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07006154done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07006155 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07006156fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006157 double_rq_unlock(rq_src, rq_dest);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02006158 raw_spin_unlock(&p->pi_lock);
Kirill Korotaevefc30812006-06-27 02:54:32 -07006159 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160}
6161
6162/*
Tejun Heo969c7922010-05-06 18:49:21 +02006163 * migration_cpu_stop - this will be executed by a highprio stopper thread
6164 * and performs thread migration by bumping thread off CPU then
6165 * 'pushing' onto another runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006166 */
Tejun Heo969c7922010-05-06 18:49:21 +02006167static int migration_cpu_stop(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006168{
Tejun Heo969c7922010-05-06 18:49:21 +02006169 struct migration_arg *arg = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006170
Tejun Heo969c7922010-05-06 18:49:21 +02006171 /*
6172 * The original target cpu might have gone down and we might
6173 * be on another cpu but it doesn't matter.
6174 */
6175 local_irq_disable();
6176 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
6177 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178 return 0;
6179}
6180
6181#ifdef CONFIG_HOTPLUG_CPU
Linus Torvalds1da177e2005-04-16 15:20:36 -07006182
Ingo Molnar48f24c42006-07-03 00:25:40 -07006183/*
6184 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07006185 * offline.
6186 */
6187void idle_task_exit(void)
6188{
6189 struct mm_struct *mm = current->active_mm;
6190
6191 BUG_ON(cpu_online(smp_processor_id()));
6192
6193 if (mm != &init_mm)
6194 switch_mm(mm, &init_mm, current);
6195 mmdrop(mm);
6196}
6197
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006198/*
6199 * While a dead CPU has no uninterruptible tasks queued at this point,
6200 * it might still have a nonzero ->nr_uninterruptible counter, because
6201	 * for performance reasons the counter is not strictly tracking tasks to
6202 * their home CPUs. So we just add the counter to another CPU's counter,
6203 * to keep the global sum constant after CPU-down:
6204 */
6205static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006206{
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006207 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006208
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006209 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6210 rq_src->nr_uninterruptible = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006211}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006212
6213/*
6214 * remove the tasks which were accounted by rq from calc_load_tasks.
6215 */
6216static void calc_global_load_remove(struct rq *rq)
6217{
6218 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
Thomas Gleixnera468d382009-07-17 14:15:46 +02006219 rq->calc_load_active = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006220}
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006221
6222/*
6223 * Migrate all tasks from the rq, sleeping tasks will be migrated by
6224 * try_to_wake_up()->select_task_rq().
6225 *
6226	 * Called with rq->lock held even though we're in stop_machine() and
6227 * there's no concurrency possible, we hold the required locks anyway
6228 * because of lock validation efforts.
6229 */
6230static void migrate_tasks(unsigned int dead_cpu)
6231{
6232 struct rq *rq = cpu_rq(dead_cpu);
6233 struct task_struct *next, *stop = rq->stop;
6234 int dest_cpu;
6235
6236 /*
6237 * Fudge the rq selection such that the below task selection loop
6238 * doesn't get stuck on the currently eligible stop task.
6239 *
6240 * We're currently inside stop_machine() and the rq is either stuck
6241 * in the stop_machine_cpu_stop() loop, or we're executing this code,
6242 * either way we should never end up calling schedule() until we're
6243 * done here.
6244 */
6245 rq->stop = NULL;
6246
6247 for ( ; ; ) {
6248 /*
6249 * There's this thread running, bail when that's the only
6250 * remaining thread.
6251 */
6252 if (rq->nr_running == 1)
6253 break;
6254
6255 next = pick_next_task(rq);
6256 BUG_ON(!next);
6257 next->sched_class->put_prev_task(rq, next);
6258
6259 /* Find suitable destination for @next, with force if needed. */
6260 dest_cpu = select_fallback_rq(dead_cpu, next);
6261 raw_spin_unlock(&rq->lock);
6262
6263 __migrate_task(next, dead_cpu, dest_cpu);
6264
6265 raw_spin_lock(&rq->lock);
6266 }
6267
6268 rq->stop = stop;
6269}
6270
Linus Torvalds1da177e2005-04-16 15:20:36 -07006271#endif /* CONFIG_HOTPLUG_CPU */
6272
Nick Piggine692ab52007-07-26 13:40:43 +02006273#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6274
6275static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006276 {
6277 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006278 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006279 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006280 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006281};
6282
6283static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006284 {
6285 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006286 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006287 .child = sd_ctl_dir,
6288 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006289 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006290};
6291
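/*
 * Allocate a zero-filled array of @n ctl_table entries; callers leave the
 * last (all-zero) entry untouched so it acts as the table terminator.
 */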
6292static struct ctl_table *sd_alloc_ctl_entry(int n)
6293{
6294 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02006295 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02006296
Nick Piggine692ab52007-07-26 13:40:43 +02006297 return entry;
6298}
6299
Milton Miller6382bc92007-10-15 17:00:19 +02006300static void sd_free_ctl_entry(struct ctl_table **tablep)
6301{
Milton Millercd7900762007-10-17 16:55:11 +02006302 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02006303
Milton Millercd7900762007-10-17 16:55:11 +02006304 /*
6305 * In the intermediate directories, both the child directory and
6306 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006307 * will always be set. In the lowest directory the names are
Milton Millercd7900762007-10-17 16:55:11 +02006308 * static strings and all have proc handlers.
6309 */
6310 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02006311 if (entry->child)
6312 sd_free_ctl_entry(&entry->child);
Milton Millercd7900762007-10-17 16:55:11 +02006313 if (entry->proc_handler == NULL)
6314 kfree(entry->procname);
6315 }
Milton Miller6382bc92007-10-15 17:00:19 +02006316
6317 kfree(*tablep);
6318 *tablep = NULL;
6319}
6320
Nick Piggine692ab52007-07-26 13:40:43 +02006321static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02006322set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02006323 const char *procname, void *data, int maxlen,
6324 mode_t mode, proc_handler *proc_handler)
6325{
Nick Piggine692ab52007-07-26 13:40:43 +02006326 entry->procname = procname;
6327 entry->data = data;
6328 entry->maxlen = maxlen;
6329 entry->mode = mode;
6330 entry->proc_handler = proc_handler;
6331}
6332
6333static struct ctl_table *
6334sd_alloc_ctl_domain_table(struct sched_domain *sd)
6335{
Ingo Molnara5d8c342008-10-09 11:35:51 +02006336 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02006337
Milton Millerad1cdc12007-10-15 17:00:19 +02006338 if (table == NULL)
6339 return NULL;
6340
Alexey Dobriyane0361852007-08-09 11:16:46 +02006341 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006342 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006343 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006344 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006345 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006346 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006347 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006348 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006349 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006350 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006351 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006352 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006353 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006354 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006355 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02006356 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006357 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02006358 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006359 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02006360 &sd->cache_nice_tries,
6361 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006362 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02006363 sizeof(int), 0644, proc_dointvec_minmax);
Ingo Molnara5d8c342008-10-09 11:35:51 +02006364 set_table_entry(&table[11], "name", sd->name,
6365 CORENAME_MAX_SIZE, 0444, proc_dostring);
6366 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02006367
6368 return table;
6369}
6370
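/*
 * Build the sysctl table for one cpu: a "domain%d" directory for each
 * sched domain currently attached to @cpu.
 */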
Ingo Molnar9a4e7152007-11-28 15:52:56 +01006371static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02006372{
6373 struct ctl_table *entry, *table;
6374 struct sched_domain *sd;
6375 int domain_num = 0, i;
6376 char buf[32];
6377
6378 for_each_domain(cpu, sd)
6379 domain_num++;
6380 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02006381 if (table == NULL)
6382 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02006383
6384 i = 0;
6385 for_each_domain(cpu, sd) {
6386 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006387 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006388 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006389 entry->child = sd_alloc_ctl_domain_table(sd);
6390 entry++;
6391 i++;
6392 }
6393 return table;
6394}
6395
6396static struct ctl_table_header *sd_sysctl_header;
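/*
 * Build the kernel.sched_domain/cpu%d/domain%d sysctl hierarchy for all
 * possible CPUs and register it.
 */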
Milton Miller6382bc92007-10-15 17:00:19 +02006397static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006398{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006399 int i, cpu_num = num_possible_cpus();
Nick Piggine692ab52007-07-26 13:40:43 +02006400 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6401 char buf[32];
6402
Milton Miller73785472007-10-24 18:23:48 +02006403 WARN_ON(sd_ctl_dir[0].child);
6404 sd_ctl_dir[0].child = entry;
6405
Milton Millerad1cdc12007-10-15 17:00:19 +02006406 if (entry == NULL)
6407 return;
6408
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006409 for_each_possible_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02006410 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006411 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006412 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006413 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02006414 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02006415 }
Milton Miller73785472007-10-24 18:23:48 +02006416
6417 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02006418 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6419}
Milton Miller6382bc92007-10-15 17:00:19 +02006420
Milton Miller73785472007-10-24 18:23:48 +02006421/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02006422static void unregister_sched_domain_sysctl(void)
6423{
Milton Miller73785472007-10-24 18:23:48 +02006424 if (sd_sysctl_header)
6425 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02006426 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02006427 if (sd_ctl_dir[0].child)
6428 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02006429}
Nick Piggine692ab52007-07-26 13:40:43 +02006430#else
Milton Miller6382bc92007-10-15 17:00:19 +02006431static void register_sched_domain_sysctl(void)
6432{
6433}
6434static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006435{
6436}
6437#endif
6438
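/*
 * Mark this runqueue's cpu as online in its root domain and give every
 * scheduling class a chance to update its state via ->rq_online().
 */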
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006439static void set_rq_online(struct rq *rq)
6440{
6441 if (!rq->online) {
6442 const struct sched_class *class;
6443
Rusty Russellc6c49272008-11-25 02:35:05 +10306444 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006445 rq->online = 1;
6446
6447 for_each_class(class) {
6448 if (class->rq_online)
6449 class->rq_online(rq);
6450 }
6451 }
6452}
6453
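/*
 * The reverse of set_rq_online(): notify each scheduling class via
 * ->rq_offline() and clear the cpu from the root domain's online mask.
 */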
6454static void set_rq_offline(struct rq *rq)
6455{
6456 if (rq->online) {
6457 const struct sched_class *class;
6458
6459 for_each_class(class) {
6460 if (class->rq_offline)
6461 class->rq_offline(rq);
6462 }
6463
Rusty Russellc6c49272008-11-25 02:35:05 +10306464 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006465 rq->online = 0;
6466 }
6467}
6468
Linus Torvalds1da177e2005-04-16 15:20:36 -07006469/*
6470 * migration_call - callback that gets triggered when a CPU is added.
6471 * Here we can start up the necessary migration thread for the new CPU.
6472 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006473static int __cpuinit
6474migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006475{
Ingo Molnar48f24c42006-07-03 00:25:40 -07006476 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006477 unsigned long flags;
Tejun Heo969c7922010-05-06 18:49:21 +02006478 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006479
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006480 switch (action & ~CPU_TASKS_FROZEN) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07006481
Linus Torvalds1da177e2005-04-16 15:20:36 -07006482 case CPU_UP_PREPARE:
Thomas Gleixnera468d382009-07-17 14:15:46 +02006483 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006484 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006485
Linus Torvalds1da177e2005-04-16 15:20:36 -07006486 case CPU_ONLINE:
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006487 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006488 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006489 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306490 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006491
6492 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006493 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006494 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006495 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006496
Linus Torvalds1da177e2005-04-16 15:20:36 -07006497#ifdef CONFIG_HOTPLUG_CPU
Gregory Haskins08f503b2008-03-10 17:59:11 -04006498 case CPU_DYING:
Peter Zijlstra317f3942011-04-05 17:23:58 +02006499 sched_ttwu_pending();
Gregory Haskins57d885f2008-01-25 21:08:18 +01006500 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006501 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006502 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306503 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006504 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006505 }
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006506 migrate_tasks(cpu);
6507 BUG_ON(rq->nr_running != 1); /* the migration thread */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006508 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006509
6510 migrate_nr_uninterruptible(rq);
6511 calc_global_load_remove(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006512 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006513#endif
6514 }
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006515
6516 update_max_interval();
6517
Linus Torvalds1da177e2005-04-16 15:20:36 -07006518 return NOTIFY_OK;
6519}
6520
Paul Mackerrasf38b0822009-06-02 21:05:16 +10006521/*
6522 * Register at high priority so that task migration (migrate_tasks())
6523 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006524 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006525 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07006526static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006527 .notifier_call = migration_call,
Tejun Heo50a323b2010-06-08 21:40:36 +02006528 .priority = CPU_PRI_MIGRATION,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006529};
6530
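/*
 * The notifiers below keep the cpu_active mask in sync with hotplug:
 * a cpu becomes active on CPU_ONLINE (or a failed CPU_DOWN) and is
 * cleared from the active mask at CPU_DOWN_PREPARE.
 */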
Tejun Heo3a101d02010-06-08 21:40:36 +02006531static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6532 unsigned long action, void *hcpu)
6533{
6534 switch (action & ~CPU_TASKS_FROZEN) {
6535 case CPU_ONLINE:
6536 case CPU_DOWN_FAILED:
6537 set_cpu_active((long)hcpu, true);
6538 return NOTIFY_OK;
6539 default:
6540 return NOTIFY_DONE;
6541 }
6542}
6543
6544static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6545 unsigned long action, void *hcpu)
6546{
6547 switch (action & ~CPU_TASKS_FROZEN) {
6548 case CPU_DOWN_PREPARE:
6549 set_cpu_active((long)hcpu, false);
6550 return NOTIFY_OK;
6551 default:
6552 return NOTIFY_DONE;
6553 }
6554}
6555
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006556static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006557{
6558 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07006559 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006560
Tejun Heo3a101d02010-06-08 21:40:36 +02006561 /* Initialize migration for the boot CPU */
Akinobu Mita07dccf32006-09-29 02:00:22 -07006562 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6563 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006564 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6565 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006566
Tejun Heo3a101d02010-06-08 21:40:36 +02006567 /* Register cpu active notifiers */
6568 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6569 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6570
Thomas Gleixnera004cd42009-07-21 09:54:05 +02006571 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006572}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006573early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006574#endif
6575
6576#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07006577
Peter Zijlstra4cb98832011-04-07 14:09:58 +02006578static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
6579
Ingo Molnar3e9830d2007-10-15 17:00:13 +02006580#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006581
Mike Travisf6630112009-11-17 18:22:15 -06006582static __read_mostly int sched_domain_debug_enabled;
6583
6584static int __init sched_domain_debug_setup(char *str)
6585{
6586 sched_domain_debug_enabled = 1;
6587
6588 return 0;
6589}
6590early_param("sched_debug", sched_domain_debug_setup);
6591
Mike Travis7c16ec52008-04-04 18:11:11 -07006592static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10306593 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006594{
6595 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07006596 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006597
Rusty Russell968ea6d2008-12-13 21:55:51 +10306598 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10306599 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006600
6601 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6602
6603 if (!(sd->flags & SD_LOAD_BALANCE)) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006604 printk("does not load-balance\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006605 if (sd->parent)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006606 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6607 " has parent");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006608 return -1;
6609 }
6610
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006611 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006612
Rusty Russell758b2cd2008-11-25 02:35:04 +10306613 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006614 printk(KERN_ERR "ERROR: domain->span does not contain "
6615 "CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006616 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10306617 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006618 printk(KERN_ERR "ERROR: domain->groups does not contain"
6619 " CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006620 }
6621
6622 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6623 do {
6624 if (!group) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006625 printk("\n");
6626 printk(KERN_ERR "ERROR: group is NULL\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006627 break;
6628 }
6629
Peter Zijlstra18a38852009-09-01 10:34:39 +02006630 if (!group->cpu_power) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006631 printk(KERN_CONT "\n");
6632 printk(KERN_ERR "ERROR: domain->cpu_power not "
6633 "set\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006634 break;
6635 }
6636
Rusty Russell758b2cd2008-11-25 02:35:04 +10306637 if (!cpumask_weight(sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006638 printk(KERN_CONT "\n");
6639 printk(KERN_ERR "ERROR: empty group\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006640 break;
6641 }
6642
Rusty Russell758b2cd2008-11-25 02:35:04 +10306643 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006644 printk(KERN_CONT "\n");
6645 printk(KERN_ERR "ERROR: repeated CPUs\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006646 break;
6647 }
6648
Rusty Russell758b2cd2008-11-25 02:35:04 +10306649 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006650
Rusty Russell968ea6d2008-12-13 21:55:51 +10306651 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306652
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006653 printk(KERN_CONT " %s", str);
Nikhil Rao1399fa72011-05-18 10:09:39 -07006654 if (group->cpu_power != SCHED_POWER_SCALE) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006655 printk(KERN_CONT " (cpu_power = %d)",
6656 group->cpu_power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306657 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006658
6659 group = group->next;
6660 } while (group != sd->groups);
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006661 printk(KERN_CONT "\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006662
Rusty Russell758b2cd2008-11-25 02:35:04 +10306663 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006664 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006665
Rusty Russell758b2cd2008-11-25 02:35:04 +10306666 if (sd->parent &&
6667 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006668 printk(KERN_ERR "ERROR: parent span is not a superset "
6669 "of domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006670 return 0;
6671}
6672
Linus Torvalds1da177e2005-04-16 15:20:36 -07006673static void sched_domain_debug(struct sched_domain *sd, int cpu)
6674{
6675 int level = 0;
6676
Mike Travisf6630112009-11-17 18:22:15 -06006677 if (!sched_domain_debug_enabled)
6678 return;
6679
Nick Piggin41c7ce92005-06-25 14:57:24 -07006680 if (!sd) {
6681 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6682 return;
6683 }
6684
Linus Torvalds1da177e2005-04-16 15:20:36 -07006685 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6686
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006687 for (;;) {
Peter Zijlstra4cb98832011-04-07 14:09:58 +02006688 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006689 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006690 level++;
6691 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08006692 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006693 break;
6694 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006695}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006696#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006697# define sched_domain_debug(sd, cpu) do { } while (0)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006698#endif /* CONFIG_SCHED_DEBUG */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006699
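/*
 * A sched domain is "degenerate" (and can be collapsed) when it spans a
 * single CPU, or when none of its flags can have any effect: the balancing
 * flags need at least two groups, and SD_WAKE_AFFINE is the only checked
 * flag that is useful with a single group.
 */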
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006700static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006701{
Rusty Russell758b2cd2008-11-25 02:35:04 +10306702 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006703 return 1;
6704
6705 /* Following flags need at least 2 groups */
6706 if (sd->flags & (SD_LOAD_BALANCE |
6707 SD_BALANCE_NEWIDLE |
6708 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006709 SD_BALANCE_EXEC |
6710 SD_SHARE_CPUPOWER |
6711 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006712 if (sd->groups != sd->groups->next)
6713 return 0;
6714 }
6715
6716 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006717 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006718 return 0;
6719
6720 return 1;
6721}
6722
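/*
 * The parent domain can be dropped when it is itself degenerate, or when
 * it spans exactly the same CPUs as @sd and sets no useful flags beyond
 * @sd's own (flags that need multiple groups don't count if the parent
 * has only one group).
 */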
Ingo Molnar48f24c42006-07-03 00:25:40 -07006723static int
6724sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006725{
6726 unsigned long cflags = sd->flags, pflags = parent->flags;
6727
6728 if (sd_degenerate(parent))
6729 return 1;
6730
Rusty Russell758b2cd2008-11-25 02:35:04 +10306731 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006732 return 0;
6733
Suresh Siddha245af2c2005-06-25 14:57:25 -07006734 /* Flags needing groups don't count if only 1 group in parent */
6735 if (parent->groups == parent->groups->next) {
6736 pflags &= ~(SD_LOAD_BALANCE |
6737 SD_BALANCE_NEWIDLE |
6738 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006739 SD_BALANCE_EXEC |
6740 SD_SHARE_CPUPOWER |
6741 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08006742 if (nr_node_ids == 1)
6743 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006744 }
6745 if (~cflags & pflags)
6746 return 0;
6747
6748 return 1;
6749}
6750
Peter Zijlstradce840a2011-04-07 14:09:50 +02006751static void free_rootdomain(struct rcu_head *rcu)
Rusty Russellc6c49272008-11-25 02:35:05 +10306752{
Peter Zijlstradce840a2011-04-07 14:09:50 +02006753 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
Peter Zijlstra047106a2009-11-16 10:28:09 +01006754
Rusty Russell68e74562008-11-25 02:35:13 +10306755 cpupri_cleanup(&rd->cpupri);
Rusty Russellc6c49272008-11-25 02:35:05 +10306756 free_cpumask_var(rd->rto_mask);
6757 free_cpumask_var(rd->online);
6758 free_cpumask_var(rd->span);
6759 kfree(rd);
6760}
6761
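/*
 * Attach @rq to the root domain @rd: take the rq offline in its old root
 * domain, drop that domain's reference (freeing it via RCU if it was the
 * last one), then bring the rq online in @rd if its cpu is active.
 */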
Gregory Haskins57d885f2008-01-25 21:08:18 +01006762static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6763{
Ingo Molnara0490fa2009-02-12 11:35:40 +01006764 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006765 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006766
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006767 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006768
6769 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01006770 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006771
Rusty Russellc6c49272008-11-25 02:35:05 +10306772 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006773 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006774
Rusty Russellc6c49272008-11-25 02:35:05 +10306775 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01006776
Ingo Molnara0490fa2009-02-12 11:35:40 +01006777 /*
6778 * If we don't want to free the old_rd yet then
6779 * set old_rd to NULL to skip the freeing later
6780 * in this function:
6781 */
6782 if (!atomic_dec_and_test(&old_rd->refcount))
6783 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006784 }
6785
6786 atomic_inc(&rd->refcount);
6787 rq->rd = rd;
6788
Rusty Russellc6c49272008-11-25 02:35:05 +10306789 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04006790 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006791 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006792
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006793 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01006794
6795 if (old_rd)
Peter Zijlstradce840a2011-04-07 14:09:50 +02006796 call_rcu_sched(&old_rd->rcu, free_rootdomain);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006797}
6798
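/*
 * Initialize an already-allocated root domain: allocate its span, online
 * and rto cpumasks plus the cpupri state, unwinding on failure.
 */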
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006799static int init_rootdomain(struct root_domain *rd)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006800{
6801 memset(rd, 0, sizeof(*rd));
6802
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006803 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
Li Zefan0c910d22009-01-06 17:39:06 +08006804 goto out;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006805 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306806 goto free_span;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006807 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306808 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02006809
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006810 if (cpupri_init(&rd->cpupri) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10306811 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10306812 return 0;
6813
Rusty Russell68e74562008-11-25 02:35:13 +10306814free_rto_mask:
6815 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10306816free_online:
6817 free_cpumask_var(rd->online);
6818free_span:
6819 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08006820out:
Rusty Russellc6c49272008-11-25 02:35:05 +10306821 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006822}
6823
6824static void init_defrootdomain(void)
6825{
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006826 init_rootdomain(&def_root_domain);
Rusty Russellc6c49272008-11-25 02:35:05 +10306827
Gregory Haskins57d885f2008-01-25 21:08:18 +01006828 atomic_set(&def_root_domain.refcount, 1);
6829}
6830
Gregory Haskinsdc938522008-01-25 21:08:26 +01006831static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006832{
6833 struct root_domain *rd;
6834
6835 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6836 if (!rd)
6837 return NULL;
6838
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006839 if (init_rootdomain(rd) != 0) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306840 kfree(rd);
6841 return NULL;
6842 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006843
6844 return rd;
6845}
6846
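/*
 * Domain tear-down helpers: domains are freed via RCU so that code walking
 * the domain tree under rcu_read_lock() never sees freed memory, and the
 * groups are reference counted because they are shared between the per-cpu
 * domains of a level.
 */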
Peter Zijlstradce840a2011-04-07 14:09:50 +02006847static void free_sched_domain(struct rcu_head *rcu)
6848{
6849 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
6850 if (atomic_dec_and_test(&sd->groups->ref))
6851 kfree(sd->groups);
6852 kfree(sd);
6853}
6854
6855static void destroy_sched_domain(struct sched_domain *sd, int cpu)
6856{
6857 call_rcu(&sd->rcu, free_sched_domain);
6858}
6859
6860static void destroy_sched_domains(struct sched_domain *sd, int cpu)
6861{
6862 for (; sd; sd = sd->parent)
6863 destroy_sched_domain(sd, cpu);
6864}
6865
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01006867 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07006868 * hold the hotplug lock.
6869 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01006870static void
6871cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006872{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006873 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006874 struct sched_domain *tmp;
6875
6876 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08006877 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006878 struct sched_domain *parent = tmp->parent;
6879 if (!parent)
6880 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08006881
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006882 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006883 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006884 if (parent->parent)
6885 parent->parent->child = tmp;
Peter Zijlstradce840a2011-04-07 14:09:50 +02006886 destroy_sched_domain(parent, cpu);
Li Zefanf29c9b12008-11-06 09:45:16 +08006887 } else
6888 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006889 }
6890
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006891 if (sd && sd_degenerate(sd)) {
Peter Zijlstradce840a2011-04-07 14:09:50 +02006892 tmp = sd;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006893 sd = sd->parent;
Peter Zijlstradce840a2011-04-07 14:09:50 +02006894 destroy_sched_domain(tmp, cpu);
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006895 if (sd)
6896 sd->child = NULL;
6897 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006898
Peter Zijlstra4cb98832011-04-07 14:09:58 +02006899 sched_domain_debug(sd, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006900
Gregory Haskins57d885f2008-01-25 21:08:18 +01006901 rq_attach_root(rq, rd);
Peter Zijlstradce840a2011-04-07 14:09:50 +02006902 tmp = rq->sd;
Nick Piggin674311d2005-06-25 14:57:27 -07006903 rcu_assign_pointer(rq->sd, sd);
Peter Zijlstradce840a2011-04-07 14:09:50 +02006904 destroy_sched_domains(tmp, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905}
6906
6907/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10306908static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006909
6910/* Setup the mask of cpus configured for isolated domains */
6911static int __init isolated_cpu_setup(char *str)
6912{
Rusty Russellbdddd292009-12-02 14:09:16 +10306913 alloc_bootmem_cpumask_var(&cpu_isolated_map);
Rusty Russell968ea6d2008-12-13 21:55:51 +10306914 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006915 return 1;
6916}
6917
Ingo Molnar8927f492007-10-15 17:00:13 +02006918__setup("isolcpus=", isolated_cpu_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006919
John Hawkes9c1cfda2005-09-06 15:18:14 -07006920#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07006921
John Hawkes9c1cfda2005-09-06 15:18:14 -07006922#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08006923
John Hawkes9c1cfda2005-09-06 15:18:14 -07006924/**
6925 * find_next_best_node - find the next node to include in a sched_domain
6926 * @node: node whose sched_domain we're building
6927 * @used_nodes: nodes already in the sched_domain
6928 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006929 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07006930 * finds the closest node not already in the @used_nodes map.
6931 *
6932 * Should use nodemask_t.
6933 */
Mike Travisc5f59f02008-04-04 18:11:10 -07006934static int find_next_best_node(int node, nodemask_t *used_nodes)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006935{
Hillf Danton7142d172011-05-05 20:53:20 +08006936 int i, n, val, min_val, best_node = -1;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006937
6938 min_val = INT_MAX;
6939
Mike Travis076ac2a2008-05-12 21:21:12 +02006940 for (i = 0; i < nr_node_ids; i++) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07006941 /* Start at @node */
Mike Travis076ac2a2008-05-12 21:21:12 +02006942 n = (node + i) % nr_node_ids;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006943
6944 if (!nr_cpus_node(n))
6945 continue;
6946
6947 /* Skip already used nodes */
Mike Travisc5f59f02008-04-04 18:11:10 -07006948 if (node_isset(n, *used_nodes))
John Hawkes9c1cfda2005-09-06 15:18:14 -07006949 continue;
6950
6951 /* Simple min distance search */
6952 val = node_distance(node, n);
6953
6954 if (val < min_val) {
6955 min_val = val;
6956 best_node = n;
6957 }
6958 }
6959
Hillf Danton7142d172011-05-05 20:53:20 +08006960 if (best_node != -1)
6961 node_set(best_node, *used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006962 return best_node;
6963}
6964
6965/**
6966 * sched_domain_node_span - get a cpumask for a node's sched_domain
6967 * @node: node whose cpumask we're constructing
Randy Dunlap73486722008-04-22 10:07:22 -07006968 * @span: resulting cpumask
John Hawkes9c1cfda2005-09-06 15:18:14 -07006969 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006970 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07006971 * should be one that prevents unnecessary balancing, but also spreads tasks
6972 * out optimally.
6973 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306974static void sched_domain_node_span(int node, struct cpumask *span)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006975{
Mike Travisc5f59f02008-04-04 18:11:10 -07006976 nodemask_t used_nodes;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006977 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006978
Mike Travis6ca09df2008-12-31 18:08:45 -08006979 cpumask_clear(span);
Mike Travisc5f59f02008-04-04 18:11:10 -07006980 nodes_clear(used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006981
Mike Travis6ca09df2008-12-31 18:08:45 -08006982 cpumask_or(span, span, cpumask_of_node(node));
Mike Travisc5f59f02008-04-04 18:11:10 -07006983 node_set(node, used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006984
6985 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
Mike Travisc5f59f02008-04-04 18:11:10 -07006986 int next_node = find_next_best_node(node, &used_nodes);
Hillf Danton7142d172011-05-05 20:53:20 +08006987 if (next_node < 0)
6988 break;
Mike Travis6ca09df2008-12-31 18:08:45 -08006989 cpumask_or(span, span, cpumask_of_node(next_node));
John Hawkes9c1cfda2005-09-06 15:18:14 -07006990 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006991}
Peter Zijlstrad3081f52011-04-07 14:09:59 +02006992
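/*
 * Span of the NUMA "NODE" level for @cpu, computed into
 * sched_domains_tmpmask; sched_domains_mutex must be held.
 */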
6993static const struct cpumask *cpu_node_mask(int cpu)
6994{
6995 lockdep_assert_held(&sched_domains_mutex);
6996
6997 sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
6998
6999 return sched_domains_tmpmask;
7000}
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007001
7002static const struct cpumask *cpu_allnodes_mask(int cpu)
7003{
7004 return cpu_possible_mask;
7005}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007006#endif /* CONFIG_NUMA */
John Hawkes9c1cfda2005-09-06 15:18:14 -07007007
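/* The "CPU" level domain spans all CPUs on @cpu's node. */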
Peter Zijlstrad3081f52011-04-07 14:09:59 +02007008static const struct cpumask *cpu_cpu_mask(int cpu)
7009{
7010 return cpumask_of_node(cpu_to_node(cpu));
7011}
7012
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007013int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007014
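/*
 * Per topology level, per-cpu storage for the sched domains and groups
 * allocated by __sdt_alloc() and wired up by build_sched_domains().
 */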
Peter Zijlstradce840a2011-04-07 14:09:50 +02007015struct sd_data {
7016 struct sched_domain **__percpu sd;
7017 struct sched_group **__percpu sg;
7018};
7019
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007020struct s_data {
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02007021 struct sched_domain ** __percpu sd;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007022 struct root_domain *rd;
7023};
7024
Andreas Herrmann2109b992009-08-18 12:53:00 +02007025enum s_alloc {
Andreas Herrmann2109b992009-08-18 12:53:00 +02007026 sa_rootdomain,
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02007027 sa_sd,
Peter Zijlstradce840a2011-04-07 14:09:50 +02007028 sa_sd_storage,
Andreas Herrmann2109b992009-08-18 12:53:00 +02007029 sa_none,
7030};
7031
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007032struct sched_domain_topology_level;
7033
7034typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02007035typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
7036
7037struct sched_domain_topology_level {
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007038 sched_domain_init_f init;
7039 sched_domain_mask_f mask;
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007040 struct sd_data data;
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02007041};
7042
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307043/*
Peter Zijlstradce840a2011-04-07 14:09:50 +02007044 * Assumes the sched_domain tree is fully constructed
John Hawkes9c1cfda2005-09-06 15:18:14 -07007045 */
Peter Zijlstradce840a2011-04-07 14:09:50 +02007046static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007047{
Peter Zijlstradce840a2011-04-07 14:09:50 +02007048 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
7049 struct sched_domain *child = sd->child;
7050
7051 if (child)
7052 cpu = cpumask_first(sched_domain_span(child));
7053
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007054 if (sg)
Peter Zijlstradce840a2011-04-07 14:09:50 +02007055 *sg = *per_cpu_ptr(sdd->sg, cpu);
7056
Linus Torvalds1da177e2005-04-16 15:20:36 -07007057 return cpu;
7058}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007059
Ingo Molnar48f24c42006-07-03 00:25:40 -07007060/*
Peter Zijlstradce840a2011-04-07 14:09:50 +02007061 * build_sched_groups takes the sched domain whose span we wish to cover;
7062 * get_group() identifies which group (along with its sched_group structure)
7063 * each CPU belongs to. The group index is a cpu number, so it is >= 0 and
7064 * < nr_cpu_ids (we keep track of the groups covered with a struct cpumask).
7065 *
7066 * build_sched_groups will build a circular linked list of the groups
7067 * covered by the given span, and will set each group's ->cpumask correctly,
7068 * and ->cpu_power to 0.
Ingo Molnar48f24c42006-07-03 00:25:40 -07007069 */
Peter Zijlstradce840a2011-04-07 14:09:50 +02007070static void
Peter Zijlstraf96225f2011-04-07 14:09:57 +02007071build_sched_groups(struct sched_domain *sd)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007072{
Peter Zijlstradce840a2011-04-07 14:09:50 +02007073 struct sched_group *first = NULL, *last = NULL;
7074 struct sd_data *sdd = sd->private;
7075 const struct cpumask *span = sched_domain_span(sd);
Peter Zijlstraf96225f2011-04-07 14:09:57 +02007076 struct cpumask *covered;
Peter Zijlstradce840a2011-04-07 14:09:50 +02007077 int i;
7078
Peter Zijlstraf96225f2011-04-07 14:09:57 +02007079 lockdep_assert_held(&sched_domains_mutex);
7080 covered = sched_domains_tmpmask;
7081
Peter Zijlstradce840a2011-04-07 14:09:50 +02007082 cpumask_clear(covered);
7083
7084 for_each_cpu(i, span) {
7085 struct sched_group *sg;
7086 int group = get_group(i, sdd, &sg);
7087 int j;
7088
7089 if (cpumask_test_cpu(i, covered))
7090 continue;
7091
7092 cpumask_clear(sched_group_cpus(sg));
7093 sg->cpu_power = 0;
7094
7095 for_each_cpu(j, span) {
7096 if (get_group(j, sdd, NULL) != group)
7097 continue;
7098
7099 cpumask_set_cpu(j, covered);
7100 cpumask_set_cpu(j, sched_group_cpus(sg));
7101 }
7102
7103 if (!first)
7104 first = sg;
7105 if (last)
7106 last->next = sg;
7107 last = sg;
7108 }
7109 last->next = first;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007110}
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007111
Linus Torvalds1da177e2005-04-16 15:20:36 -07007112/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007113 * Initialize sched groups cpu_power.
7114 *
7115 * cpu_power indicates the capacity of sched group, which is used while
7116 * distributing the load between different sched groups in a sched domain.
7117 * Typically cpu_power for all the groups in a sched domain will be same unless
7118 * there are asymmetries in the topology. If there are asymmetries, group
7119 * having more cpu_power will pickup more load compared to the group having
7120 * less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007121 */
7122static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7123{
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007124 WARN_ON(!sd || !sd->groups);
7125
Miao Xie13318a72009-04-15 09:59:10 +08007126 if (cpu != group_first_cpu(sd->groups))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007127 return;
7128
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07007129 sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
7130
Peter Zijlstrad274cb32011-04-07 14:09:43 +02007131 update_group_power(sd, cpu);
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007132}
7133
7134/*
Mike Travis7c16ec52008-04-04 18:11:11 -07007135 * Initializers for schedule domains
7136 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7137 */
7138
Ingo Molnara5d8c342008-10-09 11:35:51 +02007139#ifdef CONFIG_SCHED_DEBUG
7140# define SD_INIT_NAME(sd, type) sd->name = #type
7141#else
7142# define SD_INIT_NAME(sd, type) do { } while (0)
7143#endif
7144
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007145#define SD_INIT_FUNC(type) \
7146static noinline struct sched_domain * \
7147sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
7148{ \
7149 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
7150 *sd = SD_##type##_INIT; \
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007151 SD_INIT_NAME(sd, type); \
7152 sd->private = &tl->data; \
7153 return sd; \
Mike Travis7c16ec52008-04-04 18:11:11 -07007154}
7155
7156SD_INIT_FUNC(CPU)
7157#ifdef CONFIG_NUMA
7158 SD_INIT_FUNC(ALLNODES)
7159 SD_INIT_FUNC(NODE)
7160#endif
7161#ifdef CONFIG_SCHED_SMT
7162 SD_INIT_FUNC(SIBLING)
7163#endif
7164#ifdef CONFIG_SCHED_MC
7165 SD_INIT_FUNC(MC)
7166#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007167#ifdef CONFIG_SCHED_BOOK
7168 SD_INIT_FUNC(BOOK)
7169#endif
Mike Travis7c16ec52008-04-04 18:11:11 -07007170
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007171static int default_relax_domain_level = -1;
Peter Zijlstra60495e72011-04-07 14:10:04 +02007172int sched_domain_level_max;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007173
7174static int __init setup_relax_domain_level(char *str)
7175{
Li Zefan30e0e172008-05-13 10:27:17 +08007176 unsigned long val;
7177
7178 val = simple_strtoul(str, NULL, 0);
Peter Zijlstra60495e72011-04-07 14:10:04 +02007179 if (val < sched_domain_level_max)
Li Zefan30e0e172008-05-13 10:27:17 +08007180 default_relax_domain_level = val;
7181
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007182 return 1;
7183}
7184__setup("relax_domain_level=", setup_relax_domain_level);
7185
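/*
 * Apply the relax_domain_level attribute (or the boot-time default):
 * domains above the requested level have wake/newidle balancing turned
 * off, domains at or below it have it turned on.
 */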
7186static void set_domain_attribute(struct sched_domain *sd,
7187 struct sched_domain_attr *attr)
7188{
7189 int request;
7190
7191 if (!attr || attr->relax_domain_level < 0) {
7192 if (default_relax_domain_level < 0)
7193 return;
7194 else
7195 request = default_relax_domain_level;
7196 } else
7197 request = attr->relax_domain_level;
7198 if (request < sd->level) {
7199 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007200 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007201 } else {
7202 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007203 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007204 }
7205}
7206
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007207static void __sdt_free(const struct cpumask *cpu_map);
7208static int __sdt_alloc(const struct cpumask *cpu_map);
7209
Andreas Herrmann2109b992009-08-18 12:53:00 +02007210static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7211 const struct cpumask *cpu_map)
7212{
7213 switch (what) {
Andreas Herrmann2109b992009-08-18 12:53:00 +02007214 case sa_rootdomain:
Peter Zijlstra822ff792011-04-07 14:09:51 +02007215 if (!atomic_read(&d->rd->refcount))
7216 free_rootdomain(&d->rd->rcu); /* fall through */
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02007217 case sa_sd:
7218 free_percpu(d->sd); /* fall through */
Peter Zijlstradce840a2011-04-07 14:09:50 +02007219 case sa_sd_storage:
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007220 __sdt_free(cpu_map); /* fall through */
Andreas Herrmann2109b992009-08-18 12:53:00 +02007221 case sa_none:
7222 break;
7223 }
7224}
7225
7226static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7227 const struct cpumask *cpu_map)
7228{
Peter Zijlstradce840a2011-04-07 14:09:50 +02007229 memset(d, 0, sizeof(*d));
7230
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007231 if (__sdt_alloc(cpu_map))
7232 return sa_sd_storage;
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02007233 d->sd = alloc_percpu(struct sched_domain *);
Peter Zijlstradce840a2011-04-07 14:09:50 +02007234 if (!d->sd)
7235 return sa_sd_storage;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007236 d->rd = alloc_rootdomain();
Peter Zijlstradce840a2011-04-07 14:09:50 +02007237 if (!d->rd)
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02007238 return sa_sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007239 return sa_rootdomain;
7240}
7241
Peter Zijlstradce840a2011-04-07 14:09:50 +02007242/*
7243 * NULL the sd_data elements we've used to build the sched_domain and
7244 * sched_group structure so that the subsequent __free_domain_allocs()
7245 * will not free the data we're using.
7246 */
7247static void claim_allocations(int cpu, struct sched_domain *sd)
7248{
7249 struct sd_data *sdd = sd->private;
7250 struct sched_group *sg = sd->groups;
7251
7252 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
7253 *per_cpu_ptr(sdd->sd, cpu) = NULL;
7254
7255 if (cpu == cpumask_first(sched_group_cpus(sg))) {
7256 WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
7257 *per_cpu_ptr(sdd->sg, cpu) = NULL;
7258 }
7259}
7260
Andreas Herrmannd8173532009-08-18 12:57:03 +02007261#ifdef CONFIG_SCHED_SMT
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007262static const struct cpumask *cpu_smt_mask(int cpu)
7263{
7264 return topology_thread_cpumask(cpu);
Andreas Herrmannd8173532009-08-18 12:57:03 +02007265}
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007266#endif
Andreas Herrmannd8173532009-08-18 12:57:03 +02007267
Peter Zijlstrad069b912011-04-07 14:10:02 +02007268/*
7269 * Topology list, bottom-up.
7270 */
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02007271static struct sched_domain_topology_level default_topology[] = {
Peter Zijlstrad069b912011-04-07 14:10:02 +02007272#ifdef CONFIG_SCHED_SMT
7273 { sd_init_SIBLING, cpu_smt_mask, },
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007274#endif
7275#ifdef CONFIG_SCHED_MC
7276 { sd_init_MC, cpu_coregroup_mask, },
7277#endif
Peter Zijlstrad069b912011-04-07 14:10:02 +02007278#ifdef CONFIG_SCHED_BOOK
7279 { sd_init_BOOK, cpu_book_mask, },
7280#endif
7281 { sd_init_CPU, cpu_cpu_mask, },
7282#ifdef CONFIG_NUMA
7283 { sd_init_NODE, cpu_node_mask, },
7284 { sd_init_ALLNODES, cpu_allnodes_mask, },
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007285#endif
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02007286 { NULL, },
7287};
7288
7289static struct sched_domain_topology_level *sched_domain_topology = default_topology;
7290
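/*
 * Allocate, for every topology level, the per-cpu sched_domain and
 * sched_group objects for each cpu in @cpu_map.
 */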
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007291static int __sdt_alloc(const struct cpumask *cpu_map)
7292{
7293 struct sched_domain_topology_level *tl;
7294 int j;
7295
7296 for (tl = sched_domain_topology; tl->init; tl++) {
7297 struct sd_data *sdd = &tl->data;
7298
7299 sdd->sd = alloc_percpu(struct sched_domain *);
7300 if (!sdd->sd)
7301 return -ENOMEM;
7302
7303 sdd->sg = alloc_percpu(struct sched_group *);
7304 if (!sdd->sg)
7305 return -ENOMEM;
7306
7307 for_each_cpu(j, cpu_map) {
7308 struct sched_domain *sd;
7309 struct sched_group *sg;
7310
7311 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
7312 GFP_KERNEL, cpu_to_node(j));
7313 if (!sd)
7314 return -ENOMEM;
7315
7316 *per_cpu_ptr(sdd->sd, j) = sd;
7317
7318 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
7319 GFP_KERNEL, cpu_to_node(j));
7320 if (!sg)
7321 return -ENOMEM;
7322
7323 *per_cpu_ptr(sdd->sg, j) = sg;
7324 }
7325 }
7326
7327 return 0;
7328}
7329
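/*
 * Free everything __sdt_alloc() set up that was not claimed by
 * claim_allocations() (claimed pointers have been NULLed, so kfree()
 * on them is a no-op).
 */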
7330static void __sdt_free(const struct cpumask *cpu_map)
7331{
7332 struct sched_domain_topology_level *tl;
7333 int j;
7334
7335 for (tl = sched_domain_topology; tl->init; tl++) {
7336 struct sd_data *sdd = &tl->data;
7337
7338 for_each_cpu(j, cpu_map) {
7339 kfree(*per_cpu_ptr(sdd->sd, j));
7340 kfree(*per_cpu_ptr(sdd->sg, j));
7341 }
7342 free_percpu(sdd->sd);
7343 free_percpu(sdd->sg);
7344 }
7345}
7346
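/*
 * Build one level of the domain hierarchy for @cpu from topology level
 * @tl: initialize the domain, apply @attr, restrict its span to @cpu_map
 * intersected with tl->mask(cpu), and link it above @child.
 */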
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007347struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
7348 struct s_data *d, const struct cpumask *cpu_map,
Peter Zijlstrad069b912011-04-07 14:10:02 +02007349 struct sched_domain_attr *attr, struct sched_domain *child,
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007350 int cpu)
7351{
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02007352 struct sched_domain *sd = tl->init(tl, cpu);
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007353 if (!sd)
Peter Zijlstrad069b912011-04-07 14:10:02 +02007354 return child;
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007355
7356 set_domain_attribute(sd, attr);
7357 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
Peter Zijlstra60495e72011-04-07 14:10:04 +02007358 if (child) {
7359 sd->level = child->level + 1;
7360 sched_domain_level_max = max(sched_domain_level_max, sd->level);
Peter Zijlstrad069b912011-04-07 14:10:02 +02007361 child->parent = sd;
Peter Zijlstra60495e72011-04-07 14:10:04 +02007362 }
Peter Zijlstrad069b912011-04-07 14:10:02 +02007363 sd->child = child;
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007364
7365 return sd;
7366}
7367
Mike Travis7c16ec52008-04-04 18:11:11 -07007368/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007369 * Build sched domains for a given set of cpus and attach the sched domains
7370 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07007371 */
Peter Zijlstradce840a2011-04-07 14:09:50 +02007372static int build_sched_domains(const struct cpumask *cpu_map,
7373 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007374{
Andreas Herrmann2109b992009-08-18 12:53:00 +02007375 enum s_alloc alloc_state = sa_none;
Peter Zijlstradce840a2011-04-07 14:09:50 +02007376 struct sched_domain *sd;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007377 struct s_data d;
Peter Zijlstra822ff792011-04-07 14:09:51 +02007378 int i, ret = -ENOMEM;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307379
Andreas Herrmann2109b992009-08-18 12:53:00 +02007380 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7381 if (alloc_state != sa_rootdomain)
7382 goto error;
Mike Travis7c16ec52008-04-04 18:11:11 -07007383
Peter Zijlstradce840a2011-04-07 14:09:50 +02007384 /* Set up domains for cpus specified by the cpu_map. */
Rusty Russellabcd0832008-11-25 02:35:02 +10307385 for_each_cpu(i, cpu_map) {
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02007386 struct sched_domain_topology_level *tl;
7387
Peter Zijlstra3bd65a82011-04-07 14:09:54 +02007388 sd = NULL;
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02007389 for (tl = sched_domain_topology; tl->init; tl++)
7390 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
Peter Zijlstrad274cb32011-04-07 14:09:43 +02007391
Peter Zijlstrad069b912011-04-07 14:10:02 +02007392 while (sd->child)
7393 sd = sd->child;
7394
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02007395 *per_cpu_ptr(d.sd, i) = sd;
Peter Zijlstradce840a2011-04-07 14:09:50 +02007396 }
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02007397
Peter Zijlstradce840a2011-04-07 14:09:50 +02007398 /* Build the groups for the domains */
7399 for_each_cpu(i, cpu_map) {
7400 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7401 sd->span_weight = cpumask_weight(sched_domain_span(sd));
7402 get_group(i, sd->private, &sd->groups);
7403 atomic_inc(&sd->groups->ref);
7404
7405 if (i != cpumask_first(sched_domain_span(sd)))
7406 continue;
7407
Peter Zijlstraf96225f2011-04-07 14:09:57 +02007408 build_sched_groups(sd);
Peter Zijlstra1cf519022011-04-07 14:09:47 +02007409 }
Peter Zijlstraa06dadb2011-04-07 14:09:44 +02007410 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007411
Linus Torvalds1da177e2005-04-16 15:20:36 -07007412 /* Calculate CPU power for physical packages and nodes */
Peter Zijlstraa9c9a9b2011-04-07 14:09:49 +02007413 for (i = nr_cpumask_bits-1; i >= 0; i--) {
7414 if (!cpumask_test_cpu(i, cpu_map))
7415 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416
Peter Zijlstradce840a2011-04-07 14:09:50 +02007417 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7418 claim_allocations(i, sd);
Peter Zijlstracd4ea6a2011-04-07 14:09:45 +02007419 init_sched_groups_power(i, sd);
Peter Zijlstradce840a2011-04-07 14:09:50 +02007420 }
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007421 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07007422
Linus Torvalds1da177e2005-04-16 15:20:36 -07007423 /* Attach the domains */
Peter Zijlstradce840a2011-04-07 14:09:50 +02007424 rcu_read_lock();
Rusty Russellabcd0832008-11-25 02:35:02 +10307425 for_each_cpu(i, cpu_map) {
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02007426 sd = *per_cpu_ptr(d.sd, i);
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007427 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007428 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02007429 rcu_read_unlock();
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007430
Peter Zijlstra822ff792011-04-07 14:09:51 +02007431 ret = 0;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007432error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02007433 __free_domain_allocs(&d, alloc_state, cpu_map);
Peter Zijlstra822ff792011-04-07 14:09:51 +02007434 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007435}
Paul Jackson029190c2007-10-18 23:40:20 -07007436
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307437static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07007438static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02007439static struct sched_domain_attr *dattr_cur;
7440 /* attributes of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07007441
7442/*
7443 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10307444 * cpumask) fails, then fall back to a single sched domain,
7445 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07007446 */
Rusty Russell42128232008-11-25 02:35:12 +10307447static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07007448
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007449/*
7450 * arch_update_cpu_topology lets virtualized architectures update the
7451 * cpu core maps. It is supposed to return 1 if the topology changed
7452 * or 0 if it stayed the same.
7453 */
7454int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01007455{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007456 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01007457}
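/*
 * Illustrative note, not part of the original source: the definition above
 * is only a weak default.  An architecture whose core/package layout can
 * change at run time (the virtualized setups the comment has in mind) may
 * supply its own strong definition.  A minimal sketch of such an override
 * is shown below; arch_topology_changed() is a hypothetical helper used
 * purely for illustration.
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		// refresh this architecture's cpu core maps here ...
 *		return arch_topology_changed() ? 1 : 0;
 *	}
 */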
7458
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307459cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7460{
7461 int i;
7462 cpumask_var_t *doms;
7463
7464 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7465 if (!doms)
7466 return NULL;
7467 for (i = 0; i < ndoms; i++) {
7468 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7469 free_sched_domains(doms, i);
7470 return NULL;
7471 }
7472 }
7473 return doms;
7474}
7475
7476void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7477{
7478 unsigned int i;
7479 for (i = 0; i < ndoms; i++)
7480 free_cpumask_var(doms[i]);
7481 kfree(doms);
7482}
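/*
 * Illustrative usage sketch, not part of the original source: callers that
 * build a custom partition are expected to pair the two helpers above
 * roughly as follows.  maskA/maskB stand in for whatever cpumasks the
 * caller wants; once the array is handed to partition_sched_domains() that
 * function owns it, so free_sched_domains() is only needed if the hand-off
 * never happens.
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (!doms)
 *		return -ENOMEM;
 *	cpumask_copy(doms[0], maskA);
 *	cpumask_copy(doms[1], maskB);
 *	partition_sched_domains(2, doms, NULL);	// doms now owned by the scheduler
 */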
7483
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007484/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007485 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07007486 * For now this just excludes isolated cpus, but could be used to
7487 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007488 */
Peter Zijlstrac4a88492011-04-07 14:09:42 +02007489static int init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007490{
Milton Miller73785472007-10-24 18:23:48 +02007491 int err;
7492
Heiko Carstens22e52b02008-03-12 18:31:59 +01007493 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07007494 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307495 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07007496 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307497 doms_cur = &fallback_doms;
7498 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007499 dattr_cur = NULL;
Peter Zijlstradce840a2011-04-07 14:09:50 +02007500 err = build_sched_domains(doms_cur[0], NULL);
Milton Miller6382bc92007-10-15 17:00:19 +02007501 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02007502
7503 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007504}
7505
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007506/*
7507 * Detach sched domains from a group of cpus specified in cpu_map
7508 * These cpus will now be attached to the NULL domain
7509 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307510static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007511{
7512 int i;
7513
Peter Zijlstradce840a2011-04-07 14:09:50 +02007514 rcu_read_lock();
Rusty Russellabcd0832008-11-25 02:35:02 +10307515 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007516 cpu_attach_domain(NULL, &def_root_domain, i);
Peter Zijlstradce840a2011-04-07 14:09:50 +02007517 rcu_read_unlock();
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007518}
7519
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007520/* handle null as "default" */
7521static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7522 struct sched_domain_attr *new, int idx_new)
7523{
7524 struct sched_domain_attr tmp;
7525
7526 /* fast path */
7527 if (!new && !cur)
7528 return 1;
7529
7530 tmp = SD_ATTR_INIT;
7531 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7532 new ? (new + idx_new) : &tmp,
7533 sizeof(struct sched_domain_attr));
7534}
7535
Paul Jackson029190c2007-10-18 23:40:20 -07007536/*
7537 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007538 * cpumasks in the array doms_new[]. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07007539 * doms_new[] to the current sched domain partitioning, doms_cur[].
7540 * It destroys each deleted domain and builds each new domain.
7541 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307542 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007543 * The masks don't intersect (don't overlap). We should set up one
7544 * sched domain for each mask. CPUs not in any of the cpumasks will
7545 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07007546 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7547 * it as it is.
7548 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307549 * The passed in 'doms_new' should be allocated using
7550 * alloc_sched_domains. This routine takes ownership of it and will
7551 * free_sched_domains it when done with it. If the caller failed the
7552 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 7553 * and partition_sched_domains() will fall back to the single partition
 7554 * 'fallback_doms'; it also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07007555 *
Rusty Russell96f874e2008-11-25 02:35:14 +10307556 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08007557 * ndoms_new == 0 is a special case for destroying existing domains,
7558 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007559 *
Paul Jackson029190c2007-10-18 23:40:20 -07007560 * Call with hotplug lock held
7561 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307562void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007563 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07007564{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007565 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007566 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07007567
Heiko Carstens712555e2008-04-28 11:33:07 +02007568 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007569
Milton Miller73785472007-10-24 18:23:48 +02007570 /* always unregister in case we don't destroy any domains */
7571 unregister_sched_domain_sysctl();
7572
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007573 /* Let architecture update cpu core mappings. */
7574 new_topology = arch_update_cpu_topology();
7575
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007576 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07007577
7578 /* Destroy deleted domains */
7579 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007580 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307581 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007582 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007583 goto match1;
7584 }
7585 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307586 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07007587match1:
7588 ;
7589 }
7590
Max Krasnyanskye761b772008-07-15 04:43:49 -07007591 if (doms_new == NULL) {
7592 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307593 doms_new = &fallback_doms;
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007594 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08007595 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007596 }
7597
Paul Jackson029190c2007-10-18 23:40:20 -07007598 /* Build new domains */
7599 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007600 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307601 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007602 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007603 goto match2;
7604 }
7605 /* no match - add a new doms_new */
Peter Zijlstradce840a2011-04-07 14:09:50 +02007606 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07007607match2:
7608 ;
7609 }
7610
7611 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307612 if (doms_cur != &fallback_doms)
7613 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007614 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07007615 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007616 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07007617 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02007618
7619 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007620
Heiko Carstens712555e2008-04-28 11:33:07 +02007621 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07007622}
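/*
 * Illustrative caller sketch, not part of the original source: the cpuset
 * code is the main user of this interface.  A simplified rebuild, performed
 * under the hotplug lock as the comment above requires, looks roughly like
 * the following (ndoms, doms and dattr are placeholders for whatever the
 * caller computed):
 *
 *	get_online_cpus();
 *	doms = alloc_sched_domains(ndoms);
 *	if (doms) {
 *		// fill doms[0..ndoms-1] (and optionally dattr[]) here
 *		partition_sched_domains(ndoms, doms, dattr);
 *	} else {
 *		// documented fallback: single partition built from fallback_doms
 *		partition_sched_domains(1, NULL, NULL);
 *	}
 *	put_online_cpus();
 */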
7623
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007624#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Peter Zijlstrac4a88492011-04-07 14:09:42 +02007625static void reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007626{
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007627 get_online_cpus();
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007628
7629 /* Destroy domains first to force the rebuild */
7630 partition_sched_domains(0, NULL, NULL);
7631
Max Krasnyanskye761b772008-07-15 04:43:49 -07007632 rebuild_sched_domains();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007633 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007634}
7635
7636static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7637{
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307638 unsigned int level = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007639
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307640 if (sscanf(buf, "%u", &level) != 1)
7641 return -EINVAL;
7642
7643 /*
 7644	 * level is always positive, so don't check for
 7645	 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
 7646	 * What happens on a 0 or 1 byte write? Do we
 7647	 * need to check count as well?
7648 */
7649
7650 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007651 return -EINVAL;
7652
7653 if (smt)
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307654 sched_smt_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007655 else
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307656 sched_mc_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007657
Peter Zijlstrac4a88492011-04-07 14:09:42 +02007658 reinit_sched_domains();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007659
Li Zefanc70f22d2009-01-05 19:07:50 +08007660 return count;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007661}
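/*
 * Illustrative note, not part of the original source: this store routine is
 * reached through the sysdev class attributes defined below, i.e. (on most
 * systems) the files sched_mc_power_savings and sched_smt_power_savings
 * under /sys/devices/system/cpu/.  Conceptually, a user-space write of the
 * string "2" to the MC file ends up as the call sketched here, with the
 * last argument selecting the MC (0) or SMT (1) variant:
 *
 *	sched_power_savings_store("2\n", 2, 0);	// sets sched_mc_power_savings = 2
 */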
7662
Adrian Bunk6707de002007-08-12 18:08:19 +02007663#ifdef CONFIG_SCHED_MC
Andi Kleenf718cd42008-07-29 22:33:52 -07007664static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007665 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007666 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007667{
7668 return sprintf(page, "%u\n", sched_mc_power_savings);
7669}
Andi Kleenf718cd42008-07-29 22:33:52 -07007670static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007671 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007672 const char *buf, size_t count)
7673{
7674 return sched_power_savings_store(buf, count, 0);
7675}
Andi Kleenf718cd42008-07-29 22:33:52 -07007676static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7677 sched_mc_power_savings_show,
7678 sched_mc_power_savings_store);
Adrian Bunk6707de002007-08-12 18:08:19 +02007679#endif
7680
7681#ifdef CONFIG_SCHED_SMT
Andi Kleenf718cd42008-07-29 22:33:52 -07007682static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007683 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007684 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007685{
7686 return sprintf(page, "%u\n", sched_smt_power_savings);
7687}
Andi Kleenf718cd42008-07-29 22:33:52 -07007688static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007689 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007690 const char *buf, size_t count)
7691{
7692 return sched_power_savings_store(buf, count, 1);
7693}
Andi Kleenf718cd42008-07-29 22:33:52 -07007694static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7695 sched_smt_power_savings_show,
Adrian Bunk6707de002007-08-12 18:08:19 +02007696 sched_smt_power_savings_store);
7697#endif
7698
Li Zefan39aac642009-01-05 19:18:02 +08007699int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007700{
7701 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007702
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007703#ifdef CONFIG_SCHED_SMT
7704 if (smt_capable())
7705 err = sysfs_create_file(&cls->kset.kobj,
7706 &attr_sched_smt_power_savings.attr);
7707#endif
7708#ifdef CONFIG_SCHED_MC
7709 if (!err && mc_capable())
7710 err = sysfs_create_file(&cls->kset.kobj,
7711 &attr_sched_mc_power_savings.attr);
7712#endif
7713 return err;
7714}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007715#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007716
Linus Torvalds1da177e2005-04-16 15:20:36 -07007717/*
Tejun Heo3a101d02010-06-08 21:40:36 +02007718 * Update cpusets according to cpu_active mask. If cpusets are
7719 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7720 * around partition_sched_domains().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007721 */
Tejun Heo0b2e9182010-06-21 23:53:31 +02007722static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7723 void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007724{
Tejun Heo3a101d02010-06-08 21:40:36 +02007725 switch (action & ~CPU_TASKS_FROZEN) {
Max Krasnyanskye761b772008-07-15 04:43:49 -07007726 case CPU_ONLINE:
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007727 case CPU_DOWN_FAILED:
Tejun Heo3a101d02010-06-08 21:40:36 +02007728 cpuset_update_active_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007729 return NOTIFY_OK;
Max Krasnyanskye761b772008-07-15 04:43:49 -07007730 default:
7731 return NOTIFY_DONE;
7732 }
7733}
Tejun Heo3a101d02010-06-08 21:40:36 +02007734
Tejun Heo0b2e9182010-06-21 23:53:31 +02007735static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7736 void *hcpu)
Tejun Heo3a101d02010-06-08 21:40:36 +02007737{
7738 switch (action & ~CPU_TASKS_FROZEN) {
7739 case CPU_DOWN_PREPARE:
7740 cpuset_update_active_cpus();
7741 return NOTIFY_OK;
7742 default:
7743 return NOTIFY_DONE;
7744 }
7745}
Max Krasnyanskye761b772008-07-15 04:43:49 -07007746
7747static int update_runtime(struct notifier_block *nfb,
7748 unsigned long action, void *hcpu)
7749{
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007750 int cpu = (int)(long)hcpu;
7751
Linus Torvalds1da177e2005-04-16 15:20:36 -07007752 switch (action) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007753 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007754 case CPU_DOWN_PREPARE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007755 disable_runtime(cpu_rq(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007756 return NOTIFY_OK;
7757
Linus Torvalds1da177e2005-04-16 15:20:36 -07007758 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007759 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007760 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007761 case CPU_ONLINE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007762 enable_runtime(cpu_rq(cpu));
Max Krasnyanskye761b772008-07-15 04:43:49 -07007763 return NOTIFY_OK;
7764
Linus Torvalds1da177e2005-04-16 15:20:36 -07007765 default:
7766 return NOTIFY_DONE;
7767 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007768}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007769
7770void __init sched_init_smp(void)
7771{
Rusty Russelldcc30a32008-11-25 02:35:12 +10307772 cpumask_var_t non_isolated_cpus;
7773
7774 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08007775 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007776
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007777 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02007778 mutex_lock(&sched_domains_mutex);
Peter Zijlstrac4a88492011-04-07 14:09:42 +02007779 init_sched_domains(cpu_active_mask);
Rusty Russelldcc30a32008-11-25 02:35:12 +10307780 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7781 if (cpumask_empty(non_isolated_cpus))
7782 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02007783 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007784 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007785
Tejun Heo3a101d02010-06-08 21:40:36 +02007786 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7787 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007788
7789 /* RT runtime code needs to handle some hotplug events */
7790 hotcpu_notifier(update_runtime, 0);
7791
Peter Zijlstrab328ca12008-04-29 10:02:46 +02007792 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07007793
7794 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10307795 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07007796 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01007797 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10307798 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10307799
Rusty Russell0e3900e2008-11-25 02:35:13 +10307800 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007801}
7802#else
7803void __init sched_init_smp(void)
7804{
Ingo Molnar19978ca2007-11-09 22:39:38 +01007805 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007806}
7807#endif /* CONFIG_SMP */
7808
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05307809const_debug unsigned int sysctl_timer_migration = 1;
7810
Linus Torvalds1da177e2005-04-16 15:20:36 -07007811int in_sched_functions(unsigned long addr)
7812{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007813 return in_lock_functions(addr) ||
7814 (addr >= (unsigned long)__sched_text_start
7815 && addr < (unsigned long)__sched_text_end);
7816}
7817
Alexey Dobriyana9957442007-10-15 17:00:13 +02007818static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02007819{
7820 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02007821 INIT_LIST_HEAD(&cfs_rq->tasks);
Ingo Molnardd41f592007-07-09 18:51:59 +02007822#ifdef CONFIG_FAIR_GROUP_SCHED
7823 cfs_rq->rq = rq;
Paul Turnerf07333b2011-01-21 20:45:03 -08007824 /* allow initial update_cfs_load() to truncate */
Peter Zijlstra6ea72f12011-01-26 13:36:03 +01007825#ifdef CONFIG_SMP
Paul Turnerf07333b2011-01-21 20:45:03 -08007826 cfs_rq->load_stamp = 1;
Ingo Molnardd41f592007-07-09 18:51:59 +02007827#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02007828#endif
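	/*
	 * Note added for clarity, not in the original source: the initial
	 * value below is (u64)(-(1LL << 20)) == 2^64 - 2^20, i.e. roughly
	 * one millisecond worth of nanoseconds short of the 64-bit wrap
	 * point.  This appears intended to make vruntime wrap very soon
	 * after boot, so comparisons that are not wrap-safe (ones not done
	 * via signed 64-bit deltas) misbehave immediately rather than after
	 * years of uptime.
	 */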
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02007829 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02007830}
7831
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007832static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7833{
7834 struct rt_prio_array *array;
7835 int i;
7836
7837 array = &rt_rq->active;
7838 for (i = 0; i < MAX_RT_PRIO; i++) {
7839 INIT_LIST_HEAD(array->queue + i);
7840 __clear_bit(i, array->bitmap);
7841 }
7842 /* delimiter for bitsearch: */
7843 __set_bit(MAX_RT_PRIO, array->bitmap);
7844
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007845#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -05007846 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins398a1532009-01-14 09:10:04 -05007847#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -05007848 rt_rq->highest_prio.next = MAX_RT_PRIO;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01007849#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007850#endif
7851#ifdef CONFIG_SMP
7852 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007853 rt_rq->overloaded = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007854 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007855#endif
7856
7857 rt_rq->rt_time = 0;
7858 rt_rq->rt_throttled = 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007859 rt_rq->rt_runtime = 0;
Thomas Gleixner0986b112009-11-17 15:32:06 +01007860 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007861
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007862#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01007863 rt_rq->rt_nr_boosted = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007864 rt_rq->rq = rq;
7865#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007866}
7867
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007868#ifdef CONFIG_FAIR_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007869static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08007870 struct sched_entity *se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007871 struct sched_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007872{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007873 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007874 tg->cfs_rq[cpu] = cfs_rq;
7875 init_cfs_rq(cfs_rq, rq);
7876 cfs_rq->tg = tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007877
7878 tg->se[cpu] = se;
Yong Zhang07e06b02011-01-07 15:17:36 +08007879 /* se could be NULL for root_task_group */
Dhaval Giani354d60c2008-04-19 19:44:59 +02007880 if (!se)
7881 return;
7882
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007883 if (!parent)
7884 se->cfs_rq = &rq->cfs;
7885 else
7886 se->cfs_rq = parent->my_q;
7887
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007888 se->my_q = cfs_rq;
Paul Turner94371782010-11-15 15:47:10 -08007889 update_load_set(&se->load, 0);
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007890 se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007891}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007892#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007893
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007894#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007895static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08007896 struct sched_rt_entity *rt_se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007897 struct sched_rt_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007898{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007899 struct rq *rq = cpu_rq(cpu);
7900
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007901 tg->rt_rq[cpu] = rt_rq;
7902 init_rt_rq(rt_rq, rq);
7903 rt_rq->tg = tg;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007904 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007905
7906 tg->rt_se[cpu] = rt_se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02007907 if (!rt_se)
7908 return;
7909
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007910 if (!parent)
7911 rt_se->rt_rq = &rq->rt;
7912 else
7913 rt_se->rt_rq = parent->my_q;
7914
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007915 rt_se->my_q = rt_rq;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007916 rt_se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007917 INIT_LIST_HEAD(&rt_se->run_list);
7918}
7919#endif
7920
Linus Torvalds1da177e2005-04-16 15:20:36 -07007921void __init sched_init(void)
7922{
Ingo Molnardd41f592007-07-09 18:51:59 +02007923 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07007924 unsigned long alloc_size = 0, ptr;
7925
7926#ifdef CONFIG_FAIR_GROUP_SCHED
7927 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7928#endif
7929#ifdef CONFIG_RT_GROUP_SCHED
7930 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7931#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307932#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10307933 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307934#endif
Mike Travis434d53b2008-04-04 18:11:04 -07007935 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03007936 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07007937
7938#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007939 root_task_group.se = (struct sched_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07007940 ptr += nr_cpu_ids * sizeof(void **);
7941
Yong Zhang07e06b02011-01-07 15:17:36 +08007942 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07007943 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02007944
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007945#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07007946#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007947 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07007948 ptr += nr_cpu_ids * sizeof(void **);
7949
Yong Zhang07e06b02011-01-07 15:17:36 +08007950 root_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02007951 ptr += nr_cpu_ids * sizeof(void **);
7952
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007953#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307954#ifdef CONFIG_CPUMASK_OFFSTACK
7955 for_each_possible_cpu(i) {
7956 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
7957 ptr += cpumask_size();
7958 }
7959#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07007960 }
Ingo Molnardd41f592007-07-09 18:51:59 +02007961
Gregory Haskins57d885f2008-01-25 21:08:18 +01007962#ifdef CONFIG_SMP
7963 init_defrootdomain();
7964#endif
7965
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007966 init_rt_bandwidth(&def_rt_bandwidth,
7967 global_rt_period(), global_rt_runtime());
7968
7969#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007970 init_rt_bandwidth(&root_task_group.rt_bandwidth,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007971 global_rt_period(), global_rt_runtime());
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007972#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007973
Dhaval Giani7c941432010-01-20 13:26:18 +01007974#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007975 list_add(&root_task_group.list, &task_groups);
7976 INIT_LIST_HEAD(&root_task_group.children);
Mike Galbraith5091faa2010-11-30 14:18:03 +01007977 autogroup_init(&init_task);
Dhaval Giani7c941432010-01-20 13:26:18 +01007978#endif /* CONFIG_CGROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007979
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08007980 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07007981 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007982
7983 rq = cpu_rq(i);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007984 raw_spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07007985 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02007986 rq->calc_load_active = 0;
7987 rq->calc_load_update = jiffies + LOAD_FREQ;
Ingo Molnardd41f592007-07-09 18:51:59 +02007988 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007989 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007990#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007991 root_task_group.shares = root_task_group_load;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007992 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02007993 /*
Yong Zhang07e06b02011-01-07 15:17:36 +08007994 * How much cpu bandwidth does root_task_group get?
Dhaval Giani354d60c2008-04-19 19:44:59 +02007995 *
7996 * In case of task-groups formed thr' the cgroup filesystem, it
7997 * gets 100% of the cpu resources in the system. This overall
7998 * system cpu resource is divided among the tasks of
Yong Zhang07e06b02011-01-07 15:17:36 +08007999 * root_task_group and its child task-groups in a fair manner,
Dhaval Giani354d60c2008-04-19 19:44:59 +02008000 * based on each entity's (task or task-group's) weight
8001 * (se->load.weight).
8002 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008003 * In other words, if root_task_group has 10 tasks of weight
Dhaval Giani354d60c2008-04-19 19:44:59 +02008004 * 1024, and two child groups A0 and A1 (of weight 1024 each),
8005 * then A0's share of the cpu resource is:
8006 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02008007 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02008008 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008009 * We achieve this by letting root_task_group's tasks sit
8010 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
Dhaval Giani354d60c2008-04-19 19:44:59 +02008011 */
Yong Zhang07e06b02011-01-07 15:17:36 +08008012 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008013#endif /* CONFIG_FAIR_GROUP_SCHED */
8014
8015 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008016#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008017 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Yong Zhang07e06b02011-01-07 15:17:36 +08008018 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008019#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008020
Ingo Molnardd41f592007-07-09 18:51:59 +02008021 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
8022 rq->cpu_load[j] = 0;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07008023
8024 rq->last_load_update_tick = jiffies;
8025
Linus Torvalds1da177e2005-04-16 15:20:36 -07008026#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07008027 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01008028 rq->rd = NULL;
Nikhil Rao1399fa72011-05-18 10:09:39 -07008029 rq->cpu_power = SCHED_POWER_SCALE;
Gregory Haskins3f029d32009-07-29 11:08:47 -04008030 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008031 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02008032 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008033 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07008034 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04008035 rq->online = 0;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01008036 rq->idle_stamp = 0;
8037 rq->avg_idle = 2*sysctl_sched_migration_cost;
Gregory Haskinsdc938522008-01-25 21:08:26 +01008038 rq_attach_root(rq, &def_root_domain);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008039#ifdef CONFIG_NO_HZ
8040 rq->nohz_balance_kick = 0;
8041 init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
8042#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008043#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01008044 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008045 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008046 }
8047
Peter Williams2dd73a42006-06-27 02:54:34 -07008048 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008049
Avi Kivitye107be32007-07-26 13:40:43 +02008050#ifdef CONFIG_PREEMPT_NOTIFIERS
8051 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8052#endif
8053
Christoph Lameterc9819f42006-12-10 02:20:25 -08008054#ifdef CONFIG_SMP
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008055 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
Christoph Lameterc9819f42006-12-10 02:20:25 -08008056#endif
8057
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008058#ifdef CONFIG_RT_MUTEXES
Thomas Gleixner1d615482009-11-17 14:54:03 +01008059 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008060#endif
8061
Linus Torvalds1da177e2005-04-16 15:20:36 -07008062 /*
8063 * The boot idle thread does lazy MMU switching as well:
8064 */
8065 atomic_inc(&init_mm.mm_count);
8066 enter_lazy_tlb(&init_mm, current);
8067
8068 /*
8069 * Make us the idle thread. Technically, schedule() should not be
 8070 * called from this thread; however, somewhere below it might be,
8071 * but because we are the idle thread, we just pick up running again
8072 * when this runqueue becomes "idle".
8073 */
8074 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008075
8076 calc_load_update = jiffies + LOAD_FREQ;
8077
Ingo Molnardd41f592007-07-09 18:51:59 +02008078 /*
8079 * During early bootup we pretend to be a normal task:
8080 */
8081 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01008082
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308083 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
Rusty Russell49557e62009-11-02 20:37:20 +10308084 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308085#ifdef CONFIG_SMP
Peter Zijlstra4cb98832011-04-07 14:09:58 +02008086 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308087#ifdef CONFIG_NO_HZ
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008088 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8089 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
8090 atomic_set(&nohz.load_balancer, nr_cpu_ids);
8091 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
8092 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308093#endif
Rusty Russellbdddd292009-12-02 14:09:16 +10308094 /* May be allocated at isolcpus cmdline parse time */
8095 if (cpu_isolated_map == NULL)
8096 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308097#endif /* SMP */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308098
Ingo Molnar6892b752008-02-13 14:02:36 +01008099 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008100}
8101
8102#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008103static inline int preempt_count_equals(int preempt_offset)
8104{
Frederic Weisbecker234da7b2009-12-16 20:21:05 +01008105 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008106
Arnd Bergmann4ba82162011-01-25 22:52:22 +01008107 return (nested == preempt_offset);
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008108}
8109
Simon Kagstromd8948372009-12-23 11:08:18 +01008110void __might_sleep(const char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008111{
Ingo Molnar48f24c42006-07-03 00:25:40 -07008112#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07008113 static unsigned long prev_jiffy; /* ratelimiting */
8114
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008115 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8116 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02008117 return;
8118 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8119 return;
8120 prev_jiffy = jiffies;
8121
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01008122 printk(KERN_ERR
8123 "BUG: sleeping function called from invalid context at %s:%d\n",
8124 file, line);
8125 printk(KERN_ERR
8126 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8127 in_atomic(), irqs_disabled(),
8128 current->pid, current->comm);
Ingo Molnaraef745f2008-08-28 11:34:43 +02008129
8130 debug_show_held_locks(current);
8131 if (irqs_disabled())
8132 print_irqtrace_events(current);
8133 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008134#endif
8135}
8136EXPORT_SYMBOL(__might_sleep);
8137#endif
8138
8139#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008140static void normalize_task(struct rq *rq, struct task_struct *p)
8141{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008142 const struct sched_class *prev_class = p->sched_class;
8143 int old_prio = p->prio;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008144 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02008145
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02008146 on_rq = p->on_rq;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008147 if (on_rq)
8148 deactivate_task(rq, p, 0);
8149 __setscheduler(rq, p, SCHED_NORMAL, 0);
8150 if (on_rq) {
8151 activate_task(rq, p, 0);
8152 resched_task(rq->curr);
8153 }
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008154
8155 check_class_changed(rq, p, prev_class, old_prio);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008156}
8157
Linus Torvalds1da177e2005-04-16 15:20:36 -07008158void normalize_rt_tasks(void)
8159{
Ingo Molnara0f98a12007-06-17 18:37:45 +02008160 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008161 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07008162 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008163
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008164 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008165 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02008166 /*
8167 * Only normalize user tasks:
8168 */
8169 if (!p->mm)
8170 continue;
8171
Ingo Molnardd41f592007-07-09 18:51:59 +02008172 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008173#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03008174 p->se.statistics.wait_start = 0;
8175 p->se.statistics.sleep_start = 0;
8176 p->se.statistics.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008177#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02008178
8179 if (!rt_task(p)) {
8180 /*
8181 * Renice negative nice level userspace
8182 * tasks back to 0:
8183 */
8184 if (TASK_NICE(p) < 0 && p->mm)
8185 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008186 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02008187 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008188
Thomas Gleixner1d615482009-11-17 14:54:03 +01008189 raw_spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07008190 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008191
Ingo Molnar178be792007-10-15 17:00:18 +02008192 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008193
Ingo Molnarb29739f2006-06-27 02:54:51 -07008194 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01008195 raw_spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008196 } while_each_thread(g, p);
8197
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008198 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008199}
8200
8201#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07008202
Jason Wessel67fc4e02010-05-20 21:04:21 -05008203#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008204/*
Jason Wessel67fc4e02010-05-20 21:04:21 -05008205 * These functions are only useful for the IA64 MCA handling, or kdb.
Linus Torvalds1df5c102005-09-12 07:59:21 -07008206 *
8207 * They can only be called when the whole system has been
8208 * stopped - every CPU needs to be quiescent, and no scheduling
8209 * activity can take place. Using them for anything else would
8210 * be a serious bug, and as a result, they aren't even visible
8211 * under any other configuration.
8212 */
8213
8214/**
8215 * curr_task - return the current task for a given cpu.
8216 * @cpu: the processor in question.
8217 *
8218 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8219 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008220struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008221{
8222 return cpu_curr(cpu);
8223}
8224
Jason Wessel67fc4e02010-05-20 21:04:21 -05008225#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8226
8227#ifdef CONFIG_IA64
Linus Torvalds1df5c102005-09-12 07:59:21 -07008228/**
8229 * set_curr_task - set the current task for a given cpu.
8230 * @cpu: the processor in question.
8231 * @p: the task pointer to set.
8232 *
8233 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008234 * are serviced on a separate stack. It allows the architecture to switch the
8235 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07008236 * must be called with all CPUs synchronized and interrupts disabled; the
 8237 * caller must save the original value of the current task (see
8238 * curr_task() above) and restore that value before reenabling interrupts and
8239 * re-starting the system.
8240 *
8241 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8242 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008243void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008244{
8245 cpu_curr(cpu) = p;
8246}
8247
8248#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008249
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008250#ifdef CONFIG_FAIR_GROUP_SCHED
8251static void free_fair_sched_group(struct task_group *tg)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008252{
8253 int i;
8254
8255 for_each_possible_cpu(i) {
8256 if (tg->cfs_rq)
8257 kfree(tg->cfs_rq[i]);
8258 if (tg->se)
8259 kfree(tg->se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008260 }
8261
8262 kfree(tg->cfs_rq);
8263 kfree(tg->se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008264}
8265
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008266static
8267int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008268{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008269 struct cfs_rq *cfs_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008270 struct sched_entity *se;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008271 int i;
8272
Mike Travis434d53b2008-04-04 18:11:04 -07008273 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008274 if (!tg->cfs_rq)
8275 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008276 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008277 if (!tg->se)
8278 goto err;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008279
8280 tg->shares = NICE_0_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008281
8282 for_each_possible_cpu(i) {
Li Zefaneab17222008-10-29 17:03:22 +08008283 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8284 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008285 if (!cfs_rq)
8286 goto err;
8287
Li Zefaneab17222008-10-29 17:03:22 +08008288 se = kzalloc_node(sizeof(struct sched_entity),
8289 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008290 if (!se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008291 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008292
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008293 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008294 }
8295
8296 return 1;
8297
Peter Zijlstra49246272010-10-17 21:46:10 +02008298err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008299 kfree(cfs_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008300err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008301 return 0;
8302}
8303
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008304static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8305{
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008306 struct rq *rq = cpu_rq(cpu);
8307 unsigned long flags;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008308
8309 /*
8310 * Only empty task groups can be destroyed; so we can speculatively
8311 * check on_list without danger of it being re-added.
8312 */
8313 if (!tg->cfs_rq[cpu]->on_list)
8314 return;
8315
8316 raw_spin_lock_irqsave(&rq->lock, flags);
Paul Turner822bc182010-11-29 16:55:40 -08008317 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008318 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008319}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008320#else /* !CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008321static inline void free_fair_sched_group(struct task_group *tg)
8322{
8323}
8324
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008325static inline
8326int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008327{
8328 return 1;
8329}
8330
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008331static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8332{
8333}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008334#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008335
8336#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008337static void free_rt_sched_group(struct task_group *tg)
8338{
8339 int i;
8340
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008341 destroy_rt_bandwidth(&tg->rt_bandwidth);
8342
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008343 for_each_possible_cpu(i) {
8344 if (tg->rt_rq)
8345 kfree(tg->rt_rq[i]);
8346 if (tg->rt_se)
8347 kfree(tg->rt_se[i]);
8348 }
8349
8350 kfree(tg->rt_rq);
8351 kfree(tg->rt_se);
8352}
8353
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008354static
8355int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008356{
8357 struct rt_rq *rt_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008358 struct sched_rt_entity *rt_se;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008359 int i;
8360
Mike Travis434d53b2008-04-04 18:11:04 -07008361 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008362 if (!tg->rt_rq)
8363 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008364 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008365 if (!tg->rt_se)
8366 goto err;
8367
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008368 init_rt_bandwidth(&tg->rt_bandwidth,
8369 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008370
8371 for_each_possible_cpu(i) {
Li Zefaneab17222008-10-29 17:03:22 +08008372 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8373 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008374 if (!rt_rq)
8375 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008376
Li Zefaneab17222008-10-29 17:03:22 +08008377 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8378 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008379 if (!rt_se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008380 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008381
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008382 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008383 }
8384
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008385 return 1;
8386
Peter Zijlstra49246272010-10-17 21:46:10 +02008387err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008388 kfree(rt_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008389err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008390 return 0;
8391}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008392#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008393static inline void free_rt_sched_group(struct task_group *tg)
8394{
8395}
8396
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008397static inline
8398int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008399{
8400 return 1;
8401}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008402#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008403
Dhaval Giani7c941432010-01-20 13:26:18 +01008404#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008405static void free_sched_group(struct task_group *tg)
8406{
8407 free_fair_sched_group(tg);
8408 free_rt_sched_group(tg);
Mike Galbraithe9aa1dd2011-01-05 11:11:25 +01008409 autogroup_free(tg);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008410 kfree(tg);
8411}
8412
8413/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008414struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008415{
8416 struct task_group *tg;
8417 unsigned long flags;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008418
8419 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8420 if (!tg)
8421 return ERR_PTR(-ENOMEM);
8422
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008423 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008424 goto err;
8425
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008426 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008427 goto err;
8428
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008429 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008430 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008431
8432 WARN_ON(!parent); /* root should already exist */
8433
8434 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008435 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242030-08-14 15:56:40 +08008436 list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008437 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008438
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008439 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008440
8441err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008442 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008443 return ERR_PTR(-ENOMEM);
8444}
8445
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008446/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008447static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008448{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008449 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008450 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008451}
8452
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008453/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008454void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008455{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008456 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008457 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008458
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008459 /* end participation in shares distribution */
8460 for_each_possible_cpu(i)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008461 unregister_fair_sched_group(tg, i);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008462
8463 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008464 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008465 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008466 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008467
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008468	/* wait for possible concurrent references to cfs_rqs to complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008469 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008470}
8471
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008472/* Change a task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02008473 * The caller of this function should have put the task in its new group
8474 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8475 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008476 */
8477void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008478{
8479 int on_rq, running;
8480 unsigned long flags;
8481 struct rq *rq;
8482
8483 rq = task_rq_lock(tsk, &flags);
8484
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01008485 running = task_current(rq, tsk);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02008486 on_rq = tsk->on_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008487
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008488 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008489 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008490 if (unlikely(running))
8491 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008492
Peter Zijlstra810b3812008-02-29 15:21:01 -05008493#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008494 if (tsk->sched_class->task_move_group)
8495 tsk->sched_class->task_move_group(tsk, on_rq);
8496 else
Peter Zijlstra810b3812008-02-29 15:21:01 -05008497#endif
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008498 set_task_rq(tsk, task_cpu(tsk));
Peter Zijlstra810b3812008-02-29 15:21:01 -05008499
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008500 if (unlikely(running))
8501 tsk->sched_class->set_curr_task(rq);
8502 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01008503 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008504
Peter Zijlstra0122ec52011-04-05 17:23:51 +02008505 task_rq_unlock(rq, tsk, &flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008506}
Dhaval Giani7c941432010-01-20 13:26:18 +01008507#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008508
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008509#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008510static DEFINE_MUTEX(shares_mutex);
8511
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008512int sched_group_set_shares(struct task_group *tg, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008513{
8514 int i;
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008515 unsigned long flags;
Ingo Molnarc61935f2008-01-22 11:24:58 +01008516
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008517 /*
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008518 * We can't change the weight of the root cgroup.
8519 */
8520 if (!tg->se[0])
8521 return -EINVAL;
8522
Peter Zijlstra18d95a22008-04-19 19:45:00 +02008523 if (shares < MIN_SHARES)
8524 shares = MIN_SHARES;
Miao Xiecb4ad1f2008-04-28 12:54:56 +08008525 else if (shares > MAX_SHARES)
8526 shares = MAX_SHARES;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008527
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008528 mutex_lock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008529 if (tg->shares == shares)
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008530 goto done;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008531
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008532 tg->shares = shares;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008533 for_each_possible_cpu(i) {
Paul Turner94371782010-11-15 15:47:10 -08008534 struct rq *rq = cpu_rq(i);
8535 struct sched_entity *se;
8536
8537 se = tg->se[i];
8538 /* Propagate contribution to hierarchy */
8539 raw_spin_lock_irqsave(&rq->lock, flags);
8540 for_each_sched_entity(se)
Paul Turner6d5ab292011-01-21 20:45:01 -08008541 update_cfs_shares(group_cfs_rq(se));
Paul Turner94371782010-11-15 15:47:10 -08008542 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008543 }
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008544
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008545done:
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008546 mutex_unlock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008547 return 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008548}
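/*
 * Illustrative sketch, not part of the original source: this setter is
 * typically reached from the cgroup "cpu.shares" file.  Values are clamped
 * to [MIN_SHARES, MAX_SHARES] and the root group (tg->se[0] == NULL) is
 * rejected, so, assuming a valid non-root task_group *tg, both calls below
 * end up storing a legal weight:
 *
 *	sched_group_set_shares(tg, 2 * NICE_0_LOAD);	// twice the default weight
 *	sched_group_set_shares(tg, 0);			// clamped up to MIN_SHARES
 */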
8549
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008550unsigned long sched_group_shares(struct task_group *tg)
8551{
8552 return tg->shares;
8553}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008554#endif
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008555
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008556#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008557/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008558 * Ensure that the real time constraints are schedulable.
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008559 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008560static DEFINE_MUTEX(rt_constraints_mutex);
8561
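/*
 * Express runtime/period as a fixed-point ratio with 20 fractional bits,
 * so 1ULL << 20 means a full CPU. E.g. 950000us of runtime in a 1000000us
 * period yields roughly 0.95 << 20. RUNTIME_INF counts as a full CPU.
 */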
static unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	return div64_u64(runtime << 20, period);
}

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
			return 1;
	} while_each_thread(g, p);

	return 0;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

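/*
 * Check one task group against the bandwidth proposed in *data: the
 * runtime must fit within the period, may not drop to zero while the
 * group still has RT tasks, must stay within the global limit, and the
 * summed ratio of the direct children may not exceed the group's own.
 * Invoked for every group via walk_tg_tree() from __rt_schedulable().
 */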
static int tg_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	return walk_tg_tree(tg_schedulable, tg_nop, &data);
}

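/*
 * Install a new period/runtime pair for @tg. rt_constraints_mutex
 * serialises bandwidth updates, tasklist_lock is read-held for the
 * schedulability check, and the new runtime is then copied into every
 * per-cpu rt_rq under the bandwidth and per-rq runtime locks.
 */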
static int tg_set_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

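/* A negative rt_runtime_us means unlimited runtime (RUNTIME_INF). */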
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	if (rt_period == 0)
		return -EINVAL;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}

static int sched_rt_global_constraints(void)
{
	u64 runtime, period;
	int ret = 0;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	runtime = global_rt_runtime();
	period = global_rt_period();

	/*
	 * Sanity check on the sysctl variables.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	/*
	 * There are always some RT tasks in the root group
	 * -- migration, kstopmachine etc.
	 */
	if (sysctl_sched_rt_runtime == 0)
		return -EBUSY;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */

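/*
 * sysctl handler for the global RT period/runtime knobs: read the new
 * values, validate them with sched_rt_global_constraints(), and restore
 * the previous settings if validation fails.
 */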
int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_constraints();
		if (ret) {
			sysctl_sched_rt_period = old_period;
			sysctl_sched_rt_runtime = old_runtime;
		} else {
			def_rt_bandwidth.rt_runtime = global_rt_runtime();
			def_rt_bandwidth.rt_period =
				ns_to_ktime(global_rt_period());
		}
	}
	mutex_unlock(&mutex);

	return ret;
}

#ifdef CONFIG_CGROUP_SCHED

/* return the corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
			    struct task_group, css);
}

static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg, *parent;

	if (!cgrp->parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	parent = cgroup_tg(cgrp->parent);
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

static void
cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_destroy_group(tg);
}

static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
		return -EINVAL;
#else
	/* We don't support RT-tasks being in separate groups */
	if (tsk->sched_class != &fair_sched_class)
		return -EINVAL;
#endif
	return 0;
}

static void
cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	sched_move_task(tsk);
}

static void
cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
		struct cgroup *old_cgrp, struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke a half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	sched_move_task(task);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
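/*
 * The cgroup "cpu.shares" interface works in the traditional scale where
 * 1024 is the weight of a nice-0 task; scale_load()/scale_load_down()
 * convert between that and the kernel's internal (possibly higher
 * resolution) load.weight representation.
 */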
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				u64 shareval)
{
	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct task_group *tg = cgroup_tg(cgrp);

	return (u64) scale_load_down(tg->shares);
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
				s64 val)
{
	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}

static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_runtime(cgroup_tg(cgrp));
}

static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
		u64 rt_period_us)
{
	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
};

static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}

struct cgroup_subsys cpu_cgroup_subsys = {
	.name		= "cpu",
	.create		= cpu_cgroup_create,
	.destroy	= cpu_cgroup_destroy,
	.can_attach_task = cpu_cgroup_can_attach_task,
	.attach_task	= cpu_cgroup_attach_task,
	.exit		= cpu_cgroup_exit,
	.populate	= cpu_cgroup_populate,
	.subsys_id	= cpu_cgroup_subsys_id,
	.early_init	= 1,
};

#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_CGROUP_CPUACCT

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
	struct cpuacct *parent;
};

struct cgroup_subsys cpuacct_subsys;

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
	struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	int i;

	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		if (percpu_counter_init(&ca->cpustat[i], 0))
			goto out_free_counters;

	if (cgrp->parent)
		ca->parent = cgroup_ca(cgrp->parent);

	return &ca->css;

out_free_counters:
	while (--i >= 0)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
	kfree(ca);
}

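/*
 * Per-cpu usage accessors. On 32-bit platforms a 64-bit counter cannot be
 * read or written atomically, so the cpu's rq->lock is taken around the
 * access; on 64-bit a plain load/store suffices.
 */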
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	data = *cpuusage;
#endif

	return data;
}

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	*cpuusage = val;
#endif
}

/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	u64 totalcpuusage = 0;
	int i;

	for_each_present_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i);

	return totalcpuusage;
}

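/* only 0 may be written to cpuacct.usage; it resets the counters on all cpus */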
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
			  u64 reset)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int err = 0;
	int i;

	if (reset) {
		err = -EINVAL;
		goto out;
	}

	for_each_present_cpu(i)
		cpuacct_cpuusage_write(ca, i, 0);

out:
	return err;
}

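/* cpuacct.usage_percpu: print one space-separated usage value per present cpu */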
static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
				   struct seq_file *m)
{
	struct cpuacct *ca = cgroup_ca(cgroup);
	u64 percpu;
	int i;

	for_each_present_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}

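/*
 * cpuacct.stat: report the group's accumulated user and system time,
 * converted from cputime to clock_t ticks for userspace.
 */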
static const char *cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};

static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
		struct cgroup_map_cb *cb)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
		s64 val = percpu_counter_read(&ca->cpustat[i]);
		val = cputime64_to_clock_t(val);
		cb->fill(cb, cpuacct_stat_desc[i], val);
	}
	return 0;
}

static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_percpu",
		.read_seq_string = cpuacct_percpu_seq_read,
	},
	{
		.name = "stat",
		.read_map = cpuacct_stats_show,
	},
};

static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}

/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int cpu;

	if (unlikely(!cpuacct_subsys.active))
		return;

	cpu = task_cpu(tsk);

	rcu_read_lock();

	ca = task_ca(tsk);

	for (; ca; ca = ca->parent) {
		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
		*cpuusage += cputime;
	}

	rcu_read_unlock();
}

/*
 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
 * in cputime_t units. As a result, cpuacct_update_stats calls
 * percpu_counter_add with values large enough to always overflow the
 * per cpu batch limit causing bad SMP scalability.
 *
 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
 */
#ifdef CONFIG_SMP
#define CPUACCT_BATCH \
	min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
#else
#define CPUACCT_BATCH 0
#endif

/*
 * Charge the system/user time to the task's accounting group.
 */
static void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val)
{
	struct cpuacct *ca;
	int batch = CPUACCT_BATCH;

	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(tsk);

	do {
		__percpu_counter_add(&ca->cpustat[idx], val, batch);
		ca = ca->parent;
	} while (ca);
	rcu_read_unlock();
}

struct cgroup_subsys cpuacct_subsys = {
	.name = "cpuacct",
	.create = cpuacct_create,
	.destroy = cpuacct_destroy,
	.populate = cpuacct_populate,
	.subsys_id = cpuacct_subsys_id,
};
#endif /* CONFIG_CGROUP_CPUACCT */