1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#define CSIGNAL 0x000000ff
5#define CLONE_VM 0x00000100
6#define CLONE_FS 0x00000200
7#define CLONE_FILES 0x00000400
8#define CLONE_SIGHAND 0x00000800
9#define CLONE_PTRACE 0x00002000
10#define CLONE_VFORK 0x00004000
11#define CLONE_PARENT 0x00008000
12#define CLONE_THREAD 0x00010000
13#define CLONE_NEWNS 0x00020000
14#define CLONE_SYSVSEM 0x00040000
15#define CLONE_SETTLS 0x00080000
16#define CLONE_PARENT_SETTID 0x00100000
17#define CLONE_CHILD_CLEARTID 0x00200000
18#define CLONE_DETACHED 0x00400000
19#define CLONE_UNTRACED 0x00800000
20#define CLONE_CHILD_SETTID 0x01000000
21#define CLONE_NEWUTS 0x04000000
22#define CLONE_NEWIPC 0x08000000
23#define CLONE_NEWUSER 0x10000000
24#define CLONE_NEWPID 0x20000000
25#define CLONE_NEWNET 0x40000000
26#define CLONE_IO 0x80000000
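/*
 * Illustrative note (not part of the original header): a pthread-style
 * thread is normally created with a flag combination such as
 *
 *	CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *	CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *	CLONE_CHILD_CLEARTID
 *
 * so the new task shares its creator's address space, filesystem state,
 * file table and signal handlers, while CSIGNAL masks the low bits that
 * select the exit signal delivered to the parent.
 */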
27
28#define SCHED_NORMAL 0
29#define SCHED_FIFO 1
30#define SCHED_RR 2
31#define SCHED_BATCH 3
32#define SCHED_IDLE 5
33#define SCHED_RESET_ON_FORK 0x40000000
34
35#ifdef __KERNEL__
36
37struct sched_param {
38 int sched_priority;
39};
40
41#include <asm/param.h>
42
43#include <linux/capability.h>
44#include <linux/threads.h>
45#include <linux/kernel.h>
46#include <linux/types.h>
47#include <linux/timex.h>
48#include <linux/jiffies.h>
49#include <linux/rbtree.h>
50#include <linux/thread_info.h>
51#include <linux/cpumask.h>
52#include <linux/errno.h>
53#include <linux/nodemask.h>
54#include <linux/mm_types.h>
55
56#include <asm/page.h>
57#include <asm/ptrace.h>
58#include <asm/cputime.h>
59
60#include <linux/smp.h>
61#include <linux/sem.h>
62#include <linux/signal.h>
63#include <linux/compiler.h>
64#include <linux/completion.h>
65#include <linux/pid.h>
66#include <linux/percpu.h>
67#include <linux/topology.h>
68#include <linux/proportions.h>
69#include <linux/seccomp.h>
70#include <linux/rcupdate.h>
71#include <linux/rculist.h>
72#include <linux/rtmutex.h>
73
74#include <linux/time.h>
75#include <linux/param.h>
76#include <linux/resource.h>
77#include <linux/timer.h>
78#include <linux/hrtimer.h>
79#include <linux/task_io_accounting.h>
80#include <linux/latencytop.h>
81#include <linux/cred.h>
82#include <linux/llist.h>
83
84#include <asm/processor.h>
85
86struct exec_domain;
87struct futex_pi_state;
88struct robust_list_head;
89struct bio_list;
90struct fs_struct;
91struct perf_event_context;
92struct blk_plug;
93
94#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
95
96extern unsigned long avenrun[];
97extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
98
99#define FSHIFT 11
100#define FIXED_1 (1<<FSHIFT)
101#define LOAD_FREQ (5*HZ+1)
102#define EXP_1 1884
103#define EXP_5 2014
104#define EXP_15 2037
105
106#define CALC_LOAD(load,exp,n) \
107 load *= exp; \
108 load += n*(FIXED_1-exp); \
109 load >>= FSHIFT;
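/*
 * Illustrative sketch (not in the original header): the avenrun[] load
 * averages are FSHIFT-bit fixed-point values decayed every LOAD_FREQ
 * ticks with the EXP_* constants, roughly:
 *
 *	CALC_LOAD(avenrun[0], EXP_1, nr_active);	(1-minute average)
 *	CALC_LOAD(avenrun[1], EXP_5, nr_active);	(5-minute average)
 *	CALC_LOAD(avenrun[2], EXP_15, nr_active);	(15-minute average)
 *
 * where nr_active stands for the sampled count of runnable plus
 * uninterruptible tasks.  Shifting right by FSHIFT (dividing by FIXED_1)
 * recovers the integer part of the familiar decimal load average.
 */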
110
111extern unsigned long total_forks;
112extern int nr_threads;
113DECLARE_PER_CPU(unsigned long, process_counts);
114extern int nr_processes(void);
115extern unsigned long nr_running(void);
116extern unsigned long nr_uninterruptible(void);
117extern unsigned long nr_iowait(void);
118extern unsigned long nr_iowait_cpu(int cpu);
119extern unsigned long this_cpu_load(void);
120
121extern void sched_get_nr_running_avg(int *avg, int *iowait_avg);
122
123extern void calc_global_load(unsigned long ticks);
124
125extern unsigned long get_parent_ip(unsigned long addr);
126
127struct seq_file;
128struct cfs_rq;
129struct task_group;
130#ifdef CONFIG_SCHED_DEBUG
131extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
132extern void proc_sched_set_task(struct task_struct *p);
133extern void
134print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
135#else
136static inline void
137proc_sched_show_task(struct task_struct *p, struct seq_file *m)
138{
139}
140static inline void proc_sched_set_task(struct task_struct *p)
141{
142}
143static inline void
144print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
145{
146}
147#endif
148
149#define TASK_RUNNING 0
150#define TASK_INTERRUPTIBLE 1
151#define TASK_UNINTERRUPTIBLE 2
152#define __TASK_STOPPED 4
153#define __TASK_TRACED 8
154#define EXIT_ZOMBIE 16
155#define EXIT_DEAD 32
156#define TASK_DEAD 64
157#define TASK_WAKEKILL 128
158#define TASK_WAKING 256
159#define TASK_STATE_MAX 512
160
161#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
162
163extern char ___assert_task_state[1 - 2*!!(
164 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
165
166#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
167#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
168#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
169
170#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
171#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
172
173#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
174 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
175 __TASK_TRACED)
176
177#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
178#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
179#define task_is_dead(task) ((task)->exit_state != 0)
180#define task_is_stopped_or_traced(task) \
181 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
182#define task_contributes_to_load(task) \
183 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
184 (task->flags & PF_FROZEN) == 0)
185
186#define __set_task_state(tsk, state_value) \
187 do { (tsk)->state = (state_value); } while (0)
188#define set_task_state(tsk, state_value) \
189 set_mb((tsk)->state, (state_value))
190
191#define __set_current_state(state_value) \
192 do { current->state = (state_value); } while (0)
193#define set_current_state(state_value) \
194 set_mb(current->state, (state_value))
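/*
 * Usage sketch (comment added for illustration, not verbatim from the
 * original): set_current_state() includes a memory barrier, so the state
 * write is ordered before the caller's condition test.  That ordering is
 * what makes the canonical sleep loop safe against missed wakeups:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!condition) {
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * __set_current_state() may be used where that serialisation is not
 * needed.
 */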
195
196#define TASK_COMM_LEN 16
197
198#include <linux/spinlock.h>
199
200extern rwlock_t tasklist_lock;
201extern spinlock_t mmlist_lock;
202
203struct task_struct;
204
205#ifdef CONFIG_PROVE_RCU
206extern int lockdep_tasklist_lock_is_held(void);
207#endif
208
209extern void sched_init(void);
210extern void sched_init_smp(void);
211extern asmlinkage void schedule_tail(struct task_struct *prev);
212extern void init_idle(struct task_struct *idle, int cpu);
213extern void init_idle_bootup_task(struct task_struct *idle);
214
215extern int runqueue_is_locked(int cpu);
216
217#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
218extern void select_nohz_load_balancer(int stop_tick);
219extern void set_cpu_sd_state_idle(void);
220extern int get_nohz_timer_target(void);
221#else
222static inline void select_nohz_load_balancer(int stop_tick) { }
223static inline void set_cpu_sd_state_idle(void) { }
224#endif
225
226extern void show_state_filter(unsigned long state_filter);
227
228static inline void show_state(void)
229{
230 show_state_filter(0);
231}
232
233extern void show_regs(struct pt_regs *);
234
235extern void show_stack(struct task_struct *task, unsigned long *sp);
236
237void io_schedule(void);
238long io_schedule_timeout(long timeout);
239
240extern void cpu_init (void);
241extern void trap_init(void);
242extern void update_process_times(int user);
243extern void scheduler_tick(void);
244
245extern void sched_show_task(struct task_struct *p);
246
247#ifdef CONFIG_LOCKUP_DETECTOR
248extern void touch_softlockup_watchdog(void);
249extern void touch_softlockup_watchdog_sync(void);
250extern void touch_all_softlockup_watchdogs(void);
251extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
252 void __user *buffer,
253 size_t *lenp, loff_t *ppos);
254extern unsigned int softlockup_panic;
255void lockup_detector_init(void);
256#else
257static inline void touch_softlockup_watchdog(void)
258{
259}
260static inline void touch_softlockup_watchdog_sync(void)
261{
262}
263static inline void touch_all_softlockup_watchdogs(void)
264{
265}
266static inline void lockup_detector_init(void)
267{
268}
269#endif
270
271#ifdef CONFIG_DETECT_HUNG_TASK
272extern unsigned int sysctl_hung_task_panic;
273extern unsigned long sysctl_hung_task_check_count;
274extern unsigned long sysctl_hung_task_timeout_secs;
275extern unsigned long sysctl_hung_task_warnings;
276extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
277 void __user *buffer,
278 size_t *lenp, loff_t *ppos);
279#else
280enum { sysctl_hung_task_timeout_secs = 0 };
281#endif
282
283#define __sched __attribute__((__section__(".sched.text")))
284
285extern char __sched_text_start[], __sched_text_end[];
286
287extern int in_sched_functions(unsigned long addr);
288
289#define MAX_SCHEDULE_TIMEOUT LONG_MAX
290extern signed long schedule_timeout(signed long timeout);
291extern signed long schedule_timeout_interruptible(signed long timeout);
292extern signed long schedule_timeout_killable(signed long timeout);
293extern signed long schedule_timeout_uninterruptible(signed long timeout);
294asmlinkage void schedule(void);
295extern void schedule_preempt_disabled(void);
296extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
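/*
 * Illustrative sketch (not part of the original header):
 * schedule_timeout() sleeps in the current task state for at most
 * @timeout jiffies and returns the jiffies remaining (0 on full expiry),
 * so a simple 100ms interruptible delay looks like:
 *
 *	long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * or, equivalently, schedule_timeout_interruptible(msecs_to_jiffies(100)),
 * which sets the task state itself.
 */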
297
298struct nsproxy;
299struct user_namespace;
300
301#define MAPCOUNT_ELF_CORE_MARGIN (5)
302#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
303
304extern int sysctl_max_map_count;
305
306#include <linux/aio.h>
307
308#ifdef CONFIG_MMU
309extern void arch_pick_mmap_layout(struct mm_struct *mm);
310extern unsigned long
311arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
312 unsigned long, unsigned long);
313extern unsigned long
314arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
315 unsigned long len, unsigned long pgoff,
316 unsigned long flags);
317extern void arch_unmap_area(struct mm_struct *, unsigned long);
318extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
319#else
320static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
321#endif
322
323
324extern void set_dumpable(struct mm_struct *mm, int value);
325extern int get_dumpable(struct mm_struct *mm);
326
327#define MMF_DUMPABLE 0
328#define MMF_DUMP_SECURELY 1
329
330#define MMF_DUMPABLE_BITS 2
331#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
332
333#define MMF_DUMP_ANON_PRIVATE 2
334#define MMF_DUMP_ANON_SHARED 3
335#define MMF_DUMP_MAPPED_PRIVATE 4
336#define MMF_DUMP_MAPPED_SHARED 5
337#define MMF_DUMP_ELF_HEADERS 6
338#define MMF_DUMP_HUGETLB_PRIVATE 7
339#define MMF_DUMP_HUGETLB_SHARED 8
340
341#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
342#define MMF_DUMP_FILTER_BITS 7
343#define MMF_DUMP_FILTER_MASK \
344 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
345#define MMF_DUMP_FILTER_DEFAULT \
346 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
347 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
348
349#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
350# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
351#else
352# define MMF_DUMP_MASK_DEFAULT_ELF 0
353#endif
354
355#define MMF_VM_MERGEABLE 16
356#define MMF_VM_HUGEPAGE 17
357
358#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
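/*
 * Illustrative note (not in the original header): the MMF_DUMP_* bits
 * live in mm->flags and select which mapping types are written to a core
 * dump; user space adjusts them via /proc/<pid>/coredump_filter.  A
 * hypothetical check for anonymous private mappings would be:
 *
 *	if (mm->flags & (1UL << MMF_DUMP_ANON_PRIVATE))
 *		...include the VMA in the dump...
 *
 * MMF_INIT_MASK is the subset of mm->flags a child inherits over fork.
 */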
359
360struct sighand_struct {
361 atomic_t count;
362 struct k_sigaction action[_NSIG];
363 spinlock_t siglock;
364 wait_queue_head_t signalfd_wqh;
365};
366
367struct pacct_struct {
368 int ac_flag;
369 long ac_exitcode;
370 unsigned long ac_mem;
371 cputime_t ac_utime, ac_stime;
372 unsigned long ac_minflt, ac_majflt;
373};
374
375struct cpu_itimer {
376 cputime_t expires;
377 cputime_t incr;
378 u32 error;
379 u32 incr_error;
380};
381
382struct task_cputime {
383 cputime_t utime;
384 cputime_t stime;
385 unsigned long long sum_exec_runtime;
386};
387#define prof_exp stime
388#define virt_exp utime
389#define sched_exp sum_exec_runtime
390
391#define INIT_CPUTIME \
392 (struct task_cputime) { \
393 .utime = 0, \
394 .stime = 0, \
395 .sum_exec_runtime = 0, \
396 }
397
398#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
399
400struct thread_group_cputimer {
401 struct task_cputime cputime;
402 int running;
403 raw_spinlock_t lock;
404};
405
406#include <linux/rwsem.h>
407struct autogroup;
408
409struct signal_struct {
410 atomic_t sigcnt;
411 atomic_t live;
412 int nr_threads;
413
414 wait_queue_head_t wait_chldexit;
415
416
417 struct task_struct *curr_target;
418
419
420 struct sigpending shared_pending;
421
422
423 int group_exit_code;
424 int notify_count;
425 struct task_struct *group_exit_task;
426
427
428 int group_stop_count;
429 unsigned int flags;
430
431 unsigned int is_child_subreaper:1;
432 unsigned int has_child_subreaper:1;
433
434
435 struct list_head posix_timers;
436
437
438 struct hrtimer real_timer;
439 struct pid *leader_pid;
440 ktime_t it_real_incr;
441
442 struct cpu_itimer it[2];
443
444 struct thread_group_cputimer cputimer;
445
446
447 struct task_cputime cputime_expires;
448
449 struct list_head cpu_timers[3];
450
451 struct pid *tty_old_pgrp;
452
453
454 int leader;
455
456 struct tty_struct *tty;
457
458#ifdef CONFIG_SCHED_AUTOGROUP
459 struct autogroup *autogroup;
460#endif
461 cputime_t utime, stime, cutime, cstime;
462 cputime_t gtime;
463 cputime_t cgtime;
464#ifndef CONFIG_VIRT_CPU_ACCOUNTING
465 cputime_t prev_utime, prev_stime;
466#endif
467 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
468 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
469 unsigned long inblock, oublock, cinblock, coublock;
470 unsigned long maxrss, cmaxrss;
471 struct task_io_accounting ioac;
472
473 unsigned long long sum_sched_runtime;
474
475 struct rlimit rlim[RLIM_NLIMITS];
476
477#ifdef CONFIG_BSD_PROCESS_ACCT
478 struct pacct_struct pacct;
479#endif
480#ifdef CONFIG_TASKSTATS
481 struct taskstats *stats;
482#endif
483#ifdef CONFIG_AUDIT
484 unsigned audit_tty;
485 struct tty_audit_buf *tty_audit_buf;
486#endif
487#ifdef CONFIG_CGROUPS
488 struct rw_semaphore group_rwsem;
489#endif
490
491 int oom_adj;
492 int oom_score_adj;
493 int oom_score_adj_min;
494
495 struct mutex cred_guard_mutex;
496};
497
498#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
499# define __ARCH_WANT_UNLOCKED_CTXSW
500#endif
501
502#define SIGNAL_STOP_STOPPED 0x00000001
503#define SIGNAL_STOP_CONTINUED 0x00000002
504#define SIGNAL_GROUP_EXIT 0x00000004
505#define SIGNAL_CLD_STOPPED 0x00000010
506#define SIGNAL_CLD_CONTINUED 0x00000020
507#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
508
509#define SIGNAL_UNKILLABLE 0x00000040
510
511static inline int signal_group_exit(const struct signal_struct *sig)
512{
513 return (sig->flags & SIGNAL_GROUP_EXIT) ||
514 (sig->group_exit_task != NULL);
515}
516
517struct user_struct {
518 atomic_t __count;
519 atomic_t processes;
520 atomic_t files;
521 atomic_t sigpending;
522#ifdef CONFIG_INOTIFY_USER
523 atomic_t inotify_watches;
524 atomic_t inotify_devs;
525#endif
526#ifdef CONFIG_FANOTIFY
527 atomic_t fanotify_listeners;
528#endif
529#ifdef CONFIG_EPOLL
530 atomic_long_t epoll_watches;
531#endif
532#ifdef CONFIG_POSIX_MQUEUE
533
534 unsigned long mq_bytes;
535#endif
536 unsigned long locked_shm;
537
538#ifdef CONFIG_KEYS
539 struct key *uid_keyring;
540 struct key *session_keyring;
541#endif
542
543
544 struct hlist_node uidhash_node;
545 uid_t uid;
546 struct user_namespace *user_ns;
547
548#ifdef CONFIG_PERF_EVENTS
549 atomic_long_t locked_vm;
550#endif
551};
552
553extern int uids_sysfs_init(void);
554
555extern struct user_struct *find_user(uid_t);
556
557extern struct user_struct root_user;
558#define INIT_USER (&root_user)
559
560
561struct backing_dev_info;
562struct reclaim_state;
563
564#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
565struct sched_info {
566
567 unsigned long pcount;
568 unsigned long long run_delay;
569
570
571 unsigned long long last_arrival,
572 last_queued;
573};
574#endif
575
576#ifdef CONFIG_TASK_DELAY_ACCT
577struct task_delay_info {
578 spinlock_t lock;
579 unsigned int flags;
580
581
582 struct timespec blkio_start, blkio_end;
583 u64 blkio_delay;
584 u64 swapin_delay;
585 u32 blkio_count;
586
587 u32 swapin_count;
588
589
590 struct timespec freepages_start, freepages_end;
591 u64 freepages_delay;
592 u32 freepages_count;
593};
594#endif
595
596static inline int sched_info_on(void)
597{
598#ifdef CONFIG_SCHEDSTATS
599 return 1;
600#elif defined(CONFIG_TASK_DELAY_ACCT)
601 extern int delayacct_on;
602 return delayacct_on;
603#else
604 return 0;
605#endif
606}
607
608enum cpu_idle_type {
609 CPU_IDLE,
610 CPU_NOT_IDLE,
611 CPU_NEWLY_IDLE,
612 CPU_MAX_IDLE_TYPES
613};
614
615#if 0
616# define SCHED_LOAD_RESOLUTION 10
617# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
618# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
619#else
620# define SCHED_LOAD_RESOLUTION 0
621# define scale_load(w) (w)
622# define scale_load_down(w) (w)
623#endif
624
625#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
626#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
627
628#define SCHED_POWER_SHIFT 10
629#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT)
630
631#ifdef CONFIG_SMP
632#define SD_LOAD_BALANCE 0x0001
633#define SD_BALANCE_NEWIDLE 0x0002
634#define SD_BALANCE_EXEC 0x0004
635#define SD_BALANCE_FORK 0x0008
636#define SD_BALANCE_WAKE 0x0010
637#define SD_WAKE_AFFINE 0x0020
638#define SD_PREFER_LOCAL 0x0040
639#define SD_SHARE_CPUPOWER 0x0080
640#define SD_POWERSAVINGS_BALANCE 0x0100
641#define SD_SHARE_PKG_RESOURCES 0x0200
642#define SD_SERIALIZE 0x0400
643#define SD_ASYM_PACKING 0x0800
644#define SD_PREFER_SIBLING 0x1000
645#define SD_OVERLAP 0x2000
646
647enum powersavings_balance_level {
648 POWERSAVINGS_BALANCE_NONE = 0,
649 POWERSAVINGS_BALANCE_BASIC,
650 POWERSAVINGS_BALANCE_WAKEUP,
651 MAX_POWERSAVINGS_BALANCE_LEVELS
652};
653
654extern int sched_mc_power_savings, sched_smt_power_savings;
655
656static inline int sd_balance_for_mc_power(void)
657{
658 if (sched_smt_power_savings)
659 return SD_POWERSAVINGS_BALANCE;
660
661 if (!sched_mc_power_savings)
662 return SD_PREFER_SIBLING;
663
664 return 0;
665}
666
667static inline int sd_balance_for_package_power(void)
668{
669 if (sched_mc_power_savings | sched_smt_power_savings)
670 return SD_POWERSAVINGS_BALANCE;
671
672 return SD_PREFER_SIBLING;
673}
674
675extern int __weak arch_sd_sibiling_asym_packing(void);
676
677
678static inline int sd_power_saving_flags(void)
679{
680 if (sched_mc_power_savings | sched_smt_power_savings)
681 return SD_BALANCE_NEWIDLE;
682
683 return 0;
684}
685
686struct sched_group_power {
687 atomic_t ref;
688 unsigned int power, power_orig;
689 unsigned long next_update;
690 atomic_t nr_busy_cpus;
691};
692
693struct sched_group {
694 struct sched_group *next;
695 atomic_t ref;
696
697 unsigned int group_weight;
698 struct sched_group_power *sgp;
699
700 unsigned long cpumask[0];
701};
702
703static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
704{
705 return to_cpumask(sg->cpumask);
706}
707
708static inline unsigned int group_first_cpu(struct sched_group *group)
709{
710 return cpumask_first(sched_group_cpus(group));
711}
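/*
 * Illustrative sketch (not part of the original header): the CPUs covered
 * by a scheduling group are walked with the ordinary cpumask iterators,
 * e.g.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_group_cpus(sg))
 *		...examine per-cpu state for cpu...
 *
 * group_first_cpu() is simply the lowest-numbered CPU in that mask.
 */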
712
713struct sched_domain_attr {
714 int relax_domain_level;
715};
716
717#define SD_ATTR_INIT (struct sched_domain_attr) { \
718 .relax_domain_level = -1, \
719}
720
721extern int sched_domain_level_max;
722
723struct sched_domain {
724
725 struct sched_domain *parent;
726 struct sched_domain *child;
727 struct sched_group *groups;
728 unsigned long min_interval;
729 unsigned long max_interval;
730 unsigned int busy_factor;
731 unsigned int imbalance_pct;
732 unsigned int cache_nice_tries;
733 unsigned int busy_idx;
734 unsigned int idle_idx;
735 unsigned int newidle_idx;
736 unsigned int wake_idx;
737 unsigned int forkexec_idx;
738 unsigned int smt_gain;
739 int flags;
740 int level;
741
742
743 unsigned long last_balance;
744 unsigned int balance_interval;
745 unsigned int nr_balance_failed;
746
747 u64 last_update;
748
749#ifdef CONFIG_SCHEDSTATS
750
751 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
752 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
753 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
754 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
755 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
756 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
757 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
758 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
759
760
761 unsigned int alb_count;
762 unsigned int alb_failed;
763 unsigned int alb_pushed;
764
765
766 unsigned int sbe_count;
767 unsigned int sbe_balanced;
768 unsigned int sbe_pushed;
769
770
771 unsigned int sbf_count;
772 unsigned int sbf_balanced;
773 unsigned int sbf_pushed;
774
775
776 unsigned int ttwu_wake_remote;
777 unsigned int ttwu_move_affine;
778 unsigned int ttwu_move_balance;
779#endif
780#ifdef CONFIG_SCHED_DEBUG
781 char *name;
782#endif
783 union {
784 void *private;
785 struct rcu_head rcu;
786 };
787
788 unsigned int span_weight;
789 unsigned long span[0];
790};
791
792static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
793{
794 return to_cpumask(sd->span);
795}
796
797extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
798 struct sched_domain_attr *dattr_new);
799
800cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
801void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
802
803static inline int test_sd_parent(struct sched_domain *sd, int flag)
804{
805 if (sd->parent && (sd->parent->flags & flag))
806 return 1;
807
808 return 0;
809}
810
811unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
812unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
813
814bool cpus_share_cache(int this_cpu, int that_cpu);
815
816#else
817
818struct sched_domain_attr;
819
820static inline void
821partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
822 struct sched_domain_attr *dattr_new)
823{
824}
825
826static inline bool cpus_share_cache(int this_cpu, int that_cpu)
827{
828 return true;
829}
830
831#endif
832
833
834struct io_context;
835
836
837#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
838extern void prefetch_stack(struct task_struct *t);
839#else
840static inline void prefetch_stack(struct task_struct *t) { }
841#endif
842
843struct audit_context;
844struct mempolicy;
845struct pipe_inode_info;
846struct uts_namespace;
847
848struct rq;
849struct sched_domain;
850
851#define WF_SYNC 0x01
852#define WF_FORK 0x02
853#define WF_MIGRATED 0x04
854
855#define ENQUEUE_WAKEUP 1
856#define ENQUEUE_HEAD 2
857#ifdef CONFIG_SMP
858#define ENQUEUE_WAKING 4
859#else
860#define ENQUEUE_WAKING 0
861#endif
862
863#define DEQUEUE_SLEEP 1
864
865struct sched_class {
866 const struct sched_class *next;
867
868 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
869 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
870 void (*yield_task) (struct rq *rq);
871 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
872
873 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
874
875 struct task_struct * (*pick_next_task) (struct rq *rq);
876 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
877
878#ifdef CONFIG_SMP
879 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
880
881 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
882 void (*post_schedule) (struct rq *this_rq);
883 void (*task_waking) (struct task_struct *task);
884 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
885
886 void (*set_cpus_allowed)(struct task_struct *p,
887 const struct cpumask *newmask);
888
889 void (*rq_online)(struct rq *rq);
890 void (*rq_offline)(struct rq *rq);
891#endif
892
893 void (*set_curr_task) (struct rq *rq);
894 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
895 void (*task_fork) (struct task_struct *p);
896
897 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
898 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
899 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
900 int oldprio);
901
902 unsigned int (*get_rr_interval) (struct rq *rq,
903 struct task_struct *task);
904
905#ifdef CONFIG_FAIR_GROUP_SCHED
906 void (*task_move_group) (struct task_struct *p, int on_rq);
907#endif
908};
909
910struct load_weight {
911 unsigned long weight, inv_weight;
912};
913
914#ifdef CONFIG_SCHEDSTATS
915struct sched_statistics {
916 u64 wait_start;
917 u64 wait_max;
918 u64 wait_count;
919 u64 wait_sum;
920 u64 iowait_count;
921 u64 iowait_sum;
922
923 u64 sleep_start;
924 u64 sleep_max;
925 s64 sum_sleep_runtime;
926
927 u64 block_start;
928 u64 block_max;
929 u64 exec_max;
930 u64 slice_max;
931
932 u64 nr_migrations_cold;
933 u64 nr_failed_migrations_affine;
934 u64 nr_failed_migrations_running;
935 u64 nr_failed_migrations_hot;
936 u64 nr_forced_migrations;
937
938 u64 nr_wakeups;
939 u64 nr_wakeups_sync;
940 u64 nr_wakeups_migrate;
941 u64 nr_wakeups_local;
942 u64 nr_wakeups_remote;
943 u64 nr_wakeups_affine;
944 u64 nr_wakeups_affine_attempts;
945 u64 nr_wakeups_passive;
946 u64 nr_wakeups_idle;
947};
948#endif
949
950struct sched_entity {
951 struct load_weight load;
952 struct rb_node run_node;
953 struct list_head group_node;
954 unsigned int on_rq;
955
956 u64 exec_start;
957 u64 sum_exec_runtime;
958 u64 vruntime;
959 u64 prev_sum_exec_runtime;
960
961 u64 nr_migrations;
962
963#ifdef CONFIG_SCHEDSTATS
964 struct sched_statistics statistics;
965#endif
966
967#ifdef CONFIG_FAIR_GROUP_SCHED
968 struct sched_entity *parent;
969
970 struct cfs_rq *cfs_rq;
971
972 struct cfs_rq *my_q;
973#endif
974};
975
976struct sched_rt_entity {
977 struct list_head run_list;
978 unsigned long timeout;
979 unsigned int time_slice;
980 int nr_cpus_allowed;
981
982 struct sched_rt_entity *back;
983#ifdef CONFIG_RT_GROUP_SCHED
984 struct sched_rt_entity *parent;
985
986 struct rt_rq *rt_rq;
987
988 struct rt_rq *my_q;
989#endif
990};
991
992#define RR_TIMESLICE (100 * HZ / 1000)
993
994struct rcu_node;
995
996enum perf_event_task_context {
997 perf_invalid_context = -1,
998 perf_hw_context = 0,
999 perf_sw_context,
1000 perf_nr_task_contexts,
1001};
1002
1003struct task_struct {
1004 volatile long state;
1005 void *stack;
1006 atomic_t usage;
1007 unsigned int flags;
1008 unsigned int ptrace;
1009
1010#ifdef CONFIG_SMP
1011 struct llist_node wake_entry;
1012 int on_cpu;
1013#endif
1014 int on_rq;
1015
1016 int prio, static_prio, normal_prio;
1017 unsigned int rt_priority;
1018 const struct sched_class *sched_class;
1019 struct sched_entity se;
1020 struct sched_rt_entity rt;
1021
1022#ifdef CONFIG_PREEMPT_NOTIFIERS
1023
1024 struct hlist_head preempt_notifiers;
1025#endif
1026
1027 unsigned char fpu_counter;
1028#ifdef CONFIG_BLK_DEV_IO_TRACE
1029 unsigned int btrace_seq;
1030#endif
1031
1032 unsigned int policy;
1033 cpumask_t cpus_allowed;
1034
1035#ifdef CONFIG_PREEMPT_RCU
1036 int rcu_read_lock_nesting;
1037 char rcu_read_unlock_special;
1038 struct list_head rcu_node_entry;
1039#endif
1040#ifdef CONFIG_TREE_PREEMPT_RCU
1041 struct rcu_node *rcu_blocked_node;
1042#endif
1043#ifdef CONFIG_RCU_BOOST
1044 struct rt_mutex *rcu_boost_mutex;
1045#endif
1046
1047#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1048 struct sched_info sched_info;
1049#endif
1050
1051 struct list_head tasks;
1052#ifdef CONFIG_SMP
1053 struct plist_node pushable_tasks;
1054#endif
1055
1056 struct mm_struct *mm, *active_mm;
1057#ifdef CONFIG_COMPAT_BRK
1058 unsigned brk_randomized:1;
1059#endif
1060#if defined(SPLIT_RSS_COUNTING)
1061 struct task_rss_stat rss_stat;
1062#endif
1063 int exit_state;
1064 int exit_code, exit_signal;
1065 int pdeath_signal;
1066 unsigned int jobctl;
1067
1068 unsigned int personality;
1069 unsigned did_exec:1;
1070 unsigned in_execve:1;
1071 unsigned in_iowait:1;
1072
1073
1074
1075 unsigned sched_reset_on_fork:1;
1076 unsigned sched_contributes_to_load:1;
1077
1078#ifdef CONFIG_GENERIC_HARDIRQS
1079
1080 unsigned irq_thread:1;
1081#endif
1082
1083 pid_t pid;
1084 pid_t tgid;
1085
1086#ifdef CONFIG_CC_STACKPROTECTOR
1087
1088 unsigned long stack_canary;
1089#endif
1090
1091 struct task_struct __rcu *real_parent;
1092 struct task_struct __rcu *parent;
1093 struct list_head children;
1094 struct list_head sibling;
1095 struct task_struct *group_leader;
1096
1097 struct list_head ptraced;
1098 struct list_head ptrace_entry;
1099
1100
1101 struct pid_link pids[PIDTYPE_MAX];
1102 struct list_head thread_group;
1103
1104 struct completion *vfork_done;
1105 int __user *set_child_tid;
1106 int __user *clear_child_tid;
1107
1108 cputime_t utime, stime, utimescaled, stimescaled;
1109 cputime_t gtime;
1110#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1111 cputime_t prev_utime, prev_stime;
1112#endif
1113 unsigned long nvcsw, nivcsw;
1114 struct timespec start_time;
1115 struct timespec real_start_time;
1116 unsigned long min_flt, maj_flt;
1117
1118 struct task_cputime cputime_expires;
1119 struct list_head cpu_timers[3];
1120
1121 const struct cred __rcu *real_cred;
1122 const struct cred __rcu *cred;
1123 struct cred *replacement_session_keyring;
1124
1125 char comm[TASK_COMM_LEN];
1126 int link_count, total_link_count;
1127#ifdef CONFIG_SYSVIPC
1128 struct sysv_sem sysvsem;
1129#endif
1130#ifdef CONFIG_DETECT_HUNG_TASK
1131 unsigned long last_switch_count;
1132#endif
1133 struct thread_struct thread;
1134 struct fs_struct *fs;
1135 struct files_struct *files;
1136 struct nsproxy *nsproxy;
1137 struct signal_struct *signal;
1138 struct sighand_struct *sighand;
1139
1140 sigset_t blocked, real_blocked;
1141 sigset_t saved_sigmask;
1142 struct sigpending pending;
1143
1144 unsigned long sas_ss_sp;
1145 size_t sas_ss_size;
1146 int (*notifier)(void *priv);
1147 void *notifier_data;
1148 sigset_t *notifier_mask;
1149 struct audit_context *audit_context;
1150#ifdef CONFIG_AUDITSYSCALL
1151 uid_t loginuid;
1152 unsigned int sessionid;
1153#endif
1154 seccomp_t seccomp;
1155
1156 u32 parent_exec_id;
1157 u32 self_exec_id;
1158 spinlock_t alloc_lock;
1159
1160
1161 raw_spinlock_t pi_lock;
1162
1163#ifdef CONFIG_RT_MUTEXES
1164
1165 struct plist_head pi_waiters;
1166
1167 struct rt_mutex_waiter *pi_blocked_on;
1168#endif
1169
1170#ifdef CONFIG_DEBUG_MUTEXES
1171
1172 struct mutex_waiter *blocked_on;
1173 struct task_struct *blocked_by;
1174 unsigned long blocked_since;
1175#endif
1176#ifdef CONFIG_TRACE_IRQFLAGS
1177 unsigned int irq_events;
1178 unsigned long hardirq_enable_ip;
1179 unsigned long hardirq_disable_ip;
1180 unsigned int hardirq_enable_event;
1181 unsigned int hardirq_disable_event;
1182 int hardirqs_enabled;
1183 int hardirq_context;
1184 unsigned long softirq_disable_ip;
1185 unsigned long softirq_enable_ip;
1186 unsigned int softirq_disable_event;
1187 unsigned int softirq_enable_event;
1188 int softirqs_enabled;
1189 int softirq_context;
1190#endif
1191#ifdef CONFIG_LOCKDEP
1192# define MAX_LOCK_DEPTH 48UL
1193 u64 curr_chain_key;
1194 int lockdep_depth;
1195 unsigned int lockdep_recursion;
1196 struct held_lock held_locks[MAX_LOCK_DEPTH];
1197 gfp_t lockdep_reclaim_gfp;
1198#endif
1199
1200 void *journal_info;
1201
1202 struct bio_list *bio_list;
1203
1204#ifdef CONFIG_BLOCK
1205 struct blk_plug *plug;
1206#endif
1207
1208 struct reclaim_state *reclaim_state;
1209
1210 struct backing_dev_info *backing_dev_info;
1211
1212 struct io_context *io_context;
1213
1214 unsigned long ptrace_message;
1215 siginfo_t *last_siginfo;
1216 struct task_io_accounting ioac;
1217#if defined(CONFIG_TASK_XACCT)
1218 u64 acct_rss_mem1;
1219 u64 acct_vm_mem1;
1220 cputime_t acct_timexpd;
1221#endif
1222#ifdef CONFIG_CPUSETS
1223 nodemask_t mems_allowed;
1224 seqcount_t mems_allowed_seq;
1225 int cpuset_mem_spread_rotor;
1226 int cpuset_slab_spread_rotor;
1227#endif
1228#ifdef CONFIG_CGROUPS
1229
1230 struct css_set __rcu *cgroups;
1231
1232 struct list_head cg_list;
1233#endif
1234#ifdef CONFIG_FUTEX
1235 struct robust_list_head __user *robust_list;
1236#ifdef CONFIG_COMPAT
1237 struct compat_robust_list_head __user *compat_robust_list;
1238#endif
1239 struct list_head pi_state_list;
1240 struct futex_pi_state *pi_state_cache;
1241#endif
1242#ifdef CONFIG_PERF_EVENTS
1243 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1244 struct mutex perf_event_mutex;
1245 struct list_head perf_event_list;
1246#endif
1247#ifdef CONFIG_NUMA
1248 struct mempolicy *mempolicy;
1249 short il_next;
1250 short pref_node_fork;
1251#endif
1252 struct rcu_head rcu;
1253
1254 struct pipe_inode_info *splice_pipe;
1255#ifdef CONFIG_TASK_DELAY_ACCT
1256 struct task_delay_info *delays;
1257#endif
1258#ifdef CONFIG_FAULT_INJECTION
1259 int make_it_fail;
1260#endif
1261 int nr_dirtied;
1262 int nr_dirtied_pause;
1263 unsigned long dirty_paused_when;
1264
1265#ifdef CONFIG_LATENCYTOP
1266 int latency_record_count;
1267 struct latency_record latency_record[LT_SAVECOUNT];
1268#endif
1269 unsigned long timer_slack_ns;
1270 unsigned long default_timer_slack_ns;
1271
1272 struct list_head *scm_work_list;
1273#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1274
1275 int curr_ret_stack;
1276
1277 struct ftrace_ret_stack *ret_stack;
1278
1279 unsigned long long ftrace_timestamp;
1280 atomic_t trace_overrun;
1281
1282 atomic_t tracing_graph_pause;
1283#endif
1284#ifdef CONFIG_TRACING
1285
1286 unsigned long trace;
1287
1288 unsigned long trace_recursion;
1289#endif
1290#ifdef CONFIG_CGROUP_MEM_RES_CTLR
1291 struct memcg_batch_info {
1292 int do_batch;
1293 struct mem_cgroup *memcg;
1294 unsigned long nr_pages;
1295 unsigned long memsw_nr_pages;
1296 } memcg_batch;
1297#endif
1298#ifdef CONFIG_HAVE_HW_BREAKPOINT
1299 atomic_t ptrace_bp_refcnt;
1300#endif
1301};
1302
1303#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1304
1305
1306#define MAX_USER_RT_PRIO 100
1307#define MAX_RT_PRIO MAX_USER_RT_PRIO
1308
1309#define MAX_PRIO (MAX_RT_PRIO + 40)
1310#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
1311
1312static inline int rt_prio(int prio)
1313{
1314 if (unlikely(prio < MAX_RT_PRIO))
1315 return 1;
1316 return 0;
1317}
1318
1319static inline int rt_task(struct task_struct *p)
1320{
1321 return rt_prio(p->prio);
1322}
1323
1324static inline struct pid *task_pid(struct task_struct *task)
1325{
1326 return task->pids[PIDTYPE_PID].pid;
1327}
1328
1329static inline struct pid *task_tgid(struct task_struct *task)
1330{
1331 return task->group_leader->pids[PIDTYPE_PID].pid;
1332}
1333
1334static inline struct pid *task_pgrp(struct task_struct *task)
1335{
1336 return task->group_leader->pids[PIDTYPE_PGID].pid;
1337}
1338
1339static inline struct pid *task_session(struct task_struct *task)
1340{
1341 return task->group_leader->pids[PIDTYPE_SID].pid;
1342}
1343
1344struct pid_namespace;
1345
1346pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1347 struct pid_namespace *ns);
1348
1349static inline pid_t task_pid_nr(struct task_struct *tsk)
1350{
1351 return tsk->pid;
1352}
1353
1354static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1355 struct pid_namespace *ns)
1356{
1357 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1358}
1359
1360static inline pid_t task_pid_vnr(struct task_struct *tsk)
1361{
1362 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1363}
1364
1365
1366static inline pid_t task_tgid_nr(struct task_struct *tsk)
1367{
1368 return tsk->tgid;
1369}
1370
1371pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1372
1373static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1374{
1375 return pid_vnr(task_tgid(tsk));
1376}
1377
1378
1379static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1380 struct pid_namespace *ns)
1381{
1382 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1383}
1384
1385static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1386{
1387 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1388}
1389
1390
1391static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1392 struct pid_namespace *ns)
1393{
1394 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1395}
1396
1397static inline pid_t task_session_vnr(struct task_struct *tsk)
1398{
1399 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1400}
1401
1402static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1403{
1404 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1405}
1406
1407static inline int pid_alive(struct task_struct *p)
1408{
1409 return p->pids[PIDTYPE_PID].pid != NULL;
1410}
1411
1412static inline int is_global_init(struct task_struct *tsk)
1413{
1414 return tsk->pid == 1;
1415}
1416
1417extern int is_container_init(struct task_struct *tsk);
1418
1419extern struct pid *cad_pid;
1420
1421extern void free_task(struct task_struct *tsk);
1422#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1423
1424extern void __put_task_struct(struct task_struct *t);
1425
1426static inline void put_task_struct(struct task_struct *t)
1427{
1428 if (atomic_dec_and_test(&t->usage))
1429 __put_task_struct(t);
1430}
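/*
 * Usage sketch (not part of the original header): taking a stable
 * reference to a task looked up under RCU, using the lookup helpers
 * declared further down in this header:
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(pid);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *
 *	if (p) {
 *		...use p safely...
 *		put_task_struct(p);
 *	}
 */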
1431
1432extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1433extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1434
1435extern int task_free_register(struct notifier_block *n);
1436extern int task_free_unregister(struct notifier_block *n);
1437
1438extern int task_fork_register(struct notifier_block *n);
1439extern int task_fork_unregister(struct notifier_block *n);
1440
1441#define PF_EXITING 0x00000004
1442#define PF_EXITPIDONE 0x00000008
1443#define PF_VCPU 0x00000010
1444#define PF_WQ_WORKER 0x00000020
1445#define PF_FORKNOEXEC 0x00000040
1446#define PF_MCE_PROCESS 0x00000080
1447#define PF_SUPERPRIV 0x00000100
1448#define PF_DUMPCORE 0x00000200
1449#define PF_SIGNALED 0x00000400
1450#define PF_MEMALLOC 0x00000800
1451#define PF_NPROC_EXCEEDED 0x00001000
1452#define PF_USED_MATH 0x00002000
1453#define PF_WAKE_UP_IDLE 0x00004000
1454#define PF_NOFREEZE 0x00008000
1455#define PF_FROZEN 0x00010000
1456#define PF_FSTRANS 0x00020000
1457#define PF_KSWAPD 0x00040000
1458#define PF_LESS_THROTTLE 0x00100000
1459#define PF_KTHREAD 0x00200000
1460#define PF_RANDOMIZE 0x00400000
1461#define PF_SWAPWRITE 0x00800000
1462#define PF_SPREAD_PAGE 0x01000000
1463#define PF_SPREAD_SLAB 0x02000000
1464#define PF_THREAD_BOUND 0x04000000
1465#define PF_MCE_EARLY 0x08000000
1466#define PF_MEMPOLICY 0x10000000
1467#define PF_MUTEX_TESTER 0x20000000
1468#define PF_FREEZER_SKIP 0x40000000
1469
1470#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1471#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1472#define clear_used_math() clear_stopped_child_used_math(current)
1473#define set_used_math() set_stopped_child_used_math(current)
1474#define conditional_stopped_child_used_math(condition, child) \
1475 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1476#define conditional_used_math(condition) \
1477 conditional_stopped_child_used_math(condition, current)
1478#define copy_to_stopped_child_used_math(child) \
1479 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1480#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1481#define used_math() tsk_used_math(current)
1482
1483#define JOBCTL_STOP_SIGMASK 0xffff
1484
1485#define JOBCTL_STOP_DEQUEUED_BIT 16
1486#define JOBCTL_STOP_PENDING_BIT 17
1487#define JOBCTL_STOP_CONSUME_BIT 18
1488#define JOBCTL_TRAP_STOP_BIT 19
1489#define JOBCTL_TRAP_NOTIFY_BIT 20
1490#define JOBCTL_TRAPPING_BIT 21
1491#define JOBCTL_LISTENING_BIT 22
1492
1493#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
1494#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
1495#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
1496#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
1497#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
1498#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
1499#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
1500
1501#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1502#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1503
1504extern bool task_set_jobctl_pending(struct task_struct *task,
1505 unsigned int mask);
1506extern void task_clear_jobctl_trapping(struct task_struct *task);
1507extern void task_clear_jobctl_pending(struct task_struct *task,
1508 unsigned int mask);
1509
1510#ifdef CONFIG_PREEMPT_RCU
1511
1512#define RCU_READ_UNLOCK_BLOCKED (1 << 0)
1513#define RCU_READ_UNLOCK_NEED_QS (1 << 1)
1514
1515static inline void rcu_copy_process(struct task_struct *p)
1516{
1517 p->rcu_read_lock_nesting = 0;
1518 p->rcu_read_unlock_special = 0;
1519#ifdef CONFIG_TREE_PREEMPT_RCU
1520 p->rcu_blocked_node = NULL;
1521#endif
1522#ifdef CONFIG_RCU_BOOST
1523 p->rcu_boost_mutex = NULL;
1524#endif
1525 INIT_LIST_HEAD(&p->rcu_node_entry);
1526}
1527
1528#else
1529
1530static inline void rcu_copy_process(struct task_struct *p)
1531{
1532}
1533
1534#endif
1535
1536#ifdef CONFIG_SMP
1537extern void do_set_cpus_allowed(struct task_struct *p,
1538 const struct cpumask *new_mask);
1539
1540extern int set_cpus_allowed_ptr(struct task_struct *p,
1541 const struct cpumask *new_mask);
1542#else
1543static inline void do_set_cpus_allowed(struct task_struct *p,
1544 const struct cpumask *new_mask)
1545{
1546}
1547static inline int set_cpus_allowed_ptr(struct task_struct *p,
1548 const struct cpumask *new_mask)
1549{
1550 if (!cpumask_test_cpu(0, new_mask))
1551 return -EINVAL;
1552 return 0;
1553}
1554#endif
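/*
 * Illustrative sketch (not part of the original header): restricting a
 * task to a single CPU, e.g. when binding a helper kthread:
 *
 *	int ret = set_cpus_allowed_ptr(task, cpumask_of(cpu));
 *
 *	if (ret)
 *		...no usable CPU in the new mask, handle the error...
 *
 * On !SMP builds the stub above only accepts masks that contain CPU 0.
 */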
1555
1556static inline void set_wake_up_idle(bool enabled)
1557{
1558 if (enabled)
1559 current->flags |= PF_WAKE_UP_IDLE;
1560 else
1561 current->flags &= ~PF_WAKE_UP_IDLE;
1562}
1563
1564#ifdef CONFIG_NO_HZ
1565void calc_load_enter_idle(void);
1566void calc_load_exit_idle(void);
1567#else
1568static inline void calc_load_enter_idle(void) { }
1569static inline void calc_load_exit_idle(void) { }
1570#endif
1571
1572#ifndef CONFIG_CPUMASK_OFFSTACK
1573static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1574{
1575 return set_cpus_allowed_ptr(p, &new_mask);
1576}
1577#endif
1578
1579extern unsigned long long notrace sched_clock(void);
1580extern u64 cpu_clock(int cpu);
1581extern u64 local_clock(void);
1582extern u64 sched_clock_cpu(int cpu);
1583
1584
1585extern void sched_clock_init(void);
1586
1587#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1588static inline void sched_clock_tick(void)
1589{
1590}
1591
1592static inline void sched_clock_idle_sleep_event(void)
1593{
1594}
1595
1596static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1597{
1598}
1599#else
1600extern int sched_clock_stable;
1601
1602extern void sched_clock_tick(void);
1603extern void sched_clock_idle_sleep_event(void);
1604extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1605#endif
1606
1607#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1608extern void enable_sched_clock_irqtime(void);
1609extern void disable_sched_clock_irqtime(void);
1610#else
1611static inline void enable_sched_clock_irqtime(void) {}
1612static inline void disable_sched_clock_irqtime(void) {}
1613#endif
1614
1615extern unsigned long long
1616task_sched_runtime(struct task_struct *task);
1617
1618#ifdef CONFIG_SMP
1619extern void sched_exec(void);
1620#else
1621#define sched_exec() {}
1622#endif
1623
1624extern void sched_clock_idle_sleep_event(void);
1625extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1626
1627#ifdef CONFIG_HOTPLUG_CPU
1628extern void idle_task_exit(void);
1629#else
1630static inline void idle_task_exit(void) {}
1631#endif
1632
1633#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1634extern void wake_up_idle_cpu(int cpu);
1635#else
1636static inline void wake_up_idle_cpu(int cpu) { }
1637#endif
1638
1639extern unsigned int sysctl_sched_latency;
1640extern unsigned int sysctl_sched_min_granularity;
1641extern unsigned int sysctl_sched_wakeup_granularity;
1642extern unsigned int sysctl_sched_child_runs_first;
1643extern unsigned int sysctl_sched_wake_to_idle;
1644
1645enum sched_tunable_scaling {
1646 SCHED_TUNABLESCALING_NONE,
1647 SCHED_TUNABLESCALING_LOG,
1648 SCHED_TUNABLESCALING_LINEAR,
1649 SCHED_TUNABLESCALING_END,
1650};
1651extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
1652
1653#ifdef CONFIG_SCHED_DEBUG
1654extern unsigned int sysctl_sched_migration_cost;
1655extern unsigned int sysctl_sched_nr_migrate;
1656extern unsigned int sysctl_sched_time_avg;
1657extern unsigned int sysctl_timer_migration;
1658extern unsigned int sysctl_sched_shares_window;
1659
1660int sched_proc_update_handler(struct ctl_table *table, int write,
1661 void __user *buffer, size_t *length,
1662 loff_t *ppos);
1663#endif
1664#ifdef CONFIG_SCHED_DEBUG
1665static inline unsigned int get_sysctl_timer_migration(void)
1666{
1667 return sysctl_timer_migration;
1668}
1669#else
1670static inline unsigned int get_sysctl_timer_migration(void)
1671{
1672 return 1;
1673}
1674#endif
1675extern unsigned int sysctl_sched_rt_period;
1676extern int sysctl_sched_rt_runtime;
1677
1678int sched_rt_handler(struct ctl_table *table, int write,
1679 void __user *buffer, size_t *lenp,
1680 loff_t *ppos);
1681
1682#ifdef CONFIG_SCHED_AUTOGROUP
1683extern unsigned int sysctl_sched_autogroup_enabled;
1684
1685extern void sched_autogroup_create_attach(struct task_struct *p);
1686extern void sched_autogroup_detach(struct task_struct *p);
1687extern void sched_autogroup_fork(struct signal_struct *sig);
1688extern void sched_autogroup_exit(struct signal_struct *sig);
1689#ifdef CONFIG_PROC_FS
1690extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
1691extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
1692#endif
1693#else
1694static inline void sched_autogroup_create_attach(struct task_struct *p) { }
1695static inline void sched_autogroup_detach(struct task_struct *p) { }
1696static inline void sched_autogroup_fork(struct signal_struct *sig) { }
1697static inline void sched_autogroup_exit(struct signal_struct *sig) { }
1698#endif
1699
1700#ifdef CONFIG_CFS_BANDWIDTH
1701extern unsigned int sysctl_sched_cfs_bandwidth_slice;
1702#endif
1703
1704#ifdef CONFIG_RT_MUTEXES
1705extern int rt_mutex_getprio(struct task_struct *p);
1706extern void rt_mutex_setprio(struct task_struct *p, int prio);
1707extern void rt_mutex_adjust_pi(struct task_struct *p);
1708static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
1709{
1710 return tsk->pi_blocked_on != NULL;
1711}
1712#else
1713static inline int rt_mutex_getprio(struct task_struct *p)
1714{
1715 return p->normal_prio;
1716}
1717# define rt_mutex_adjust_pi(p) do { } while (0)
1718static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
1719{
1720 return false;
1721}
1722#endif
1723
1724extern bool yield_to(struct task_struct *p, bool preempt);
1725extern void set_user_nice(struct task_struct *p, long nice);
1726extern int task_prio(const struct task_struct *p);
1727extern int task_nice(const struct task_struct *p);
1728extern int can_nice(const struct task_struct *p, const int nice);
1729extern int task_curr(const struct task_struct *p);
1730extern int idle_cpu(int cpu);
1731extern int sched_setscheduler(struct task_struct *, int,
1732 const struct sched_param *);
1733extern int sched_setscheduler_nocheck(struct task_struct *, int,
1734 const struct sched_param *);
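/*
 * Illustrative sketch (not part of the original header): moving a kernel
 * thread into the FIFO real-time class from kernel code, where
 * sched_setscheduler_nocheck() skips the permission checks applied on
 * the syscall path:
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	sched_setscheduler_nocheck(task, SCHED_FIFO, &param);
 */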
1735extern struct task_struct *idle_task(int cpu);
1736static inline bool is_idle_task(const struct task_struct *p)
1737{
1738 return p->pid == 0;
1739}
1740extern struct task_struct *curr_task(int cpu);
1741extern void set_curr_task(int cpu, struct task_struct *p);
1742
1743void yield(void);
1744
1745extern struct exec_domain default_exec_domain;
1746
1747union thread_union {
1748 struct thread_info thread_info;
1749 unsigned long stack[THREAD_SIZE/sizeof(long)];
1750};
1751
1752#ifndef __HAVE_ARCH_KSTACK_END
1753static inline int kstack_end(void *addr)
1754{
1755 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
1756}
1757#endif
1758
1759extern union thread_union init_thread_union;
1760extern struct task_struct init_task;
1761
1762extern struct mm_struct init_mm;
1763
1764extern struct pid_namespace init_pid_ns;
1765
1766
1767extern struct task_struct *find_task_by_vpid(pid_t nr);
1768extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1769 struct pid_namespace *ns);
1770
1771extern void __set_special_pids(struct pid *pid);
1772
1773extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
1774static inline struct user_struct *get_uid(struct user_struct *u)
1775{
1776 atomic_inc(&u->__count);
1777 return u;
1778}
1779extern void free_uid(struct user_struct *);
1780extern void release_uids(struct user_namespace *ns);
1781
1782#include <asm/current.h>
1783
1784extern void xtime_update(unsigned long ticks);
1785
1786extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1787extern int wake_up_process(struct task_struct *tsk);
1788extern void wake_up_new_task(struct task_struct *tsk);
1789#ifdef CONFIG_SMP
1790 extern void kick_process(struct task_struct *tsk);
1791#else
1792 static inline void kick_process(struct task_struct *tsk) { }
1793#endif
1794extern void sched_fork(struct task_struct *p);
1795extern void sched_dead(struct task_struct *p);
1796
1797extern void proc_caches_init(void);
1798extern void flush_signals(struct task_struct *);
1799extern void __flush_signals(struct task_struct *);
1800extern void ignore_signals(struct task_struct *);
1801extern void flush_signal_handlers(struct task_struct *, int force_default);
1802extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
1803
1804static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
1805{
1806 unsigned long flags;
1807 int ret;
1808
1809 spin_lock_irqsave(&tsk->sighand->siglock, flags);
1810 ret = dequeue_signal(tsk, mask, info);
1811 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
1812
1813 return ret;
1814}
1815
1816extern void block_all_signals(int (*notifier)(void *priv), void *priv,
1817 sigset_t *mask);
1818extern void unblock_all_signals(void);
1819extern void release_task(struct task_struct * p);
1820extern int send_sig_info(int, struct siginfo *, struct task_struct *);
1821extern int force_sigsegv(int, struct task_struct *);
1822extern int force_sig_info(int, struct siginfo *, struct task_struct *);
1823extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
1824extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
1825extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
1826 const struct cred *, u32);
1827extern int kill_pgrp(struct pid *pid, int sig, int priv);
1828extern int kill_pid(struct pid *pid, int sig, int priv);
1829extern int kill_proc_info(int, struct siginfo *, pid_t);
1830extern __must_check bool do_notify_parent(struct task_struct *, int);
1831extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
1832extern void force_sig(int, struct task_struct *);
1833extern int send_sig(int, struct task_struct *, int);
1834extern int zap_other_threads(struct task_struct *p);
1835extern struct sigqueue *sigqueue_alloc(void);
1836extern void sigqueue_free(struct sigqueue *);
1837extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
1838extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
1839extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
1840
1841static inline int kill_cad_pid(int sig, int priv)
1842{
1843 return kill_pid(cad_pid, sig, priv);
1844}
1845
1846#define SEND_SIG_NOINFO ((struct siginfo *) 0)
1847#define SEND_SIG_PRIV ((struct siginfo *) 1)
1848#define SEND_SIG_FORCED ((struct siginfo *) 2)
1849
1850static inline int on_sig_stack(unsigned long sp)
1851{
1852#ifdef CONFIG_STACK_GROWSUP
1853 return sp >= current->sas_ss_sp &&
1854 sp - current->sas_ss_sp < current->sas_ss_size;
1855#else
1856 return sp > current->sas_ss_sp &&
1857 sp - current->sas_ss_sp <= current->sas_ss_size;
1858#endif
1859}
1860
1861static inline int sas_ss_flags(unsigned long sp)
1862{
1863 return (current->sas_ss_size == 0 ? SS_DISABLE
1864 : on_sig_stack(sp) ? SS_ONSTACK : 0);
1865}
1866
1867extern struct mm_struct * mm_alloc(void);
1868
1869extern void __mmdrop(struct mm_struct *);
1870static inline void mmdrop(struct mm_struct * mm)
1871{
1872 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
1873 __mmdrop(mm);
1874}
1875
1876extern void mmput(struct mm_struct *);
1877extern struct mm_struct *get_task_mm(struct task_struct *task);
1878extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
1879extern void mm_release(struct task_struct *, struct mm_struct *);
1880extern struct mm_struct *dup_mm(struct task_struct *tsk);
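/*
 * Usage sketch (not part of the original header): get_task_mm() returns
 * the task's mm with its user count raised, or NULL for kernel threads;
 * that reference is dropped with mmput().  mmdrop() instead releases the
 * lower-level mm_count reference held by lazy "active_mm" users:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...inspect mm...
 *		mmput(mm);
 *	}
 */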
1881
1882extern int copy_thread(unsigned long, unsigned long, unsigned long,
1883 struct task_struct *, struct pt_regs *);
1884extern void flush_thread(void);
1885extern void exit_thread(void);
1886
1887extern void exit_files(struct task_struct *);
1888extern void __cleanup_sighand(struct sighand_struct *);
1889
1890extern void exit_itimers(struct signal_struct *);
1891extern void flush_itimer_signals(void);
1892
1893extern void do_group_exit(int);
1894
1895extern void daemonize(const char *, ...);
1896extern int allow_signal(int);
1897extern int disallow_signal(int);
1898
1899extern int do_execve(const char *,
1900 const char __user * const __user *,
1901 const char __user * const __user *, struct pt_regs *);
1902extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
1903struct task_struct *fork_idle(int);
1904
1905extern void set_task_comm(struct task_struct *tsk, char *from);
1906extern char *get_task_comm(char *to, struct task_struct *tsk);
1907
1908#ifdef CONFIG_SMP
1909void scheduler_ipi(void);
1910extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1911#else
1912static inline void scheduler_ipi(void) { }
1913static inline unsigned long wait_task_inactive(struct task_struct *p,
1914 long match_state)
1915{
1916 return 1;
1917}
1918#endif
1919
1920#define next_task(p) \
1921 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
1922
1923#define for_each_process(p) \
1924 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
1925
1926extern bool current_is_single_threaded(void);
1927
1928#define do_each_thread(g, t) \
1929 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
1930
1931#define while_each_thread(g, t) \
1932 while ((t = next_thread(t)) != g)
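/*
 * Illustrative sketch (not part of the original header): the iterators
 * above visit every process and every thread, and must run under
 * tasklist_lock (or RCU, with the usual liveness caveats):
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		...g is the group leader, t the thread being visited...
 *	} while_each_thread(g, t);
 *	read_unlock(&tasklist_lock);
 */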
1933
1934static inline int get_nr_threads(struct task_struct *tsk)
1935{
1936 return tsk->signal->nr_threads;
1937}
1938
1939static inline bool thread_group_leader(struct task_struct *p)
1940{
1941 return p->exit_signal >= 0;
1942}
1943
1944static inline int has_group_leader_pid(struct task_struct *p)
1945{
1946 return p->pid == p->tgid;
1947}
1948
1949static inline
1950int same_thread_group(struct task_struct *p1, struct task_struct *p2)
1951{
1952 return p1->tgid == p2->tgid;
1953}
1954
1955static inline struct task_struct *next_thread(const struct task_struct *p)
1956{
1957 return list_entry_rcu(p->thread_group.next,
1958 struct task_struct, thread_group);
1959}
1960
1961static inline int thread_group_empty(struct task_struct *p)
1962{
1963 return list_empty(&p->thread_group);
1964}
1965
1966#define delay_group_leader(p) \
1967 (thread_group_leader(p) && !thread_group_empty(p))
1968
1969static inline void task_lock(struct task_struct *p)
1970{
1971 spin_lock(&p->alloc_lock);
1972}
1973
1974static inline void task_unlock(struct task_struct *p)
1975{
1976 spin_unlock(&p->alloc_lock);
1977}
1978
1979extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1980 unsigned long *flags);
1981
1982static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
1983 unsigned long *flags)
1984{
1985 struct sighand_struct *ret;
1986
1987 ret = __lock_task_sighand(tsk, flags);
1988 (void)__cond_lock(&tsk->sighand->siglock, ret);
1989 return ret;
1990}
1991
1992static inline void unlock_task_sighand(struct task_struct *tsk,
1993 unsigned long *flags)
1994{
1995 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
1996}
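/*
 * Usage sketch (not part of the original header): lock_task_sighand()
 * returns NULL once the task's sighand has been detached (the task is
 * being released), so callers check the return value before touching
 * signal state:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		...task->signal and task->sighand are stable here...
 *		unlock_task_sighand(task, &flags);
 *	}
 */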

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

static inline void threadgroup_lock(struct task_struct *tsk)
{
	mutex_lock(&tsk->signal->cred_guard_mutex);
	down_write(&tsk->signal->group_rwsem);
}

static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
	mutex_unlock(&tsk->signal->cred_guard_mutex);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif
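
/*
 * Illustrative sketch (not part of the original header): paths that create
 * or remove threads wrap the change in threadgroup_change_begin()/_end(),
 * while a cgroup migration takes threadgroup_lock() on the group leader to
 * see a stable thread group for its duration:
 *
 *	threadgroup_lock(leader);
 *	... iterate the leader's thread group and attach each thread ...
 *	threadgroup_unlock(leader);
 */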

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
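
/*
 * Illustrative sketch (not part of the original header): object_is_on_stack()
 * is typically used to catch buffers that must not live on the kernel stack,
 * e.g. memory about to be handed to DMA:
 *
 *	char buf[64];
 *
 *	WARN_ON(object_is_on_stack(buf));	// fires: buf is stack memory
 */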

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
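
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for an interruptible wait is to bail out and let the syscall be restarted
 * once a signal becomes pending:
 *
 *	while (!condition) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule_timeout_interruptible(HZ / 10);
 *	}
 */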

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})
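
/*
 * Illustrative sketch (not part of the original header): long-running loops
 * in process context call cond_resched() so other tasks are not starved on
 * non-preemptible kernels (process_page() below is a hypothetical helper):
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		process_page(i);
 *		cond_resched();
 *	}
 */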

static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
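
/*
 * Illustrative sketch (not part of the original header): spin_needbreak()
 * pairs with cond_resched_lock() to drop a contended lock during a long
 * hold (work_left() and do_one_item() are hypothetical helpers):
 *
 *	spin_lock(&q->lock);
 *	while (work_left(q)) {
 *		do_one_item(q);
 *		if (need_resched() || spin_needbreak(&q->lock))
 *			cond_resched_lock(&q->lock);
 *	}
 *	spin_unlock(&q->lock);
 */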

void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_CGROUP_SCHED

extern struct task_group root_task_group;

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
extern int sched_group_set_rt_runtime(struct task_group *tg,
				      long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
				     long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif

extern int task_can_switch_user(struct user_struct *up,
				struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif
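
/*
 * Illustrative sketch (not part of the original header): the I/O accounting
 * helpers are charged from the read/write paths roughly like this
 * (do_the_read() is a hypothetical stand-in for the actual transfer):
 *
 *	ret = do_the_read(file, buf, count);
 *	if (ret > 0)
 *		add_rchar(current, ret);
 *	inc_syscr(current);
 */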

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
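
/*
 * Illustrative sketch (not part of the original header): rlimit() reads the
 * calling task's current soft limit, e.g. when checking an mlock request
 * against RLIMIT_MEMLOCK (locked_pages is a stand-in for the caller's count):
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (locked_pages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 */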

#ifdef CONFIG_CGROUP_TIMER_SLACK
extern unsigned long task_get_effective_timer_slack(struct task_struct *tsk);
#else
static inline unsigned long task_get_effective_timer_slack(
		struct task_struct *tsk)
{
	return tsk->timer_slack_ns;
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_SCHED_H */