#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <asm/param.h>	/* for HZ */

#include <linux/config.h>
#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>

#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */

struct exec_domain;

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
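/*
 * Illustrative only, not part of the original header: a pthread-style
 * thread is typically created by userspace with a flag combination along
 * these lines:
 *
 *	CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *	CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *	CLONE_CHILD_CLEARTID
 */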

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
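/*
 * Illustrative only, not part of the original header: the timer code
 * updates the averages every LOAD_FREQ ticks roughly as
 *
 *	CALC_LOAD(avenrun[0], EXP_1, nr_active() * FIXED_1);
 *
 * and a fixed-point average can be decoded for display with
 *
 *	whole = avenrun[0] >> FSHIFT;
 *	frac  = ((avenrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT;
 */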

extern unsigned long total_forks;
extern int nr_threads;
extern int last_pid;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>

#include <asm/processor.h>

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		4
#define TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_NONINTERACTIVE	64

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
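/*
 * Illustrative only, not part of the original header: the barrier in
 * set_current_state() pairs with the waker's update of the condition,
 * e.g.
 *
 *	sleeper:				waker:
 *	set_current_state(TASK_INTERRUPTIBLE);	condition = 1;
 *	if (!condition)				wake_up_process(p);
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 */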

/* Task command name length */
#define TASK_COMM_LEN 16

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3

struct sched_param {
	int sched_priority;
};

#ifdef __KERNEL__

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

typedef struct task_struct task_t;

extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(task_t *idle, int cpu);

extern cpumask_t nohz_cpu_mask;

extern void show_state(void);
extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
#else
static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
#endif


/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))
/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);

struct namespace;

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	65536

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
typedef atomic_long_t mm_counter_t;

#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--
typedef unsigned long mm_counter_t;

#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)
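/*
 * Illustrative only, not part of the original header: fault handlers bump
 * these counters (e.g. inc_mm_counter(mm, anon_rss) when an anonymous page
 * is installed), the unmap paths refresh the high-water mark via
 * update_hiwater_rss(mm) before the RSS shrinks, and get_mm_rss(mm) yields
 * the total resident pages for /proc reporting.
 */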

struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects page tables and some counters */

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	/* Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	mm_counter_t _file_rss;
	mm_counter_t _anon_rss;

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	unsigned dumpable:2;
	cpumask_t cpu_vm_mask;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Token based thrashing protection. */
	unsigned long swap_token_time;
	char recent_pagein;

	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;

	/* aio bits */
	rwlock_t		ioctx_list_lock;
	struct kioctx		*ioctx_list;
};

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	task_t			*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct	*group_exit_task;
	int			notify_count;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct task_struct *tsk;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/* job control IDs */
	pid_t pgrp;
	pid_t tty_old_pgrp;
	pid_t session;
	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sched_time;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

	struct list_head cpu_timers[3];

	/* keep the process-shared keyrings here so that they do the right
	 * thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
	struct key *session_keyring;	/* keyring inherited over fork */
	struct key *process_keyring;	/* keyring private to this process */
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */


/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)

#define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))
#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct list_head uidhash_list;
	uid_t uid;
};

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

typedef struct prio_array prio_array_t;
struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHEDSTATS
struct sched_info {
	/* cumulative counters */
	unsigned long	cpu_time,	/* time spent on the cpu */
			run_delay,	/* time spent waiting on a runqueue */
			pcnt;		/* # of timeslices run on this cpu */

	/* timestamps */
	unsigned long	last_arrival,	/* when we last ran on a cpu */
			last_queued;	/* when we were last queued to run */
};

extern struct file_operations proc_schedstat_operations;
#endif

enum idle_type
{
	SCHED_IDLE,
	NOT_IDLE,
	NEWLY_IDLE,
	MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */

#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		4	/* Balance on exec */
#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
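/*
 * Illustrative only, not part of the original header: a topology level
 * combines these flags; an SMT sibling domain (CPUs sharing a core) might
 * use something like
 *
 *	SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE | SD_BALANCE_EXEC |
 *	SD_WAKE_AFFINE | SD_WAKE_IDLE | SD_SHARE_CPUPOWER
 */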
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 561 |  | 
 | 562 | struct sched_group { | 
 | 563 | 	struct sched_group *next;	/* Must be a circular list */ | 
 | 564 | 	cpumask_t cpumask; | 
 | 565 |  | 
 | 566 | 	/* | 
 | 567 | 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a | 
 | 568 | 	 * single CPU. This is read only (except for setup, hotplug CPU). | 
 | 569 | 	 */ | 
 | 570 | 	unsigned long cpu_power; | 
 | 571 | }; | 
 | 572 |  | 
 | 573 | struct sched_domain { | 
 | 574 | 	/* These fields must be setup */ | 
 | 575 | 	struct sched_domain *parent;	/* top domain must be null terminated */ | 
 | 576 | 	struct sched_group *groups;	/* the balancing groups of the domain */ | 
 | 577 | 	cpumask_t span;			/* span of all CPUs in this domain */ | 
 | 578 | 	unsigned long min_interval;	/* Minimum balance interval ms */ | 
 | 579 | 	unsigned long max_interval;	/* Maximum balance interval ms */ | 
 | 580 | 	unsigned int busy_factor;	/* less balancing by factor if busy */ | 
 | 581 | 	unsigned int imbalance_pct;	/* No balance until over watermark */ | 
 | 582 | 	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ | 
 | 583 | 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */ | 
 | 584 | 	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */ | 
| Nick Piggin | 7897986 | 2005-06-25 14:57:13 -0700 | [diff] [blame] | 585 | 	unsigned int busy_idx; | 
 | 586 | 	unsigned int idle_idx; | 
 | 587 | 	unsigned int newidle_idx; | 
 | 588 | 	unsigned int wake_idx; | 
| Nick Piggin | 147cbb4 | 2005-06-25 14:57:19 -0700 | [diff] [blame] | 589 | 	unsigned int forkexec_idx; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 590 | 	int flags;			/* See SD_* */ | 
 | 591 |  | 
 | 592 | 	/* Runtime fields. */ | 
 | 593 | 	unsigned long last_balance;	/* init to jiffies. units in jiffies */ | 
 | 594 | 	unsigned int balance_interval;	/* initialise to 1. units in ms. */ | 
 | 595 | 	unsigned int nr_balance_failed; /* initialise to 0 */ | 
 | 596 |  | 
 | 597 | #ifdef CONFIG_SCHEDSTATS | 
 | 598 | 	/* load_balance() stats */ | 
 | 599 | 	unsigned long lb_cnt[MAX_IDLE_TYPES]; | 
 | 600 | 	unsigned long lb_failed[MAX_IDLE_TYPES]; | 
 | 601 | 	unsigned long lb_balanced[MAX_IDLE_TYPES]; | 
 | 602 | 	unsigned long lb_imbalance[MAX_IDLE_TYPES]; | 
 | 603 | 	unsigned long lb_gained[MAX_IDLE_TYPES]; | 
 | 604 | 	unsigned long lb_hot_gained[MAX_IDLE_TYPES]; | 
 | 605 | 	unsigned long lb_nobusyg[MAX_IDLE_TYPES]; | 
 | 606 | 	unsigned long lb_nobusyq[MAX_IDLE_TYPES]; | 
 | 607 |  | 
 | 608 | 	/* Active load balancing */ | 
 | 609 | 	unsigned long alb_cnt; | 
 | 610 | 	unsigned long alb_failed; | 
 | 611 | 	unsigned long alb_pushed; | 
 | 612 |  | 
| Nick Piggin | 68767a0 | 2005-06-25 14:57:20 -0700 | [diff] [blame] | 613 | 	/* SD_BALANCE_EXEC stats */ | 
 | 614 | 	unsigned long sbe_cnt; | 
 | 615 | 	unsigned long sbe_balanced; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 616 | 	unsigned long sbe_pushed; | 
 | 617 |  | 
| Nick Piggin | 68767a0 | 2005-06-25 14:57:20 -0700 | [diff] [blame] | 618 | 	/* SD_BALANCE_FORK stats */ | 
 | 619 | 	unsigned long sbf_cnt; | 
 | 620 | 	unsigned long sbf_balanced; | 
 | 621 | 	unsigned long sbf_pushed; | 
 | 622 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 623 | 	/* try_to_wake_up() stats */ | 
 | 624 | 	unsigned long ttwu_wake_remote; | 
 | 625 | 	unsigned long ttwu_move_affine; | 
 | 626 | 	unsigned long ttwu_move_balance; | 
 | 627 | #endif | 
 | 628 | }; | 
 | 629 |  | 
| Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 630 | extern void partition_sched_domains(cpumask_t *partition1, | 
 | 631 | 				    cpumask_t *partition2); | 
| akpm@osdl.org | 198e2f1 | 2006-01-12 01:05:30 -0800 | [diff] [blame] | 632 |  | 
 | 633 | /* | 
 | 634 |  * Maximum cache size the migration-costs auto-tuning code will | 
 | 635 |  * search from: | 
 | 636 |  */ | 
 | 637 | extern unsigned int max_cache_size; | 
 | 638 |  | 
 | 639 | #endif	/* CONFIG_SMP */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 640 |  | 
 | 641 |  | 
 | 642 | struct io_context;			/* See blkdev.h */ | 
 | 643 | void exit_io_context(void); | 
 | 644 | struct cpuset; | 
 | 645 |  | 
 | 646 | #define NGROUPS_SMALL		32 | 
 | 647 | #define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t))) | 
 | 648 | struct group_info { | 
 | 649 | 	int ngroups; | 
 | 650 | 	atomic_t usage; | 
 | 651 | 	gid_t small_block[NGROUPS_SMALL]; | 
 | 652 | 	int nblocks; | 
 | 653 | 	gid_t *blocks[0]; | 
 | 654 | }; | 
 | 655 |  | 
 | 656 | /* | 
 | 657 |  * get_group_info() must be called with the owning task locked (via task_lock()) | 
 * when task != current, because the vast majority of callers are
 * looking at current->group_info, which can not be changed except by the
 * current task.  Changing current->group_info requires the task lock, too.
 */
#define get_group_info(group_info) do { \
	atomic_inc(&(group_info)->usage); \
} while (0)

#define put_group_info(group_info) do { \
	if (atomic_dec_and_test(&(group_info)->usage)) \
		groups_free(group_info); \
} while (0)

extern struct group_info *groups_alloc(int gidsetsize);
extern void groups_free(struct group_info *group_info);
extern int set_current_groups(struct group_info *group_info);
extern int groups_search(struct group_info *group_info, gid_t grp);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
    ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
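/*
 * Illustrative only, not part of the original header: a simple linear scan
 * over the supplementary groups (groups_search() itself uses a binary
 * search over the sorted array) would look like
 *
 *	for (i = 0; i < group_info->ngroups; i++)
 *		if (GROUP_AT(group_info, i) == grp)
 *			return 1;
 */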

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct*);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;

enum sleep_type {
	SLEEP_NORMAL,
	SLEEP_NONINTERACTIVE,
	SLEEP_INTERACTIVE,
	SLEEP_INTERRUPTED,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	struct thread_info *thread_info;
	atomic_t usage;
	unsigned long flags;	/* per process flags, defined below */
	unsigned long ptrace;

	int lock_depth;		/* BKL lock depth */

#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	int oncpu;
#endif
	int prio, static_prio;
	struct list_head run_list;
	prio_array_t *array;

	unsigned short ioprio;
	unsigned int btrace_seq;

	unsigned long sleep_avg;
	unsigned long long timestamp, last_ran;
	unsigned long long sched_time; /* sched_clock time spent running */
	enum sleep_type sleep_type;

	unsigned long policy;
	cpumask_t cpus_allowed;
	unsigned int time_slice, first_time_slice;

#ifdef CONFIG_SCHEDSTATS
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	/*
	 * ptrace_list/ptrace_children forms the list of my children
	 * that were stolen by a ptracer.
	 */
	struct list_head ptrace_children;
	struct list_head ptrace_list;

	struct mm_struct *mm, *active_mm;

/* task state */
	struct linux_binfmt *binfmt;
	long exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	/* ??? */
	unsigned long personality;
	unsigned did_exec:1;
	pid_t pid;
	pid_t tgid;
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process (when being debugged) */
	struct task_struct *parent;	/* parent process */
	/*
	 * children/sibling forms the list of my children plus the
	 * tasks I'm ptracing.
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	unsigned long rt_priority;
	cputime_t utime, stime;
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	cputime_t it_prof_expires, it_virt_expires;
	unsigned long long it_sched_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	struct group_info *group_info;
	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
	unsigned keep_capabilities:1;
	struct user_struct *user;
#ifdef CONFIG_KEYS
	struct key *request_key_auth;	/* assumed request_key authority */
	struct key *thread_keyring;	/* keyring private to this thread */
	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
#endif
	int oomkilladj; /* OOM kill score adjustment (bit shift). */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by flush_old_exec */
/* file system info */
	int link_count, total_link_count;
/* ipc stuff */
	struct sysv_sem sysvsem;
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespace */
	struct namespace *namespace;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;		/* To be restored with TIF_RESTORE_SIGMASK */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;

	void *security;
	struct audit_context *audit_context;
	seccomp_t seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
	spinlock_t alloc_lock;
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
	spinlock_t proc_lock;

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif

/* journalling filesystem info */
	void *journal_info;

/* VM state */
	struct reclaim_state *reclaim_state;

	struct dentry *proc_dentry;
	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
/*
 * current io wait handle: wait queue entry to use for io waits
 * If this thread is processing aio, this points at the waitqueue
 * inside the currently handled kiocb. It may be NULL (i.e. default
 * to a stack-based synchronous wait) if it's doing sync IO.
 */
	wait_queue_t *io_wait;
/* i/o counters (bytes read/written, #syscalls) */
	u64 rchar, wchar, syscr, syscw;
#if defined(CONFIG_BSD_PROCESS_ACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	clock_t acct_stimexpd;	/* clock_t-converted stime since last update */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_next;
#endif
#ifdef CONFIG_CPUSETS
	struct cpuset *cpuset;
	nodemask_t mems_allowed;
	int cpuset_mems_generation;
	int cpuset_mem_spread_rotor;
#endif
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif

	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;
};

static inline pid_t process_group(struct task_struct *tsk)
{
	return tsk->signal->pgrp;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct_cb(struct rcu_head *rhp);
extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486*/
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_DEAD		0x00000008	/* Dead */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_FREEZE	0x00004000	/* this task is being frozen for suspend now */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
#define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
#define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x01000000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x04000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x08000000	/* Spread some slab caches over cpuset */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
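/*
 * Illustrative only, not part of the original header: these flags are
 * usually tested on current; for example the page allocator relaxes its
 * watermarks for a task that is itself reclaiming memory, roughly
 *
 *	if (current->flags & PF_MEMALLOC)
 *		allow_use_of_reserves();	(hypothetical helper)
 */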
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 946 |  | 
 | 947 | /* | 
 | 948 |  * Only the _current_ task can read/write to tsk->flags, but other | 
 | 949 |  * tasks can access tsk->flags in readonly mode for example | 
 | 950 |  * with tsk_used_math (like during threaded core dumping). | 
 | 951 |  * There is however an exception to this rule during ptrace | 
 | 952 |  * or during fork: the ptracer task is allowed to write to the | 
 | 953 |  * child->flags of its traced child (same goes for fork, the parent | 
 | 954 |  * can write to the child->flags), because we're guaranteed the | 
 | 955 |  * child is not running and in turn not changing child->flags | 
 | 956 |  * at the same time the parent does it. | 
 | 957 |  */ | 
 | 958 | #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) | 
 | 959 | #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) | 
 | 960 | #define clear_used_math() clear_stopped_child_used_math(current) | 
 | 961 | #define set_used_math() set_stopped_child_used_math(current) | 
 | 962 | #define conditional_stopped_child_used_math(condition, child) \ | 
 | 963 | 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) | 
 | 964 | #define conditional_used_math(condition) \ | 
 | 965 | 	conditional_stopped_child_used_math(condition, current) | 
 | 966 | #define copy_to_stopped_child_used_math(child) \ | 
 | 967 | 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) | 
 | 968 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ | 
 | 969 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) | 
 | 970 | #define used_math() tsk_used_math(current) | 
 | 971 |  | 
 | 972 | #ifdef CONFIG_SMP | 
 | 973 | extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); | 
 | 974 | #else | 
 | 975 | static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) | 
 | 976 | { | 
| Paul Jackson | 4098f99 | 2005-10-30 15:03:21 -0800 | [diff] [blame] | 977 | 	if (!cpu_isset(0, new_mask)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 978 | 		return -EINVAL; | 
 | 979 | 	return 0; | 
 | 980 | } | 
 | 981 | #endif | 
 | 982 |  | 
 | 983 | extern unsigned long long sched_clock(void); | 
 | 984 | extern unsigned long long current_sched_time(const task_t *current_task); | 
 | 985 |  | 
 | 986 | /* sched_exec is called by processes performing an exec */ | 
 | 987 | #ifdef CONFIG_SMP | 
 | 988 | extern void sched_exec(void); | 
 | 989 | #else | 
 | 990 | #define sched_exec()   {} | 
 | 991 | #endif | 
 | 992 |  | 
 | 993 | #ifdef CONFIG_HOTPLUG_CPU | 
 | 994 | extern void idle_task_exit(void); | 
 | 995 | #else | 
 | 996 | static inline void idle_task_exit(void) {} | 
 | 997 | #endif | 
 | 998 |  | 
 | 999 | extern void sched_idle_next(void); | 
 | 1000 | extern void set_user_nice(task_t *p, long nice); | 
 | 1001 | extern int task_prio(const task_t *p); | 
 | 1002 | extern int task_nice(const task_t *p); | 
| Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 1003 | extern int can_nice(const task_t *p, const int nice); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1004 | extern int task_curr(const task_t *p); | 
 | 1005 | extern int idle_cpu(int cpu); | 
 | 1006 | extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); | 
 | 1007 | extern task_t *idle_task(int cpu); | 
| Keith Owens | a2a9798 | 2005-09-11 17:19:06 +1000 | [diff] [blame] | 1008 | extern task_t *curr_task(int cpu); | 
 | 1009 | extern void set_curr_task(int cpu, task_t *p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1010 |  | 
 | 1011 | void yield(void); | 
 | 1012 |  | 
 | 1013 | /* | 
 | 1014 |  * The default (Linux) execution domain. | 
 | 1015 |  */ | 
 | 1016 | extern struct exec_domain	default_exec_domain; | 
 | 1017 |  | 
 | 1018 | union thread_union { | 
 | 1019 | 	struct thread_info thread_info; | 
 | 1020 | 	unsigned long stack[THREAD_SIZE/sizeof(long)]; | 
 | 1021 | }; | 
 | 1022 |  | 
 | 1023 | #ifndef __HAVE_ARCH_KSTACK_END | 
 | 1024 | static inline int kstack_end(void *addr) | 
 | 1025 | { | 
 | 1026 | 	/* Reliable end of stack detection: | 
 | 1027 | 	 * Some APM bios versions misalign the stack | 
 | 1028 | 	 */ | 
 | 1029 | 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); | 
 | 1030 | } | 
 | 1031 | #endif | 
 | 1032 |  | 
 | 1033 | extern union thread_union init_thread_union; | 
 | 1034 | extern struct task_struct init_task; | 
 | 1035 |  | 
 | 1036 | extern struct   mm_struct init_mm; | 
 | 1037 |  | 
 | 1038 | #define find_task_by_pid(nr)	find_task_by_pid_type(PIDTYPE_PID, nr) | 
 | 1039 | extern struct task_struct *find_task_by_pid_type(int type, int pid); | 
 | 1040 | extern void set_special_pids(pid_t session, pid_t pgrp); | 
 | 1041 | extern void __set_special_pids(pid_t session, pid_t pgrp); | 
 | 1042 |  | 
 | 1043 | /* per-UID process charging. */ | 
 | 1044 | extern struct user_struct * alloc_uid(uid_t); | 
 | 1045 | static inline struct user_struct *get_uid(struct user_struct *u) | 
 | 1046 | { | 
 | 1047 | 	atomic_inc(&u->__count); | 
 | 1048 | 	return u; | 
 | 1049 | } | 
 | 1050 | extern void free_uid(struct user_struct *); | 
 | 1051 | extern void switch_uid(struct user_struct *); | 

#include <asm/current.h>

extern void do_timer(struct pt_regs *);

extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
						unsigned long clone_flags));
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
extern void FASTCALL(sched_exit(task_t * p));

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
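
/*
 * Illustrative sketch (not part of this header): passing one of the
 * special markers instead of a real siginfo pointer.  SEND_SIG_PRIV
 * makes the signal look as if it was raised by the kernel rather than
 * another user process:
 */
#if 0	/* usage sketch only -- not compiled */
static void nudge_task_example(struct task_struct *p)
{
	/* Deliver SIGHUP as a kernel-internal signal, no siginfo payload. */
	send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
}
#endif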

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}
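
/*
 * Illustrative sketch (not part of this header): this is the check an
 * arch's signal-frame setup performs when SA_ONSTACK is set -- switch
 * to the alternate stack only if it is enabled and we are not already
 * running on it:
 */
#if 0	/* usage sketch only -- not compiled */
static unsigned long sigframe_sp_example(struct k_sigaction *ka,
					 unsigned long sp)
{
	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
		sp = current->sas_ss_sp + current->sas_ss_size;
	return sp;
}
#endif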

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space state */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
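
/*
 * Illustrative sketch (not part of this header): get_task_mm() takes a
 * reference on ->mm_users, so every successful call must be balanced by
 * mmput().  (mmdrop()/->mm_count is the separate, lower-level count that
 * keeps the struct and page tables alive, e.g. for lazy-TLB kernel
 * threads.)
 */
#if 0	/* usage sketch only -- not compiled */
static unsigned long task_vm_size_example(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long pages = 0;

	if (mm) {
		pages = mm->total_vm;	/* mm is pinned, safe to dereference */
		mmput(mm);		/* balance the get_task_mm() reference */
	}
	return pages;
}
#endif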

extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
extern task_t *child_reaper;

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
task_t *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(task_t * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif

#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)

#define next_task(p)	list_entry((p)->tasks.next, struct task_struct, tasks)
#define prev_task(p)	list_entry((p)->tasks.prev, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
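
/*
 * Illustrative sketch (not part of this header): walking every thread in
 * the system.  'g' visits each thread group leader, 't' each member; the
 * caller is assumed to hold read_lock(&tasklist_lock).  Note the goto
 * instead of 'break' to escape the double loop:
 */
#if 0	/* usage sketch only -- not compiled */
static task_t *find_thread_by_pid_example(pid_t pid)
{
	task_t *g, *t, *found = NULL;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (t->pid == pid) {
			found = t;
			goto out;	/* 'break' would only exit the inner loop */
		}
	} while_each_thread(g, t);
out:
	read_unlock(&tasklist_lock);
	return found;
}
#endif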

#define thread_group_leader(p)	(p->pid == p->tgid)

static inline task_t *next_thread(task_t *p)
{
	return list_entry(rcu_dereference(p->thread_group.next),
				task_t, thread_group);
}

static inline int thread_group_empty(task_t *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
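
/*
 * Illustrative sketch (not part of this header): task_lock() is what
 * stabilises another task's ->mm pointer while you look at it (this is
 * essentially what get_task_mm() does internally):
 */
#if 0	/* usage sketch only -- not compiled */
static int task_has_mm_example(struct task_struct *p)
{
	int ret;

	task_lock(p);
	ret = (p->mm != NULL);	/* ->mm cannot change while task_lock is held */
	task_unlock(p);
	return ret;
}
#endif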

extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
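
/*
 * Illustrative sketch (not part of this header): lock_task_sighand()
 * returns NULL if the task has already lost its ->sighand (i.e. it is
 * being released), so the return value must be checked before touching
 * any signal state:
 */
#if 0	/* usage sketch only -- not compiled */
static void poke_pending_example(struct task_struct *tsk)
{
	unsigned long flags;

	if (lock_task_sighand(tsk, &flags)) {
		recalc_sigpending_tsk(tsk);	/* siglock is held, as required */
		unlock_task_sighand(tsk, &flags);
	}
}
#endif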

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task) (task)->thread_info
#define task_stack_page(task) ((void*)((task)->thread_info))

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(p->thread_info + 1);
}

#endif

/* Set thread flags in another task's structure
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was in fact done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int cond_resched(void);
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);
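
/*
 * Illustrative sketch (not part of this header): a long-running loop in
 * process context keeps scheduling latency down by offering to
 * reschedule once per iteration:
 */
#if 0	/* usage sketch only -- not compiled */
static void scrub_pages_example(struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		clear_highpage(pages[i]);
		cond_resched();		/* safe point: may sleep here */
	}
}
#endif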

/*
 * Does a critical section need to be broken due to another
 * task waiting?:
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
# define need_lockbreak(lock) ((lock)->break_lock)
#else
# define need_lockbreak(lock) 0
#endif

/*
 * Does a critical section need to be broken due to another
 * task waiting or preemption being signalled:
 */
static inline int lock_need_resched(spinlock_t *lock)
{
	if (need_lockbreak(lock) || need_resched())
		return 1;
	return 0;
}
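
/*
 * Illustrative sketch (not part of this header): breaking up a long
 * spinlocked scan.  When another task is spinning on the lock or we
 * need to reschedule, cond_resched_lock() drops the lock, schedules and
 * retakes it; any state derived under the lock must be revalidated
 * afterwards (the index-based loop below survives that):
 */
#if 0	/* usage sketch only -- not compiled */
static void scan_table_example(spinlock_t *lock, int *table, int n)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < n; i++) {
		/* ... examine table[i] ... */
		if (lock_need_resched(lock))
			cond_resched_lock(lock);	/* drops, schedules, retakes */
	}
	spin_unlock(lock);
}
#endif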

/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   Callers must hold sighand->siglock.  */

extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);
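
/*
 * Illustrative sketch (not part of this header): the canonical pattern
 * for changing the blocked set -- take siglock, update ->blocked, then
 * recalculate TIF_SIGPENDING before dropping the lock:
 */
#if 0	/* usage sketch only -- not compiled */
static void block_sig_example(int sig)
{
	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();	/* required after ->blocked changes */
	spin_unlock_irq(&current->sighand->siglock);
}
#endif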

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	task_thread_info(p)->cpu = cpu;
}

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
extern void arch_pick_mmap_layout(struct mm_struct *mm);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_PM
/*
 * Check if a process has been frozen
 */
static inline int frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

/*
 * Check if there is a request to freeze a process
 */
static inline int freezing(struct task_struct *p)
{
	return p->flags & PF_FREEZE;
}

/*
 * Request that a process be frozen
 * FIXME: SMP problem. We may not modify another process's flags!
 */
static inline void freeze(struct task_struct *p)
{
	p->flags |= PF_FREEZE;
}

/*
 * Wake up a frozen process
 */
static inline int thaw_process(struct task_struct *p)
{
	if (frozen(p)) {
		p->flags &= ~PF_FROZEN;
		wake_up_process(p);
		return 1;
	}
	return 0;
}

/*
 * Freezing is complete; mark the process as frozen
 */
static inline void frozen_process(struct task_struct *p)
{
	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
}

extern void refrigerator(void);
extern int freeze_processes(void);
extern void thaw_processes(void);

static inline int try_to_freeze(void)
{
	if (freezing(current)) {
		refrigerator();
		return 1;
	} else
		return 0;
}
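
/*
 * Illustrative sketch (not part of this header): a freezer-aware kernel
 * thread polls try_to_freeze() in its main loop; while a suspend is in
 * progress the call parks the thread in refrigerator() until thaw:
 */
#if 0	/* usage sketch only -- not compiled */
static int kthread_main_loop_example(void *unused)
{
	while (!signal_pending(current)) {
		try_to_freeze();	/* blocks here across a suspend */
		/* ... do one unit of work, then sleep ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif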
#else
static inline int frozen(struct task_struct *p) { return 0; }
static inline int freezing(struct task_struct *p) { return 0; }
static inline void freeze(struct task_struct *p) { BUG(); }
static inline int thaw_process(struct task_struct *p) { return 1; }
static inline void frozen_process(struct task_struct *p) { BUG(); }

static inline void refrigerator(void) {}
static inline int freeze_processes(void) { BUG(); return 0; }
static inline void thaw_processes(void) {}

static inline int try_to_freeze(void) { return 0; }

#endif /* CONFIG_PM */
#endif /* __KERNEL__ */

#endif