#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <asm/param.h>	/* for HZ */

#include <linux/config.h>
#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>

struct exec_domain;

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
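
/*
 * Worked example of the fixed-point decay above (an illustrative sketch,
 * not code the kernel runs): with FSHIFT = 11, FIXED_1 = 2048 stands for
 * 1.0, and EXP_1 = 2048/e^(5/60) ~= 1884.  If the old 1-minute average
 * is 1.0 (load = 2048) and three tasks are runnable (n = 3*FIXED_1 = 6144):
 *
 *	load = (2048*1884 + 6144*(2048-1884)) >> 11
 *	     = (3858432 + 1007616) >> 11
 *	     = 2376			(about 1.16 as fixed-point)
 */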

extern unsigned long total_forks;
extern int nr_threads;
extern int last_pid;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>

#include <asm/processor.h>

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		4
#define TASK_TRACED		8
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	set_mb(current->state, (state_value))
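
/*
 * Typical use of the state helpers above when going to sleep (a minimal
 * sketch of the usual pattern; "condition" is a placeholder for whatever
 * the caller is waiting on):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() uses set_mb() so the state store is ordered before
 * the condition test, which keeps the waker from losing the wakeup.
 */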

/* Task command name length */
#define TASK_COMM_LEN 16

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2

struct sched_param {
	int sched_priority;
};

#ifdef __KERNEL__

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

typedef struct task_struct task_t;

extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(task_t *idle, int cpu);

extern cpumask_t nohz_cpu_mask;

extern void show_state(void);
extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))
/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);

struct namespace;

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	65536

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--
typedef unsigned long mm_counter_t;

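/*
 * Usage sketch for the mm counter helpers above: "member" is the counter
 * name without the leading underscore, so for the _rss field declared in
 * struct mm_struct below one would write, with page_table_lock held:
 *
 *	inc_mm_counter(mm, rss);
 *	add_mm_counter(mm, rss, -nr_freed);
 *	pages = get_mm_counter(mm, rss);
 *
 * (nr_freed and pages are placeholder variables for the example.)
 */
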
struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects page tables and some counters */

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long total_vm, locked_vm, shared_vm;
	unsigned long exec_vm, stack_vm, reserved_vm, def_flags, nr_ptes;

	/* Special counters protected by the page_table_lock */
	mm_counter_t _rss;
	mm_counter_t _anon_rss;

	unsigned long saved_auxv[42]; /* for /proc/PID/auxv */

	unsigned dumpable:2;
	cpumask_t cpu_vm_mask;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Token based thrashing protection. */
	unsigned long swap_token_time;
	char recent_pagein;

	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;

	/* aio bits */
	rwlock_t		ioctx_list_lock;
	struct kioctx		*ioctx_list;

	struct kioctx		default_kioctx;

	unsigned long hiwater_rss;	/* High-water RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */
};

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	task_t			*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct	*group_exit_task;
	int			notify_count;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct timer_list real_timer;
	unsigned long it_real_value, it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/* job control IDs */
	pid_t pgrp;
	pid_t tty_old_pgrp;
	pid_t session;
	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sched_time;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

	struct list_head cpu_timers[3];

	/* keep the process-shared keyrings here so that they do the right
	 * thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
	struct key *session_keyring;	/* keyring inherited over fork */
	struct key *process_keyring;	/* keyring private to this process */
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */


/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL tasks are
 * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values
 * are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)

#define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct list_head uidhash_list;
	uid_t uid;
};

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

typedef struct prio_array prio_array_t;
struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHEDSTATS
struct sched_info {
	/* cumulative counters */
	unsigned long	cpu_time,	/* time spent on the cpu */
			run_delay,	/* time spent waiting on a runqueue */
			pcnt;		/* # of timeslices run on this cpu */

	/* timestamps */
	unsigned long	last_arrival,	/* when we last ran on a cpu */
			last_queued;	/* when we were last queued to run */
};

extern struct file_operations proc_schedstat_operations;
#endif

enum idle_type
{
	SCHED_IDLE,
	NOT_IDLE,
	NEWLY_IDLE,
	MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */

#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		4	/* Balance on exec */
#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	cpumask_t cpumask;

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU. This is read only (except for setup, hotplug CPU).
	 */
	unsigned long cpu_power;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	cpumask_t span;			/* span of all CPUs in this domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	int flags;			/* See SD_* */

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned long lb_cnt[MAX_IDLE_TYPES];
	unsigned long lb_failed[MAX_IDLE_TYPES];
	unsigned long lb_balanced[MAX_IDLE_TYPES];
	unsigned long lb_imbalance[MAX_IDLE_TYPES];
	unsigned long lb_gained[MAX_IDLE_TYPES];
	unsigned long lb_hot_gained[MAX_IDLE_TYPES];
	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
	unsigned long lb_nobusyq[MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned long alb_cnt;
	unsigned long alb_failed;
	unsigned long alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned long sbe_cnt;
	unsigned long sbe_balanced;
	unsigned long sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned long sbf_cnt;
	unsigned long sbf_balanced;
	unsigned long sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned long ttwu_wake_remote;
	unsigned long ttwu_move_affine;
	unsigned long ttwu_move_balance;
#endif
};

extern void partition_sched_domains(cpumask_t *partition1,
				    cpumask_t *partition2);
#ifdef ARCH_HAS_SCHED_DOMAIN
/* Useful helpers that arch setup code may use. Defined in kernel/sched.c */
extern cpumask_t cpu_isolated_map;
extern void init_sched_build_groups(struct sched_group groups[],
				    cpumask_t span, int (*group_fn)(int cpu));
extern void cpu_attach_domain(struct sched_domain *sd, int cpu);
#endif /* ARCH_HAS_SCHED_DOMAIN */
#endif /* CONFIG_SMP */


struct io_context;			/* See blkdev.h */
void exit_io_context(void);
struct cpuset;

#define NGROUPS_SMALL		32
#define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
	int ngroups;
	atomic_t usage;
	gid_t small_block[NGROUPS_SMALL];
	int nblocks;
	gid_t *blocks[0];
};

/*
 * get_group_info() must be called with the owning task locked (via task_lock())
 * when task != current.  The reason being that the vast majority of callers are
 * looking at current->group_info, which can not be changed except by the
 * current task.  Changing current->group_info requires the task lock, too.
 */
#define get_group_info(group_info) do { \
	atomic_inc(&(group_info)->usage); \
} while (0)

#define put_group_info(group_info) do { \
	if (atomic_dec_and_test(&(group_info)->usage)) \
		groups_free(group_info); \
} while (0)

extern struct group_info *groups_alloc(int gidsetsize);
extern void groups_free(struct group_info *group_info);
extern int set_current_groups(struct group_info *group_info);
extern int groups_search(struct group_info *group_info, gid_t grp);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
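
/*
 * Illustrative loop over a group_info (sketch only): GROUP_AT() indexes
 * through the blocks[] array, so callers need not care whether the gids
 * live in small_block[] or in separately allocated blocks:
 *
 *	int i;
 *	for (i = 0; i < group_info->ngroups; i++) {
 *		gid_t gid = GROUP_AT(group_info, i);
 *		...
 *	}
 */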


struct audit_context;		/* See audit.c */
struct mempolicy;

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	struct thread_info *thread_info;
	atomic_t usage;
	unsigned long flags;	/* per process flags, defined below */
	unsigned long ptrace;

	int lock_depth;		/* BKL lock depth */

#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	int oncpu;
#endif
	int prio, static_prio;
	struct list_head run_list;
	prio_array_t *array;

	unsigned short ioprio;

	unsigned long sleep_avg;
	unsigned long long timestamp, last_ran;
	unsigned long long sched_time; /* sched_clock time spent running */
	int activated;

	unsigned long policy;
	cpumask_t cpus_allowed;
	unsigned int time_slice, first_time_slice;

#ifdef CONFIG_SCHEDSTATS
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	/*
	 * ptrace_list/ptrace_children forms the list of my children
	 * that were stolen by a ptracer.
	 */
	struct list_head ptrace_children;
	struct list_head ptrace_list;

	struct mm_struct *mm, *active_mm;

	/* task state */
	struct linux_binfmt *binfmt;
	long exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	/* ??? */
	unsigned long personality;
	unsigned did_exec:1;
	pid_t pid;
	pid_t tgid;
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process (when being debugged) */
	struct task_struct *parent;	/* parent process */
	/*
	 * children/sibling forms the list of my children plus the
	 * tasks I'm ptracing.
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/* PID/PID hash table linkage. */
	struct pid pids[PIDTYPE_MAX];

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	unsigned long rt_priority;
	cputime_t utime, stime;
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time;
	/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	cputime_t it_prof_expires, it_virt_expires;
	unsigned long long it_sched_expires;
	struct list_head cpu_timers[3];

	/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	struct group_info *group_info;
	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
	unsigned keep_capabilities:1;
	struct user_struct *user;
#ifdef CONFIG_KEYS
	struct key *thread_keyring;	/* keyring private to this thread */
	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
#endif
	int oomkilladj; /* OOM kill score adjustment (bit shift). */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by flush_old_exec */
	/* file system info */
	int link_count, total_link_count;
	/* ipc stuff */
	struct sysv_sem sysvsem;
	/* CPU-specific state of this task */
	struct thread_struct thread;
	/* filesystem information */
	struct fs_struct *fs;
	/* open file information */
	struct files_struct *files;
	/* namespace */
	struct namespace *namespace;
	/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;

	void *security;
	struct audit_context *audit_context;
	seccomp_t seccomp;

	/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
	/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
	spinlock_t alloc_lock;
	/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
	spinlock_t proc_lock;

	/* journalling filesystem info */
	void *journal_info;

	/* VM state */
	struct reclaim_state *reclaim_state;

	struct dentry *proc_dentry;
	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	/*
	 * current io wait handle: wait queue entry to use for io waits
	 * If this thread is processing aio, this points at the waitqueue
	 * inside the currently handled kiocb. It may be NULL (i.e. default
	 * to a stack based synchronous wait) if it's doing sync IO.
	 */
	wait_queue_t *io_wait;
	/* i/o counters (bytes read/written, #syscalls) */
	u64 rchar, wchar, syscr, syscw;
#if defined(CONFIG_BSD_PROCESS_ACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	clock_t acct_stimexpd;	/* clock_t-converted stime since last update */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_next;
#endif
#ifdef CONFIG_CPUSETS
	struct cpuset *cpuset;
	nodemask_t mems_allowed;
	int cpuset_mems_generation;
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
};

static inline pid_t process_group(struct task_struct *tsk)
{
	return tsk->signal->pgrp;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].nr != 0;
}

extern void free_task(struct task_struct *tsk);
extern void __put_task_struct(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
#define put_task_struct(tsk) \
do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
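
/*
 * Reference-counting sketch: take a reference while holding a task
 * pointer across a window where the task could exit, then drop it:
 *
 *	get_task_struct(p);
 *	... use p ...
 *	put_task_struct(p);
 *
 * put_task_struct() calls __put_task_struct() to free the task once the
 * last reference is gone.
 */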

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486 */
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_DEAD		0x00000008	/* Dead */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_FREEZE	0x00004000	/* this task is being frozen for suspend now */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
#define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
#define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

#ifdef CONFIG_SMP
extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
#else
static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
{
	if (!cpus_intersects(new_mask, cpu_online_map))
		return -EINVAL;
	return 0;
}
#endif

extern unsigned long long sched_clock(void);
extern unsigned long long current_sched_time(const task_t *current_task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

extern void sched_idle_next(void);
extern void set_user_nice(task_t *p, long nice);
extern int task_prio(const task_t *p);
extern int task_nice(const task_t *p);
extern int can_nice(const task_t *p, const int nice);
extern int task_curr(const task_t *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern task_t *idle_task(int cpu);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct   mm_struct init_mm;

#define find_task_by_pid(nr)	find_task_by_pid_type(PIDTYPE_PID, nr)
extern struct task_struct *find_task_by_pid_type(int type, int pid);
extern void set_special_pids(pid_t session, pid_t pgrp);
extern void __set_special_pids(pid_t session, pid_t pgrp);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);

#include <asm/current.h>

extern void do_timer(struct pt_regs *);

extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
						unsigned long clone_flags));
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
extern void FASTCALL(sched_exit(task_t * p));

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(int, struct sigqueue *,  struct task_struct *);
extern int send_group_sigqueue(int, struct sigqueue *,  struct task_struct *);
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}


#ifdef CONFIG_SECURITY
/* code is in security.c */
extern int capable(int cap);
#else
static inline int capable(int cap)
{
	if (cap_raised(current->cap_effective, cap)) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
#endif

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
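
/*
 * get_task_mm()/mmput() pairing (an illustrative sketch): code that
 * inspects another task's address space typically does
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		... walk mm->mmap ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 *
 * where get_task_mm() returns NULL if the task has no user address space.
 */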

extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void exit_signal(struct task_struct *);
extern void __exit_signal(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
extern task_t *child_reaper;

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
task_t *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(task_t * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif

#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p, parent)	list_add_tail(&(p)->sibling,&(parent)->children)

#define REMOVE_LINKS(p) do {					\
	if (thread_group_leader(p))				\
		list_del_init(&(p)->tasks);			\
	remove_parent(p);					\
	} while (0)

#define SET_LINKS(p) do {					\
	if (thread_group_leader(p))				\
		list_add_tail(&(p)->tasks,&init_task.tasks);	\
	add_parent(p, (p)->parent);				\
	} while (0)

#define next_task(p)	list_entry((p)->tasks.next, struct task_struct, tasks)
#define prev_task(p)	list_entry((p)->tasks.prev, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
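
/*
 * Typical traversal with the helpers above (sketch); the task list is
 * usually protected with read_lock(&tasklist_lock):
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		... examine t ...
 *	} while_each_thread(g, t);
 *	read_unlock(&tasklist_lock);
 *
 * As the comment above warns, this is a double loop, so leave it early
 * with goto rather than break.
 */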

extern task_t * FASTCALL(next_thread(const task_t *p));

#define thread_group_leader(p)	(p->pid == p->tgid)

static inline int thread_group_empty(task_t *p)
{
	return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern void unhash_process(struct task_struct *p);

/*
 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
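
/*
 * Example of the locking rule described above (sketch): safely copying
 * another task's ->comm, one of the fields task_lock() protects:
 *
 *	task_lock(task);
 *	memcpy(buf, task->comm, sizeof(task->comm));
 *	task_unlock(task);
 *
 * (buf is a placeholder; get_task_comm() above wraps this pattern.)
 */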
|  | 1138 |  | 
|  | 1139 | /* set thread flags in other task's structures | 
|  | 1140 | * - see asm/thread_info.h for TIF_xxxx flags available | 
|  | 1141 | */ | 
|  | 1142 | static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) | 
|  | 1143 | { | 
|  | 1144 | set_ti_thread_flag(tsk->thread_info,flag); | 
|  | 1145 | } | 
|  | 1146 |  | 
|  | 1147 | static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) | 
|  | 1148 | { | 
|  | 1149 | clear_ti_thread_flag(tsk->thread_info,flag); | 
|  | 1150 | } | 
|  | 1151 |  | 
|  | 1152 | static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) | 
|  | 1153 | { | 
|  | 1154 | return test_and_set_ti_thread_flag(tsk->thread_info,flag); | 
|  | 1155 | } | 
|  | 1156 |  | 
|  | 1157 | static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) | 
|  | 1158 | { | 
|  | 1159 | return test_and_clear_ti_thread_flag(tsk->thread_info,flag); | 
|  | 1160 | } | 
|  | 1161 |  | 
|  | 1162 | static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) | 
|  | 1163 | { | 
|  | 1164 | return test_ti_thread_flag(tsk->thread_info,flag); | 
|  | 1165 | } | 
|  | 1166 |  | 
|  | 1167 | static inline void set_tsk_need_resched(struct task_struct *tsk) | 
|  | 1168 | { | 
|  | 1169 | set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); | 
|  | 1170 | } | 
|  | 1171 |  | 
|  | 1172 | static inline void clear_tsk_need_resched(struct task_struct *tsk) | 
|  | 1173 | { | 
|  | 1174 | clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); | 
|  | 1175 | } | 
|  | 1176 |  | 
|  | 1177 | static inline int signal_pending(struct task_struct *p) | 
|  | 1178 | { | 
|  | 1179 | return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); | 
|  | 1180 | } | 
|  | 1181 |  | 
|  | 1182 | static inline int need_resched(void) | 
|  | 1183 | { | 
|  | 1184 | return unlikely(test_thread_flag(TIF_NEED_RESCHED)); | 
|  | 1185 | } | 
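
/*
 * Illustrative sketch: a long-running kernel loop that honours pending
 * signals via signal_pending() and voluntarily yields the CPU when
 * need_resched() says so.  example_clear_words() is a hypothetical
 * helper running in process context with no locks held.
 */
static inline long example_clear_words(unsigned long *buf, long nr)
{
	long i;

	for (i = 0; i < nr; i++) {
		if (signal_pending(current))
			return -EINTR;	/* abort; the caller handles the signal */
		if (need_resched())
			schedule();	/* safe point: no locks held */
		buf[i] = 0;
	}
	return i;
}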
|  | 1186 |  | 
|  | 1187 | /* | 
|  | 1188 | * cond_resched() and cond_resched_lock(): latency reduction via | 
|  | 1189 | * explicit rescheduling in places that are safe. The return | 
|  | 1190 | * value indicates whether a reschedule actually took place. | 
|  | 1191 | * cond_resched_lock() will drop the spinlock before scheduling, | 
|  | 1192 | * cond_resched_softirq() will enable bottom halves (bhs) before scheduling. | 
|  | 1193 | */ | 
|  | 1194 | extern int cond_resched(void); | 
|  | 1195 | extern int cond_resched_lock(spinlock_t * lock); | 
|  | 1196 | extern int cond_resched_softirq(void); | 
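
/*
 * Illustrative sketch: cond_resched() packages the "reschedule now if
 * needed" check, so a long loop only has to call it once per iteration
 * at a point where no locks are held.  example_zero_pages() and its
 * arguments are hypothetical; clear_page() comes from <asm/page.h>.
 */
static inline void example_zero_pages(unsigned long *page_addrs, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		clear_page((void *)page_addrs[i]);	/* expensive per-item work */
		cond_resched();				/* safe point: no locks held */
	}
}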
|  | 1197 |  | 
|  | 1198 | /* | 
|  | 1199 | * Does a critical section need to be broken due to another | 
|  | 1200 | * task waiting?: | 
|  | 1201 | */ | 
|  | 1202 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | 
|  | 1203 | # define need_lockbreak(lock) ((lock)->break_lock) | 
|  | 1204 | #else | 
|  | 1205 | # define need_lockbreak(lock) 0 | 
|  | 1206 | #endif | 
|  | 1207 |  | 
|  | 1208 | /* | 
|  | 1209 | * Does a critical section need to be broken due to another | 
|  | 1210 | * task waiting or preemption being signalled: | 
|  | 1211 | */ | 
|  | 1212 | static inline int lock_need_resched(spinlock_t *lock) | 
|  | 1213 | { | 
|  | 1214 | if (need_lockbreak(lock) || need_resched()) | 
|  | 1215 | return 1; | 
|  | 1216 | return 0; | 
|  | 1217 | } | 
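
/*
 * Illustrative sketch: cond_resched_lock() inside a loop that holds a
 * spinlock.  If another task is spinning on the lock (need_lockbreak())
 * or a reschedule is pending, the lock is dropped, schedule() may run,
 * and the lock is re-taken before the loop continues.  example_lock,
 * 'table' and example_zero_table() are hypothetical; the table contents
 * must tolerate the lock being dropped between iterations.
 */
static inline void example_zero_table(spinlock_t *example_lock,
				      int *table, int nr)
{
	int i;

	spin_lock(example_lock);
	for (i = 0; i < nr; i++) {
		table[i] = 0;
		cond_resched_lock(example_lock);	/* may drop and re-take the lock */
	}
	spin_unlock(example_lock);
}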
|  | 1218 |  | 
|  | 1219 | /* Reevaluate whether the task has signals pending delivery. | 
|  | 1220 | This is required every time the blocked sigset_t changes. | 
|  | 1221 | Callers must hold sighand->siglock.  */ | 
|  | 1222 |  | 
|  | 1223 | extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t)); | 
|  | 1224 | extern void recalc_sigpending(void); | 
|  | 1225 |  | 
|  | 1226 | extern void signal_wake_up(struct task_struct *t, int resume_stopped); | 
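
/*
 * Illustrative sketch: as the comment above requires, ->blocked is only
 * changed with sighand->siglock held, and recalc_sigpending() is called
 * afterwards so TIF_SIGPENDING stays accurate.  example_block_signal()
 * is a hypothetical helper; real code would also refuse to block
 * SIGKILL and SIGSTOP.
 */
static inline void example_block_signal(int sig)
{
	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}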
|  | 1227 |  | 
|  | 1228 | /* | 
|  | 1229 | * Wrappers for p->thread_info->cpu access. No-op on UP. | 
|  | 1230 | */ | 
|  | 1231 | #ifdef CONFIG_SMP | 
|  | 1232 |  | 
|  | 1233 | static inline unsigned int task_cpu(const struct task_struct *p) | 
|  | 1234 | { | 
|  | 1235 | return p->thread_info->cpu; | 
|  | 1236 | } | 
|  | 1237 |  | 
|  | 1238 | static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) | 
|  | 1239 | { | 
|  | 1240 | p->thread_info->cpu = cpu; | 
|  | 1241 | } | 
|  | 1242 |  | 
|  | 1243 | #else | 
|  | 1244 |  | 
|  | 1245 | static inline unsigned int task_cpu(const struct task_struct *p) | 
|  | 1246 | { | 
|  | 1247 | return 0; | 
|  | 1248 | } | 
|  | 1249 |  | 
|  | 1250 | static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) | 
|  | 1251 | { | 
|  | 1252 | } | 
|  | 1253 |  | 
|  | 1254 | #endif /* CONFIG_SMP */ | 
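
/*
 * Illustrative sketch: task_cpu() reports the CPU a task was last
 * queued on (always 0 on UP).  example_task_is_local() is a
 * hypothetical helper and assumes the caller runs with preemption
 * disabled so smp_processor_id() is stable.
 */
static inline int example_task_is_local(const struct task_struct *p)
{
	return task_cpu(p) == smp_processor_id();
}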
|  | 1255 |  | 
|  | 1256 | #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT | 
|  | 1257 | extern void arch_pick_mmap_layout(struct mm_struct *mm); | 
|  | 1258 | #else | 
|  | 1259 | static inline void arch_pick_mmap_layout(struct mm_struct *mm) | 
|  | 1260 | { | 
|  | 1261 | mm->mmap_base = TASK_UNMAPPED_BASE; | 
|  | 1262 | mm->get_unmapped_area = arch_get_unmapped_area; | 
|  | 1263 | mm->unmap_area = arch_unmap_area; | 
|  | 1264 | } | 
|  | 1265 | #endif | 
|  | 1266 |  | 
|  | 1267 | extern long sched_setaffinity(pid_t pid, cpumask_t new_mask); | 
|  | 1268 | extern long sched_getaffinity(pid_t pid, cpumask_t *mask); | 
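
/*
 * Illustrative sketch: restricting a task to CPU 0 with
 * sched_setaffinity().  example_pin_to_cpu0() is a hypothetical helper;
 * the cpumask operations come from <linux/cpumask.h>.
 */
static inline long example_pin_to_cpu0(pid_t pid)
{
	cpumask_t mask;

	cpus_clear(mask);
	cpu_set(0, mask);
	return sched_setaffinity(pid, mask);
}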
|  | 1269 |  | 
|  | 1270 | #ifdef CONFIG_MAGIC_SYSRQ | 
|  | 1271 |  | 
|  | 1272 | extern void normalize_rt_tasks(void); | 
|  | 1273 |  | 
|  | 1274 | #endif | 
|  | 1275 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | #ifdef CONFIG_PM | 
| Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 1277 | /* | 
|  | 1278 | * Check if a process has been frozen | 
|  | 1279 | */ | 
|  | 1280 | static inline int frozen(struct task_struct *p) | 
|  | 1281 | { | 
|  | 1282 | return p->flags & PF_FROZEN; | 
|  | 1283 | } | 
|  | 1284 |  | 
|  | 1285 | /* | 
|  | 1286 | * Check if there is a request to freeze a process | 
|  | 1287 | */ | 
|  | 1288 | static inline int freezing(struct task_struct *p) | 
|  | 1289 | { | 
|  | 1290 | return p->flags & PF_FREEZE; | 
|  | 1291 | } | 
|  | 1292 |  | 
|  | 1293 | /* | 
|  | 1294 | * Request that a process be frozen | 
|  | 1295 | * FIXME: SMP problem. We may not modify another process's flags! | 
|  | 1296 | */ | 
|  | 1297 | static inline void freeze(struct task_struct *p) | 
|  | 1298 | { | 
|  | 1299 | p->flags |= PF_FREEZE; | 
|  | 1300 | } | 
|  | 1301 |  | 
|  | 1302 | /* | 
|  | 1303 | * Wake up a frozen process | 
|  | 1304 | */ | 
|  | 1305 | static inline int thaw_process(struct task_struct *p) | 
|  | 1306 | { | 
|  | 1307 | if (frozen(p)) { | 
|  | 1308 | p->flags &= ~PF_FROZEN; | 
|  | 1309 | wake_up_process(p); | 
|  | 1310 | return 1; | 
|  | 1311 | } | 
|  | 1312 | return 0; | 
|  | 1313 | } | 
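
/*
 * Illustrative sketch of the requesting side: roughly what
 * freeze_processes() does for each freezeable task - set PF_FREEZE with
 * freeze() and kick the task so it notices and calls refrigerator().
 * example_ask_to_freeze() is a hypothetical helper and assumes 'p' has
 * not already exited, so p->sighand is still valid.
 */
static inline void example_ask_to_freeze(struct task_struct *p)
{
	unsigned long flags;

	freeze(p);				/* "please enter the refrigerator" */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	signal_wake_up(p, 0);			/* nudge it so it sees the request */
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
}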
|  | 1314 |  | 
|  | 1315 | /* | 
|  | 1316 | * Freezing is complete; mark the process as frozen | 
|  | 1317 | */ | 
|  | 1318 | static inline void frozen_process(struct task_struct *p) | 
|  | 1319 | { | 
|  | 1320 | p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN; | 
|  | 1321 | } | 
|  | 1322 |  | 
|  | 1323 | extern void refrigerator(void); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1324 | extern int freeze_processes(void); | 
|  | 1325 | extern void thaw_processes(void); | 
|  | 1326 |  | 
| Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 1327 | static inline int try_to_freeze(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | { | 
| Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 1329 | if (freezing(current)) { | 
|  | 1330 | refrigerator(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | return 1; | 
|  | 1332 | } else | 
|  | 1333 | return 0; | 
|  | 1334 | } | 
|  | 1335 | #else | 
| Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 1336 | static inline int frozen(struct task_struct *p) { return 0; } | 
|  | 1337 | static inline int freezing(struct task_struct *p) { return 0; } | 
|  | 1338 | static inline void freeze(struct task_struct *p) { BUG(); } | 
|  | 1339 | static inline int thaw_process(struct task_struct *p) { return 1; } | 
|  | 1340 | static inline void frozen_process(struct task_struct *p) { BUG(); } | 
|  | 1341 |  | 
|  | 1342 | static inline void refrigerator(void) {} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 | static inline int freeze_processes(void) { BUG(); return 0; } | 
|  | 1344 | static inline void thaw_processes(void) {} | 
|  | 1345 |  | 
| Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 1346 | static inline int try_to_freeze(void) { return 0; } | 
|  | 1347 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 | #endif /* CONFIG_PM */ | 
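
/*
 * Illustrative sketch: the usual shape of a kernel thread's main loop,
 * calling try_to_freeze() so the thread parks in refrigerator() while a
 * suspend is in progress.  example_kthread() is hypothetical and
 * assumes the thread has arranged (e.g. with allow_signal()) for a
 * signal to act as its stop request.
 */
static inline int example_kthread(void *unused)
{
	while (!signal_pending(current)) {
		try_to_freeze();		/* park here during suspend */
		/* real per-iteration work would go here */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);		/* sleep for about a second */
	}
	return 0;
}
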
|  | 1349 | #endif /* __KERNEL__ */ | 
|  | 1350 |  | 
|  | 1351 | #endif |