/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

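/*
 * Sum the per-CPU process counts.  Possible (not merely online) CPUs are
 * walked so that counts accumulated on CPUs that have since gone offline
 * are not lost.
 */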
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct_node(node)		\
		kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
# define free_task_struct(tsk)			\
		kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

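/*
 * Track kernel stacks in the owning zone's NR_KERNEL_STACK counter:
 * called with account = 1 when a stack is set up in dup_task_struct()
 * and account = -1 when it is torn down in free_task().
 */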
static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}

void free_task(struct task_struct *tsk)
{
	account_kernel_stack(tsk->stack, -1);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

/*
 * macro override instead of weak attribute alias, to workaround
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

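/*
 * Allocate a task_struct and kernel stack (thread_info) for a new child
 * on the node picked by tsk_fork_get_node() and copy the parent's
 * contents into them.  The usage count starts at two: one reference for
 * the new task itself and one for whoever eventually calls
 * release_task() on it (normally the parent).
 */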
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
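/*
 * Duplicate the parent's address space layout into a freshly initialised
 * child mm: every VMA (except those marked VM_DONTCOPY) is copied, its
 * mempolicy and anon_vma chain duplicated, file mappings linked into the
 * backing address_space, and the page table entries copied with
 * copy_page_range().
 */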
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			mutex_lock(&mapping->i_mmap_mutex);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			mutex_unlock(&mapping->i_mmap_mutex);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(pol);
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}

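/*
 * Perform the common initialisation of a newly allocated mm_struct:
 * reference counts, mmap_sem, the mmlist, RSS counters, the AIO context
 * and the owner, followed by allocation of the page global directory.
 * Returns the mm on success, or NULL (after freeing it) on failure.
 */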
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ?
		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_aio(mm);
	mm_init_owner(mm, p);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}

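/*
 * Final sanity check before an mm is freed: all RSS counters should have
 * drained back to zero, and no preallocated huge-page pmd table may be
 * left behind.
 */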
static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(mm->pmd_huge_pte);
#endif
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	mm_init_cpumask(mm);
	return mm_init(mm, current);
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		ksm_exit(mm);
		khugepaged_exit(mm); /* must run before exit_mmap */
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		if (mm->binfmt)
			module_put(mm->binfmt->module);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/*
 * We added or removed a vma mapping the executable. The vmas are only mapped
 * during exec and are not mapped with the mmap system call.
 * Callers must hold down_write() on the mm's mmap_sem for these
 */
void added_exe_file_vma(struct mm_struct *mm)
{
	mm->num_exe_file_vmas++;
}

void removed_exe_file_vma(struct mm_struct *mm)
{
	mm->num_exe_file_vmas--;
	if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
		fput(mm->exe_file);
		mm->exe_file = NULL;
	}

}

void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	if (new_exe_file)
		get_file(new_exe_file);
	if (mm->exe_file)
		fput(mm->exe_file);
	mm->exe_file = new_exe_file;
	mm->num_exe_file_vmas = 0;
}

struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	/* We need mmap_sem to protect against races with removal of
	 * VM_EXECUTABLE vmas */
	down_read(&mm->mmap_sem);
	exe_file = mm->exe_file;
	if (exe_file)
		get_file(exe_file);
	up_read(&mm->mmap_sem);
	return exe_file;
}

static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	/* It's safe to write the exe_file pointer without exe_file_lock because
	 * this is called during fork when the task is not yet in /proc */
	newmm->exe_file = get_mm_exe_file(oldmm);
}

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

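/*
 * Grab a reference to another task's mm for /proc-style access.  The
 * target's cred_guard_mutex is taken (killably) to stabilise its
 * credentials against a concurrent exec; unless the mm is our own, the
 * caller must pass the ptrace_may_access() check for the requested mode,
 * otherwise -EACCES is returned.
 */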
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}

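/*
 * vfork() completion handling: the child signals the parent's completion
 * under task_lock(), while the parent waits killably (and without being
 * counted by the freezer).  If the wait is interrupted by a fatal signal,
 * the parent detaches vfork_done first so the exiting child will not
 * touch a completion that is about to go away.
 */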
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	if (tsk->vfork_done)
		complete_vfork_done(tsk);

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble, say, a killed vfork parent shouldn't touch this mm.
	 * Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->flags & PF_SIGNALED) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));
	mm_init_cpumask(mm);

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	mm->pmd_huge_pte = NULL;
#endif

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

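/*
 * Set up the child's mm at fork time.  Kernel threads have no mm of
 * their own; with CLONE_VM the parent's mm is shared by bumping
 * mm_users, otherwise a full copy is made via dup_mm().
 */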
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

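/*
 * Set up the child's block I/O context.  With CLONE_IO the parent's
 * io_context is shared; otherwise, if the parent carries a valid I/O
 * priority, a fresh context is allocated for the child and the priority
 * copied over.
 */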
| Jens Axboe | fadad878 | 2008-01-24 08:54:47 +0100 | [diff] [blame] | 937 | static int copy_io(unsigned long clone_flags, struct task_struct *tsk) | 
| Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 938 | { | 
|  | 939 | #ifdef CONFIG_BLOCK | 
|  | 940 | struct io_context *ioc = current->io_context; | 
| Tejun Heo | 6e736be | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 941 | struct io_context *new_ioc; | 
| Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 942 |  | 
|  | 943 | if (!ioc) | 
|  | 944 | return 0; | 
| Jens Axboe | fadad878 | 2008-01-24 08:54:47 +0100 | [diff] [blame] | 945 | /* | 
|  | 946 | * Share io context with parent, if CLONE_IO is set | 
|  | 947 | */ | 
|  | 948 | if (clone_flags & CLONE_IO) { | 
|  | 949 | tsk->io_context = ioc_task_link(ioc); | 
|  | 950 | if (unlikely(!tsk->io_context)) | 
|  | 951 | return -ENOMEM; | 
|  | 952 | } else if (ioprio_valid(ioc->ioprio)) { | 
| Tejun Heo | 6e736be | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 953 | new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); | 
|  | 954 | if (unlikely(!new_ioc)) | 
| Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 955 | return -ENOMEM; | 
|  | 956 |  | 
| Tejun Heo | 6e736be | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 957 | new_ioc->ioprio = ioc->ioprio; | 
| Tejun Heo | 11a3122 | 2012-02-07 07:51:30 +0100 | [diff] [blame] | 958 | put_io_context(new_ioc); | 
| Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 959 | } | 
|  | 960 | #endif | 
|  | 961 | return 0; | 
|  | 962 | } | 
|  | 963 |  | 
| Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 964 | static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | { | 
|  | 966 | struct sighand_struct *sig; | 
|  | 967 |  | 
| Zhaolei | 6034880 | 2009-01-06 14:40:46 -0800 | [diff] [blame] | 968 | if (clone_flags & CLONE_SIGHAND) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | atomic_inc(&current->sighand->count); | 
|  | 970 | return 0; | 
|  | 971 | } | 
|  | 972 | sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); | 
| Ingo Molnar | e56d090 | 2006-01-08 01:01:37 -0800 | [diff] [blame] | 973 | rcu_assign_pointer(tsk->sighand, sig); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 974 | if (!sig) | 
|  | 975 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 976 | atomic_set(&sig->count, 1); | 
|  | 977 | memcpy(sig->action, current->sighand->action, sizeof(sig->action)); | 
|  | 978 | return 0; | 
|  | 979 | } | 
|  | 980 |  | 
| Oleg Nesterov | a7e5328 | 2006-03-28 16:11:27 -0800 | [diff] [blame] | 981 | void __cleanup_sighand(struct sighand_struct *sighand) | 
| Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 982 | { | 
| Oleg Nesterov | d80e731 | 2012-02-24 20:07:11 +0100 | [diff] [blame] | 983 | if (atomic_dec_and_test(&sighand->count)) { | 
|  | 984 | signalfd_cleanup(sighand); | 
| Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 985 | kmem_cache_free(sighand_cachep, sighand); | 
| Oleg Nesterov | d80e731 | 2012-02-24 20:07:11 +0100 | [diff] [blame] | 986 | } | 
| Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 987 | } | 
|  | 988 |  | 
| Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 989 |  | 
|  | 990 | /* | 
|  | 991 | * Initialize POSIX timer handling for a thread group. | 
|  | 992 | */ | 
|  | 993 | static void posix_cpu_timers_init_group(struct signal_struct *sig) | 
|  | 994 | { | 
| Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 995 | unsigned long cpu_limit; | 
|  | 996 |  | 
| Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 997 | /* Thread group counters. */ | 
|  | 998 | thread_group_cputime_init(sig); | 
|  | 999 |  | 
| Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 1000 | cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); | 
|  | 1001 | if (cpu_limit != RLIM_INFINITY) { | 
|  | 1002 | sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); | 
| Oleg Nesterov | 6279a751 | 2009-03-27 01:06:07 +0100 | [diff] [blame] | 1003 | sig->cputimer.running = 1; | 
|  | 1004 | } | 
|  | 1005 |  | 
| Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1006 | /* The timer lists. */ | 
|  | 1007 | INIT_LIST_HEAD(&sig->cpu_timers[0]); | 
|  | 1008 | INIT_LIST_HEAD(&sig->cpu_timers[1]); | 
|  | 1009 | INIT_LIST_HEAD(&sig->cpu_timers[2]); | 
|  | 1010 | } | 
|  | 1011 |  | 
| Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1012 | static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | { | 
|  | 1014 | struct signal_struct *sig; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 |  | 
| Oleg Nesterov | 4ab6c08 | 2009-08-26 14:29:24 -0700 | [diff] [blame] | 1016 | if (clone_flags & CLONE_THREAD) | 
| Peter Zijlstra | 490dea4 | 2008-11-24 17:06:57 +0100 | [diff] [blame] | 1017 | return 0; | 
| Oleg Nesterov | 6279a751 | 2009-03-27 01:06:07 +0100 | [diff] [blame] | 1018 |  | 
| Veaceslav Falico | a56704e | 2010-03-10 15:23:01 -0800 | [diff] [blame] | 1019 | sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | tsk->signal = sig; | 
|  | 1021 | if (!sig) | 
|  | 1022 | return -ENOMEM; | 
|  | 1023 |  | 
| Oleg Nesterov | b3ac022 | 2010-05-26 14:43:24 -0700 | [diff] [blame] | 1024 | sig->nr_threads = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1025 | atomic_set(&sig->live, 1); | 
| Oleg Nesterov | b3ac022 | 2010-05-26 14:43:24 -0700 | [diff] [blame] | 1026 | atomic_set(&sig->sigcnt, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1027 | init_waitqueue_head(&sig->wait_chldexit); | 
| Sukadev Bhattiprolu | b3bfa0c | 2009-04-02 16:58:08 -0700 | [diff] [blame] | 1028 | if (clone_flags & CLONE_NEWPID) | 
|  | 1029 | sig->flags |= SIGNAL_UNKILLABLE; | 
| Oleg Nesterov | db51aec | 2008-04-30 00:52:52 -0700 | [diff] [blame] | 1030 | sig->curr_target = tsk; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1031 | init_sigpending(&sig->shared_pending); | 
|  | 1032 | INIT_LIST_HEAD(&sig->posix_timers); | 
|  | 1033 |  | 
| Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1034 | hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | sig->real_timer.function = it_real_fn; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1036 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1037 | task_lock(current->group_leader); | 
|  | 1038 | memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); | 
|  | 1039 | task_unlock(current->group_leader); | 
|  | 1040 |  | 
| Oleg Nesterov | 6279a751 | 2009-03-27 01:06:07 +0100 | [diff] [blame] | 1041 | posix_cpu_timers_init_group(sig); | 
|  | 1042 |  | 
| Miloslav Trmac | 522ed77 | 2007-07-15 23:40:56 -0700 | [diff] [blame] | 1043 | tty_audit_fork(sig); | 
| Mike Galbraith | 5091faa | 2010-11-30 14:18:03 +0100 | [diff] [blame] | 1044 | sched_autogroup_fork(sig); | 
| Miloslav Trmac | 522ed77 | 2007-07-15 23:40:56 -0700 | [diff] [blame] | 1045 |  | 
| Ben Blum | 4714d1d | 2011-05-26 16:25:18 -0700 | [diff] [blame] | 1046 | #ifdef CONFIG_CGROUPS | 
| Tejun Heo | 257058a | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1047 | init_rwsem(&sig->group_rwsem); | 
| Ben Blum | 4714d1d | 2011-05-26 16:25:18 -0700 | [diff] [blame] | 1048 | #endif | 
|  | 1049 |  | 
| KOSAKI Motohiro | 28b83c5 | 2009-09-21 17:03:13 -0700 | [diff] [blame] | 1050 | sig->oom_adj = current->signal->oom_adj; | 
| David Rientjes | a63d83f | 2010-08-09 17:19:46 -0700 | [diff] [blame] | 1051 | sig->oom_score_adj = current->signal->oom_score_adj; | 
| Mandeep Singh Baines | dabb16f | 2011-01-13 15:46:05 -0800 | [diff] [blame] | 1052 | sig->oom_score_adj_min = current->signal->oom_score_adj_min; | 
| KOSAKI Motohiro | 28b83c5 | 2009-09-21 17:03:13 -0700 | [diff] [blame] | 1053 |  | 
| Lennart Poettering | ebec18a | 2012-03-23 15:01:54 -0700 | [diff] [blame] | 1054 | sig->has_child_subreaper = current->signal->has_child_subreaper || | 
|  | 1055 | current->signal->is_child_subreaper; | 
|  | 1056 |  | 
| KOSAKI Motohiro | 9b1bf12 | 2010-10-27 15:34:08 -0700 | [diff] [blame] | 1057 | mutex_init(&sig->cred_guard_mutex); | 
|  | 1058 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | return 0; | 
|  | 1060 | } | 
|  | 1061 |  | 
| Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1062 | static void copy_flags(unsigned long clone_flags, struct task_struct *p) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 | { | 
|  | 1064 | unsigned long new_flags = p->flags; | 
|  | 1065 |  | 
| Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 1066 | new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | new_flags |= PF_FORKNOEXEC; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | p->flags = new_flags; | 
|  | 1069 | } | 
|  | 1070 |  | 
| Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 1071 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | { | 
|  | 1073 | current->clear_child_tid = tidptr; | 
|  | 1074 |  | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1075 | return task_pid_vnr(current); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | } | 
|  | 1077 |  | 
| Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1078 | static void rt_mutex_init_task(struct task_struct *p) | 
| Ingo Molnar | 23f78d4 | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1079 | { | 
| Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 1080 | raw_spin_lock_init(&p->pi_lock); | 
| Zilvinas Valinskas | e29e175 | 2007-03-16 13:38:34 -0800 | [diff] [blame] | 1081 | #ifdef CONFIG_RT_MUTEXES | 
| Dima Zavin | 732375c | 2011-07-07 17:27:59 -0700 | [diff] [blame] | 1082 | plist_head_init(&p->pi_waiters); | 
| Ingo Molnar | 23f78d4 | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1083 | p->pi_blocked_on = NULL; | 
| Ingo Molnar | 23f78d4 | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1084 | #endif | 
|  | 1085 | } | 
|  | 1086 |  | 
| Balbir Singh | cf475ad | 2008-04-29 01:00:16 -0700 | [diff] [blame] | 1087 | #ifdef CONFIG_MM_OWNER | 
|  | 1088 | void mm_init_owner(struct mm_struct *mm, struct task_struct *p) | 
|  | 1089 | { | 
|  | 1090 | mm->owner = p; | 
|  | 1091 | } | 
|  | 1092 | #endif /* CONFIG_MM_OWNER */ | 
|  | 1093 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | /* | 
| Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1095 | * Initialize POSIX timer handling for a single task. | 
|  | 1096 | */ | 
|  | 1097 | static void posix_cpu_timers_init(struct task_struct *tsk) | 
|  | 1098 | { | 
| Martin Schwidefsky | 6486163 | 2011-12-15 14:56:09 +0100 | [diff] [blame] | 1099 | tsk->cputime_expires.prof_exp = 0; | 
|  | 1100 | tsk->cputime_expires.virt_exp = 0; | 
| Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1101 | tsk->cputime_expires.sched_exp = 0; | 
|  | 1102 | INIT_LIST_HEAD(&tsk->cpu_timers[0]); | 
|  | 1103 | INIT_LIST_HEAD(&tsk->cpu_timers[1]); | 
|  | 1104 | INIT_LIST_HEAD(&tsk->cpu_timers[2]); | 
|  | 1105 | } | 
|  | 1106 |  | 
|  | 1107 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | * This creates a new process as a copy of the old one, | 
|  | 1109 | * but does not actually start it yet. | 
|  | 1110 | * | 
|  | 1111 | * It copies the registers, and all the appropriate | 
|  | 1112 | * parts of the process environment (as per the clone | 
|  | 1113 | * flags). The actual kick-off is left to the caller. | 
|  | 1114 | */ | 
| Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1115 | static struct task_struct *copy_process(unsigned long clone_flags, | 
|  | 1116 | unsigned long stack_start, | 
|  | 1117 | struct pt_regs *regs, | 
|  | 1118 | unsigned long stack_size, | 
| Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1119 | int __user *child_tidptr, | 
| Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 1120 | struct pid *pid, | 
|  | 1121 | int trace) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | { | 
|  | 1123 | int retval; | 
| Mariusz Kozlowski | a24efe6 | 2007-10-18 23:41:09 -0700 | [diff] [blame] | 1124 | struct task_struct *p; | 
| Paul Menage | b4f48b6 | 2007-10-18 23:39:33 -0700 | [diff] [blame] | 1125 | int cgroup_callbacks_done = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 |  | 
|  | 1127 | if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) | 
|  | 1128 | return ERR_PTR(-EINVAL); | 
|  | 1129 |  | 
|  | 1130 | /* | 
|  | 1131 | * Thread groups must share signals as well, and detached threads | 
|  | 1132 | * can only be started up within the thread group. | 
|  | 1133 | */ | 
|  | 1134 | if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) | 
|  | 1135 | return ERR_PTR(-EINVAL); | 
|  | 1136 |  | 
|  | 1137 | /* | 
|  | 1138 | * Shared signal handlers imply shared VM. By way of the above, | 
|  | 1139 | * thread groups also imply shared VM. Blocking this case allows | 
|  | 1140 | * for various simplifications in other code. | 
|  | 1141 | */ | 
|  | 1142 | if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) | 
|  | 1143 | return ERR_PTR(-EINVAL); | 
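|  |  | /* | 
|  |  | * For illustration only: a typical thread library creates threads with | 
|  |  | * something like CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | | 
|  |  | * CLONE_THREAD (plus TID/TLS flags), which satisfies both checks above. | 
|  |  | */ | 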
|  | 1144 |  | 
| Sukadev Bhattiprolu | 123be07 | 2009-09-23 15:57:20 -0700 | [diff] [blame] | 1145 | /* | 
|  | 1146 | * Siblings of global init remain as zombies on exit since they are | 
|  | 1147 | * not reaped by their parent (swapper). To solve this and to avoid | 
|  | 1148 | * multi-rooted process trees, prevent global and container-inits | 
|  | 1149 | * from creating siblings. | 
|  | 1150 | */ | 
|  | 1151 | if ((clone_flags & CLONE_PARENT) && | 
|  | 1152 | current->signal->flags & SIGNAL_UNKILLABLE) | 
|  | 1153 | return ERR_PTR(-EINVAL); | 
|  | 1154 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1155 | retval = security_task_create(clone_flags); | 
|  | 1156 | if (retval) | 
|  | 1157 | goto fork_out; | 
|  | 1158 |  | 
|  | 1159 | retval = -ENOMEM; | 
|  | 1160 | p = dup_task_struct(current); | 
|  | 1161 | if (!p) | 
|  | 1162 | goto fork_out; | 
|  | 1163 |  | 
| Steven Rostedt | f7e8b61 | 2009-06-02 16:39:48 -0400 | [diff] [blame] | 1164 | ftrace_graph_init_task(p); | 
|  | 1165 |  | 
| Peter Zijlstra | bea493a | 2006-10-17 00:10:33 -0700 | [diff] [blame] | 1166 | rt_mutex_init_task(p); | 
|  | 1167 |  | 
| Ingo Molnar | d12c1a3 | 2008-07-14 12:09:28 +0200 | [diff] [blame] | 1168 | #ifdef CONFIG_PROVE_LOCKING | 
| Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 1169 | DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); | 
|  | 1170 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); | 
|  | 1171 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1172 | retval = -EAGAIN; | 
| David Howells | 3b11a1d | 2008-11-14 10:39:26 +1100 | [diff] [blame] | 1173 | if (atomic_read(&p->real_cred->user->processes) >= | 
| Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 1174 | task_rlimit(p, RLIMIT_NPROC)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && | 
| Serge Hallyn | 18b6e04 | 2008-10-15 16:38:45 -0500 | [diff] [blame] | 1176 | p->real_cred->user != INIT_USER) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 | goto bad_fork_free; | 
|  | 1178 | } | 
| Vasiliy Kulikov | 72fa599 | 2011-08-08 19:02:04 +0400 | [diff] [blame] | 1179 | current->flags &= ~PF_NPROC_EXCEEDED; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1180 |  | 
| David Howells | f1752ee | 2008-11-14 10:39:17 +1100 | [diff] [blame] | 1181 | retval = copy_creds(p, clone_flags); | 
|  | 1182 | if (retval < 0) | 
|  | 1183 | goto bad_fork_free; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 |  | 
|  | 1185 | /* | 
|  | 1186 | * If multiple threads are within copy_process(), then this check | 
|  | 1187 | * triggers too late. This doesn't hurt, the check is only there | 
|  | 1188 | * to stop root fork bombs. | 
|  | 1189 | */ | 
| Li Zefan | 04ec93f | 2009-02-06 08:17:19 +0000 | [diff] [blame] | 1190 | retval = -EAGAIN; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | if (nr_threads >= max_threads) | 
|  | 1192 | goto bad_fork_cleanup_count; | 
|  | 1193 |  | 
| Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1194 | if (!try_module_get(task_thread_info(p)->exec_domain->module)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1195 | goto bad_fork_cleanup_count; | 
|  | 1196 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | p->did_exec = 0; | 
| Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 1198 | delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 | copy_flags(clone_flags, p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | INIT_LIST_HEAD(&p->children); | 
|  | 1201 | INIT_LIST_HEAD(&p->sibling); | 
| Paul E. McKenney | f41d911 | 2009-08-22 13:56:52 -0700 | [diff] [blame] | 1202 | rcu_copy_process(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | p->vfork_done = NULL; | 
|  | 1204 | spin_lock_init(&p->alloc_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1205 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | init_sigpending(&p->pending); | 
|  | 1207 |  | 
| Martin Schwidefsky | 6486163 | 2011-12-15 14:56:09 +0100 | [diff] [blame] | 1208 | p->utime = p->stime = p->gtime = 0; | 
|  | 1209 | p->utimescaled = p->stimescaled = 0; | 
| Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 1210 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 
| Martin Schwidefsky | 6486163 | 2011-12-15 14:56:09 +0100 | [diff] [blame] | 1211 | p->prev_utime = p->prev_stime = 0; | 
| Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 1212 | #endif | 
| KAMEZAWA Hiroyuki | a3a2e76 | 2010-04-06 14:34:42 -0700 | [diff] [blame] | 1213 | #if defined(SPLIT_RSS_COUNTING) | 
|  | 1214 | memset(&p->rss_stat, 0, sizeof(p->rss_stat)); | 
|  | 1215 | #endif | 
| Balbir Singh | 172ba84 | 2007-07-09 18:52:00 +0200 | [diff] [blame] | 1216 |  | 
| Arjan van de Ven | 6976675 | 2008-09-01 15:52:40 -0700 | [diff] [blame] | 1217 | p->default_timer_slack_ns = current->timer_slack_ns; | 
|  | 1218 |  | 
| Andrea Righi | 5995477 | 2008-07-27 17:29:15 +0200 | [diff] [blame] | 1219 | task_io_accounting_init(&p->ioac); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | acct_clear_integrals(p); | 
|  | 1221 |  | 
| Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1222 | posix_cpu_timers_init(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1224 | do_posix_clock_monotonic_gettime(&p->start_time); | 
| Tomas Janousek | 924b42d | 2007-07-15 23:39:42 -0700 | [diff] [blame] | 1225 | p->real_start_time = p->start_time; | 
|  | 1226 | monotonic_to_bootbased(&p->real_start_time); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | p->io_context = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | p->audit_context = NULL; | 
| Ben Blum | 4714d1d | 2011-05-26 16:25:18 -0700 | [diff] [blame] | 1229 | if (clone_flags & CLONE_THREAD) | 
| Tejun Heo | 257058a | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1230 | threadgroup_change_begin(current); | 
| Paul Menage | b4f48b6 | 2007-10-18 23:39:33 -0700 | [diff] [blame] | 1231 | cgroup_fork(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1232 | #ifdef CONFIG_NUMA | 
| Lee Schermerhorn | 846a16b | 2008-04-28 02:13:09 -0700 | [diff] [blame] | 1233 | p->mempolicy = mpol_dup(p->mempolicy); | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1234 | if (IS_ERR(p->mempolicy)) { | 
|  | 1235 | retval = PTR_ERR(p->mempolicy); | 
|  | 1236 | p->mempolicy = NULL; | 
|  | 1237 | goto bad_fork_cleanup_cgroup; | 
|  | 1238 | } | 
| Paul Jackson | c61afb1 | 2006-03-24 03:16:08 -0800 | [diff] [blame] | 1239 | mpol_fix_fork_child_flag(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1240 | #endif | 
| Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 1241 | #ifdef CONFIG_CPUSETS | 
|  | 1242 | p->cpuset_mem_spread_rotor = NUMA_NO_NODE; | 
|  | 1243 | p->cpuset_slab_spread_rotor = NUMA_NO_NODE; | 
| Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1244 | seqcount_init(&p->mems_allowed_seq); | 
| Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 1245 | #endif | 
| Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 1246 | #ifdef CONFIG_TRACE_IRQFLAGS | 
|  | 1247 | p->irq_events = 0; | 
| Russell King | b36e475 | 2006-08-27 12:26:34 +0100 | [diff] [blame] | 1248 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | 
|  | 1249 | p->hardirqs_enabled = 1; | 
|  | 1250 | #else | 
| Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 1251 | p->hardirqs_enabled = 0; | 
| Russell King | b36e475 | 2006-08-27 12:26:34 +0100 | [diff] [blame] | 1252 | #endif | 
| Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 1253 | p->hardirq_enable_ip = 0; | 
|  | 1254 | p->hardirq_enable_event = 0; | 
|  | 1255 | p->hardirq_disable_ip = _THIS_IP_; | 
|  | 1256 | p->hardirq_disable_event = 0; | 
|  | 1257 | p->softirqs_enabled = 1; | 
|  | 1258 | p->softirq_enable_ip = _THIS_IP_; | 
|  | 1259 | p->softirq_enable_event = 0; | 
|  | 1260 | p->softirq_disable_ip = 0; | 
|  | 1261 | p->softirq_disable_event = 0; | 
|  | 1262 | p->hardirq_context = 0; | 
|  | 1263 | p->softirq_context = 0; | 
|  | 1264 | #endif | 
| Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1265 | #ifdef CONFIG_LOCKDEP | 
|  | 1266 | p->lockdep_depth = 0; /* no locks held yet */ | 
|  | 1267 | p->curr_chain_key = 0; | 
|  | 1268 | p->lockdep_recursion = 0; | 
|  | 1269 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1270 |  | 
| Ingo Molnar | 408894e | 2006-01-09 15:59:20 -0800 | [diff] [blame] | 1271 | #ifdef CONFIG_DEBUG_MUTEXES | 
|  | 1272 | p->blocked_on = NULL; /* not blocked yet */ | 
|  | 1273 | #endif | 
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 1274 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 
|  | 1275 | p->memcg_batch.do_batch = 0; | 
|  | 1276 | p->memcg_batch.memcg = NULL; | 
|  | 1277 | #endif | 
| Markus Metzger | 0f48140 | 2009-04-03 16:43:48 +0200 | [diff] [blame] | 1278 |  | 
| Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1279 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 
| Samir Bellabes | 3e51e3e | 2011-05-11 18:18:05 +0200 | [diff] [blame] | 1280 | sched_fork(p); | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 1281 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1282 | retval = perf_event_init_task(p); | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 1283 | if (retval) | 
|  | 1284 | goto bad_fork_cleanup_policy; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1285 | retval = audit_alloc(p); | 
|  | 1286 | if (retval) | 
| David Howells | f1752ee | 2008-11-14 10:39:17 +1100 | [diff] [blame] | 1287 | goto bad_fork_cleanup_policy; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | /* copy all the process information */ | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1289 | retval = copy_semundo(clone_flags, p); | 
|  | 1290 | if (retval) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 | goto bad_fork_cleanup_audit; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1292 | retval = copy_files(clone_flags, p); | 
|  | 1293 | if (retval) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | goto bad_fork_cleanup_semundo; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1295 | retval = copy_fs(clone_flags, p); | 
|  | 1296 | if (retval) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | goto bad_fork_cleanup_files; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1298 | retval = copy_sighand(clone_flags, p); | 
|  | 1299 | if (retval) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | goto bad_fork_cleanup_fs; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1301 | retval = copy_signal(clone_flags, p); | 
|  | 1302 | if (retval) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | goto bad_fork_cleanup_sighand; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1304 | retval = copy_mm(clone_flags, p); | 
|  | 1305 | if (retval) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | goto bad_fork_cleanup_signal; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1307 | retval = copy_namespaces(clone_flags, p); | 
|  | 1308 | if (retval) | 
| David Howells | d84f4f9 | 2008-11-14 10:39:23 +1100 | [diff] [blame] | 1309 | goto bad_fork_cleanup_mm; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1310 | retval = copy_io(clone_flags, p); | 
|  | 1311 | if (retval) | 
| Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1312 | goto bad_fork_cleanup_namespaces; | 
| Alexey Dobriyan | 6f2c55b | 2009-04-02 16:56:59 -0700 | [diff] [blame] | 1313 | retval = copy_thread(clone_flags, stack_start, stack_size, p, regs); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | if (retval) | 
| Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1315 | goto bad_fork_cleanup_io; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1316 |  | 
| Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 1317 | if (pid != &init_struct_pid) { | 
|  | 1318 | retval = -ENOMEM; | 
| Eric W. Biederman | 61bce0f | 2009-01-07 18:08:49 -0800 | [diff] [blame] | 1319 | pid = alloc_pid(p->nsproxy->pid_ns); | 
| Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 1320 | if (!pid) | 
| Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1321 | goto bad_fork_cleanup_io; | 
| Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 1322 | } | 
|  | 1323 |  | 
|  | 1324 | p->pid = pid_nr(pid); | 
|  | 1325 | p->tgid = p->pid; | 
|  | 1326 | if (clone_flags & CLONE_THREAD) | 
|  | 1327 | p->tgid = current->tgid; | 
|  | 1328 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | 
|  | 1330 | /* | 
|  | 1331 | * Clear TID on mm_release()? | 
|  | 1332 | */ | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1333 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1334 | #ifdef CONFIG_BLOCK | 
|  | 1335 | p->plug = NULL; | 
|  | 1336 | #endif | 
| Alexey Dobriyan | 42b2dd0 | 2007-10-16 23:27:30 -0700 | [diff] [blame] | 1337 | #ifdef CONFIG_FUTEX | 
| Ingo Molnar | 8f17d3a | 2006-03-27 01:16:27 -0800 | [diff] [blame] | 1338 | p->robust_list = NULL; | 
|  | 1339 | #ifdef CONFIG_COMPAT | 
|  | 1340 | p->compat_robust_list = NULL; | 
|  | 1341 | #endif | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1342 | INIT_LIST_HEAD(&p->pi_state_list); | 
|  | 1343 | p->pi_state_cache = NULL; | 
| Alexey Dobriyan | 42b2dd0 | 2007-10-16 23:27:30 -0700 | [diff] [blame] | 1344 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1345 | /* | 
| GOTO Masanori | f9a3879 | 2006-03-13 21:20:44 -0800 | [diff] [blame] | 1346 | * sigaltstack should be cleared when sharing the same VM | 
|  | 1347 | */ | 
|  | 1348 | if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) | 
|  | 1349 | p->sas_ss_sp = p->sas_ss_size = 0; | 
|  | 1350 |  | 
|  | 1351 | /* | 
| Oleg Nesterov | 6580807 | 2009-12-15 16:47:16 -0800 | [diff] [blame] | 1352 | * Syscall tracing and stepping should be turned off in the | 
|  | 1353 | * child regardless of CLONE_PTRACE. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | */ | 
| Oleg Nesterov | 6580807 | 2009-12-15 16:47:16 -0800 | [diff] [blame] | 1355 | user_disable_single_step(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1356 | clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); | 
| Laurent Vivier | ed75e8d | 2005-09-03 15:57:18 -0700 | [diff] [blame] | 1357 | #ifdef TIF_SYSCALL_EMU | 
|  | 1358 | clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); | 
|  | 1359 | #endif | 
| Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1360 | clear_all_latency_tracing(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1361 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | /* ok, now we should be set up.. */ | 
| Oleg Nesterov | 5f8aadd | 2012-03-14 19:55:38 +0100 | [diff] [blame] | 1363 | if (clone_flags & CLONE_THREAD) | 
|  | 1364 | p->exit_signal = -1; | 
|  | 1365 | else if (clone_flags & CLONE_PARENT) | 
|  | 1366 | p->exit_signal = current->group_leader->exit_signal; | 
|  | 1367 | else | 
|  | 1368 | p->exit_signal = (clone_flags & CSIGNAL); | 
|  | 1369 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 | p->pdeath_signal = 0; | 
|  | 1371 | p->exit_state = 0; | 
|  | 1372 |  | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1373 | p->nr_dirtied = 0; | 
|  | 1374 | p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); | 
| Wu Fengguang | 8371235 | 2011-06-11 19:25:42 -0600 | [diff] [blame] | 1375 | p->dirty_paused_when = 0; | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1376 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | /* | 
|  | 1378 | * Ok, make it visible to the rest of the system. | 
|  | 1379 | * We don't wake it up yet. | 
|  | 1380 | */ | 
|  | 1381 | p->group_leader = p; | 
| Oleg Nesterov | 47e6532 | 2006-03-28 16:11:25 -0800 | [diff] [blame] | 1382 | INIT_LIST_HEAD(&p->thread_group); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1383 |  | 
| Paul Menage | b4f48b6 | 2007-10-18 23:39:33 -0700 | [diff] [blame] | 1384 | /* Now that the task is set up, run cgroup callbacks if | 
|  | 1385 | * necessary. We need to run them before the task is visible | 
|  | 1386 | * on the tasklist. */ | 
|  | 1387 | cgroup_fork_callbacks(p); | 
|  | 1388 | cgroup_callbacks_done = 1; | 
|  | 1389 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1390 | /* Need tasklist lock for parent etc handling! */ | 
|  | 1391 | write_lock_irq(&tasklist_lock); | 
|  | 1392 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1393 | /* CLONE_PARENT re-uses the old parent */ | 
| Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 1394 | if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1395 | p->real_parent = current->real_parent; | 
| Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 1396 | p->parent_exec_id = current->parent_exec_id; | 
|  | 1397 | } else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1398 | p->real_parent = current; | 
| Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 1399 | p->parent_exec_id = current->self_exec_id; | 
|  | 1400 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 |  | 
| Oleg Nesterov | 3f17da6 | 2006-02-15 22:13:24 +0300 | [diff] [blame] | 1402 | spin_lock(&current->sighand->siglock); | 
| Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1403 |  | 
|  | 1404 | /* | 
|  | 1405 | * Process group and session signals need to be delivered to just the | 
|  | 1406 | * parent before the fork or both the parent and the child after the | 
|  | 1407 | * fork. Restart if a signal comes in before we add the new process to | 
|  | 1408 | * its process group. | 
|  | 1409 | * A fatal signal pending means that current will exit, so the new | 
|  | 1410 | * thread can't slip out of an OOM kill (or normal SIGKILL). | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1411 | */ | 
| Daniel Walker | 23ff444 | 2007-10-18 03:06:07 -0700 | [diff] [blame] | 1412 | recalc_sigpending(); | 
| Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1413 | if (signal_pending(current)) { | 
|  | 1414 | spin_unlock(&current->sighand->siglock); | 
|  | 1415 | write_unlock_irq(&tasklist_lock); | 
|  | 1416 | retval = -ERESTARTNOINTR; | 
| Steven Rostedt | f7e8b61 | 2009-06-02 16:39:48 -0400 | [diff] [blame] | 1417 | goto bad_fork_free_pid; | 
| Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1418 | } | 
|  | 1419 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | if (clone_flags & CLONE_THREAD) { | 
| Oleg Nesterov | b3ac022 | 2010-05-26 14:43:24 -0700 | [diff] [blame] | 1421 | current->signal->nr_threads++; | 
| Oleg Nesterov | 4ab6c08 | 2009-08-26 14:29:24 -0700 | [diff] [blame] | 1422 | atomic_inc(&current->signal->live); | 
| Oleg Nesterov | b3ac022 | 2010-05-26 14:43:24 -0700 | [diff] [blame] | 1423 | atomic_inc(&current->signal->sigcnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1424 | p->group_leader = current->group_leader; | 
| Oleg Nesterov | 47e6532 | 2006-03-28 16:11:25 -0800 | [diff] [blame] | 1425 | list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1426 | } | 
|  | 1427 |  | 
| Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1428 | if (likely(p->pid)) { | 
| Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 1429 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1430 |  | 
| Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1431 | if (thread_group_leader(p)) { | 
| Eric W. Biederman | 45a6862 | 2011-03-23 16:43:12 -0700 | [diff] [blame] | 1432 | if (is_child_reaper(pid)) | 
| Pavel Emelyanov | 30e49c2 | 2007-10-18 23:40:10 -0700 | [diff] [blame] | 1433 | p->nsproxy->pid_ns->child_reaper = p; | 
| Oleg Nesterov | c97d989 | 2006-03-28 16:11:06 -0800 | [diff] [blame] | 1434 |  | 
| Oleg Nesterov | fea9d17 | 2008-02-08 04:19:19 -0800 | [diff] [blame] | 1435 | p->signal->leader_pid = pid; | 
| Alan Cox | 9c9f4de | 2008-10-13 10:37:26 +0100 | [diff] [blame] | 1436 | p->signal->tty = tty_kref_get(current->signal->tty); | 
| Eric W. Biederman | 5cd1756 | 2007-12-04 23:45:04 -0800 | [diff] [blame] | 1437 | attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); | 
|  | 1438 | attach_pid(p, PIDTYPE_SID, task_session(current)); | 
| Oleg Nesterov | 9cd80bb | 2009-12-17 15:27:15 -0800 | [diff] [blame] | 1439 | list_add_tail(&p->sibling, &p->real_parent->children); | 
| Eric W. Biederman | 5e85d4a | 2006-04-18 22:20:16 -0700 | [diff] [blame] | 1440 | list_add_tail_rcu(&p->tasks, &init_task.tasks); | 
| Christoph Lameter | 909ea96 | 2010-12-08 16:22:55 +0100 | [diff] [blame] | 1441 | __this_cpu_inc(process_counts); | 
| Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1442 | } | 
| Sukadev Bhattiprolu | 8586899 | 2007-05-10 22:23:03 -0700 | [diff] [blame] | 1443 | attach_pid(p, PIDTYPE_PID, pid); | 
| Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1444 | nr_threads++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 | } | 
|  | 1446 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1447 | total_forks++; | 
| Oleg Nesterov | 3f17da6 | 2006-02-15 22:13:24 +0300 | [diff] [blame] | 1448 | spin_unlock(&current->sighand->siglock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1449 | write_unlock_irq(&tasklist_lock); | 
| Andrew Morton | c13cf85 | 2005-11-28 13:43:48 -0800 | [diff] [blame] | 1450 | proc_fork_connector(p); | 
| Paul Menage | 817929e | 2007-10-18 23:39:36 -0700 | [diff] [blame] | 1451 | cgroup_post_fork(p); | 
| Ben Blum | 4714d1d | 2011-05-26 16:25:18 -0700 | [diff] [blame] | 1452 | if (clone_flags & CLONE_THREAD) | 
| Tejun Heo | 257058a | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1453 | threadgroup_change_end(current); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1454 | perf_event_fork(p); | 
| KAMEZAWA Hiroyuki | 43d2b11 | 2012-01-10 15:08:09 -0800 | [diff] [blame] | 1455 |  | 
|  | 1456 | trace_task_newtask(p, clone_flags); | 
|  | 1457 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1458 | return p; | 
|  | 1459 |  | 
| Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 1460 | bad_fork_free_pid: | 
|  | 1461 | if (pid != &init_struct_pid) | 
|  | 1462 | free_pid(pid); | 
| Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1463 | bad_fork_cleanup_io: | 
| Louis Rilling | b69f229 | 2009-12-04 14:52:42 +0100 | [diff] [blame] | 1464 | if (p->io_context) | 
|  | 1465 | exit_io_context(p); | 
| Serge E. Hallyn | ab51601 | 2006-10-02 02:18:06 -0700 | [diff] [blame] | 1466 | bad_fork_cleanup_namespaces: | 
| Linus Torvalds | 444f378 | 2007-01-30 13:35:18 -0800 | [diff] [blame] | 1467 | exit_task_namespaces(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | bad_fork_cleanup_mm: | 
| David Rientjes | c9f0124 | 2011-10-31 17:07:15 -0700 | [diff] [blame] | 1469 | if (p->mm) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | mmput(p->mm); | 
|  | 1471 | bad_fork_cleanup_signal: | 
| Oleg Nesterov | 4ab6c08 | 2009-08-26 14:29:24 -0700 | [diff] [blame] | 1472 | if (!(clone_flags & CLONE_THREAD)) | 
| Mike Galbraith | 1c5354d | 2011-01-05 11:16:04 +0100 | [diff] [blame] | 1473 | free_signal_struct(p->signal); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1474 | bad_fork_cleanup_sighand: | 
| Oleg Nesterov | a7e5328 | 2006-03-28 16:11:27 -0800 | [diff] [blame] | 1475 | __cleanup_sighand(p->sighand); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | bad_fork_cleanup_fs: | 
|  | 1477 | exit_fs(p); /* blocking */ | 
|  | 1478 | bad_fork_cleanup_files: | 
|  | 1479 | exit_files(p); /* blocking */ | 
|  | 1480 | bad_fork_cleanup_semundo: | 
|  | 1481 | exit_sem(p); | 
|  | 1482 | bad_fork_cleanup_audit: | 
|  | 1483 | audit_free(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1484 | bad_fork_cleanup_policy: | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1485 | perf_event_free_task(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1486 | #ifdef CONFIG_NUMA | 
| Lee Schermerhorn | f0be3d3 | 2008-04-28 02:13:08 -0700 | [diff] [blame] | 1487 | mpol_put(p->mempolicy); | 
| Paul Menage | b4f48b6 | 2007-10-18 23:39:33 -0700 | [diff] [blame] | 1488 | bad_fork_cleanup_cgroup: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1489 | #endif | 
| Ben Blum | 4714d1d | 2011-05-26 16:25:18 -0700 | [diff] [blame] | 1490 | if (clone_flags & CLONE_THREAD) | 
| Tejun Heo | 257058a | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1491 | threadgroup_change_end(current); | 
| Paul Menage | b4f48b6 | 2007-10-18 23:39:33 -0700 | [diff] [blame] | 1492 | cgroup_exit(p, cgroup_callbacks_done); | 
| Shailabh Nagar | 35df17c | 2006-08-31 21:27:38 -0700 | [diff] [blame] | 1493 | delayacct_tsk_free(p); | 
| Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1494 | module_put(task_thread_info(p)->exec_domain->module); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1495 | bad_fork_cleanup_count: | 
| David Howells | d84f4f9 | 2008-11-14 10:39:23 +1100 | [diff] [blame] | 1496 | atomic_dec(&p->cred->user->processes); | 
| David Howells | e0e8173 | 2009-09-02 09:13:40 +0100 | [diff] [blame] | 1497 | exit_creds(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | bad_fork_free: | 
|  | 1499 | free_task(p); | 
| Oleg Nesterov | fe7d37d | 2006-01-08 01:04:02 -0800 | [diff] [blame] | 1500 | fork_out: | 
|  | 1501 | return ERR_PTR(retval); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1502 | } | 
|  | 1503 |  | 
| Adrian Bunk | 6b2fb3c | 2008-02-06 01:37:55 -0800 | [diff] [blame] | 1504 | noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1505 | { | 
|  | 1506 | memset(regs, 0, sizeof(struct pt_regs)); | 
|  | 1507 | return regs; | 
|  | 1508 | } | 
|  | 1509 |  | 
| Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 1510 | static inline void init_idle_pids(struct pid_link *links) | 
|  | 1511 | { | 
|  | 1512 | enum pid_type type; | 
|  | 1513 |  | 
|  | 1514 | for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { | 
|  | 1515 | INIT_HLIST_NODE(&links[type].node); /* not really needed */ | 
|  | 1516 | links[type].pid = &init_struct_pid; | 
|  | 1517 | } | 
|  | 1518 | } | 
|  | 1519 |  | 
| Al Viro | 9abcf40 | 2007-02-01 13:52:48 +0000 | [diff] [blame] | 1520 | struct task_struct * __cpuinit fork_idle(int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1521 | { | 
| Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1522 | struct task_struct *task; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | struct pt_regs regs; | 
|  | 1524 |  | 
| Pavel Emelyanov | 30e49c2 | 2007-10-18 23:40:10 -0700 | [diff] [blame] | 1525 | task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, | 
| Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 1526 | &init_struct_pid, 0); | 
| Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 1527 | if (!IS_ERR(task)) { | 
|  | 1528 | init_idle_pids(task->pids); | 
| Akinobu Mita | 753ca4f | 2006-11-25 11:09:34 -0800 | [diff] [blame] | 1529 | init_idle(task, cpu); | 
| Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 1530 | } | 
| Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1531 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 | return task; | 
|  | 1533 | } | 
|  | 1534 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | /* | 
|  | 1536 | *  Ok, this is the main fork-routine. | 
|  | 1537 | * | 
|  | 1538 | * It copies the process, and if successful kick-starts | 
|  | 1539 | * it and waits for it to finish using the VM if required. | 
|  | 1540 | */ | 
|  | 1541 | long do_fork(unsigned long clone_flags, | 
|  | 1542 | unsigned long stack_start, | 
|  | 1543 | struct pt_regs *regs, | 
|  | 1544 | unsigned long stack_size, | 
|  | 1545 | int __user *parent_tidptr, | 
|  | 1546 | int __user *child_tidptr) | 
|  | 1547 | { | 
|  | 1548 | struct task_struct *p; | 
|  | 1549 | int trace = 0; | 
| Eric W. Biederman | 92476d7 | 2006-03-31 02:31:42 -0800 | [diff] [blame] | 1550 | long nr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 |  | 
| Andrew Morton | bdff746 | 2008-02-04 22:27:22 -0800 | [diff] [blame] | 1552 | /* | 
| Serge Hallyn | 18b6e04 | 2008-10-15 16:38:45 -0500 | [diff] [blame] | 1553 | * Do some preliminary argument and permissions checking before we | 
|  | 1554 | * actually start allocating stuff | 
|  | 1555 | */ | 
|  | 1556 | if (clone_flags & CLONE_NEWUSER) { | 
|  | 1557 | if (clone_flags & CLONE_THREAD) | 
|  | 1558 | return -EINVAL; | 
|  | 1559 | /* hopefully this check will go away when userns support is | 
|  | 1560 | * complete | 
|  | 1561 | */ | 
| Serge E. Hallyn | 7657d90 | 2008-12-03 13:17:33 -0600 | [diff] [blame] | 1562 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || | 
|  | 1563 | !capable(CAP_SETGID)) | 
| Serge Hallyn | 18b6e04 | 2008-10-15 16:38:45 -0500 | [diff] [blame] | 1564 | return -EPERM; | 
|  | 1565 | } | 
|  | 1566 |  | 
|  | 1567 | /* | 
| Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 1568 | * Determine whether and which event to report to ptracer.  When | 
|  | 1569 | * called from kernel_thread or CLONE_UNTRACED is explicitly | 
|  | 1570 | * requested, no event is reported; otherwise, report if the event | 
|  | 1571 | * for the type of forking is enabled. | 
| Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 1572 | */ | 
| Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 1573 | if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) { | 
|  | 1574 | if (clone_flags & CLONE_VFORK) | 
|  | 1575 | trace = PTRACE_EVENT_VFORK; | 
|  | 1576 | else if ((clone_flags & CSIGNAL) != SIGCHLD) | 
|  | 1577 | trace = PTRACE_EVENT_CLONE; | 
|  | 1578 | else | 
|  | 1579 | trace = PTRACE_EVENT_FORK; | 
|  | 1580 |  | 
|  | 1581 | if (likely(!ptrace_event_enabled(current, trace))) | 
|  | 1582 | trace = 0; | 
|  | 1583 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 |  | 
| Sukadev Bhattiprolu | a6f5e06 | 2007-10-18 23:39:53 -0700 | [diff] [blame] | 1585 | p = copy_process(clone_flags, stack_start, regs, stack_size, | 
| Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 1586 | child_tidptr, NULL, trace); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1587 | /* | 
|  | 1588 | * Do this prior to waking up the new thread - the thread pointer | 
|  | 1589 | * might get invalid after that point, if the thread exits quickly. | 
|  | 1590 | */ | 
|  | 1591 | if (!IS_ERR(p)) { | 
|  | 1592 | struct completion vfork; | 
|  | 1593 |  | 
| Mathieu Desnoyers | 0a16b60 | 2008-07-18 12:16:17 -0400 | [diff] [blame] | 1594 | trace_sched_process_fork(current, p); | 
|  | 1595 |  | 
| Pavel Emelyanov | 6c5f3e7 | 2008-02-08 04:19:20 -0800 | [diff] [blame] | 1596 | nr = task_pid_vnr(p); | 
| Pavel Emelyanov | 30e49c2 | 2007-10-18 23:40:10 -0700 | [diff] [blame] | 1597 |  | 
|  | 1598 | if (clone_flags & CLONE_PARENT_SETTID) | 
|  | 1599 | put_user(nr, parent_tidptr); | 
| Sukadev Bhattiprolu | a6f5e06 | 2007-10-18 23:39:53 -0700 | [diff] [blame] | 1600 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | if (clone_flags & CLONE_VFORK) { | 
|  | 1602 | p->vfork_done = &vfork; | 
|  | 1603 | init_completion(&vfork); | 
| Oleg Nesterov | d68b46f | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1604 | get_task_struct(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | } | 
|  | 1606 |  | 
| Samir Bellabes | 3e51e3e | 2011-05-11 18:18:05 +0200 | [diff] [blame] | 1607 | wake_up_new_task(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1608 |  | 
| Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 1609 | /* forking complete and child started to run, tell ptracer */ | 
|  | 1610 | if (unlikely(trace)) | 
|  | 1611 | ptrace_event(trace, nr); | 
| Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 1612 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1613 | if (clone_flags & CLONE_VFORK) { | 
| Oleg Nesterov | d68b46f | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1614 | if (!wait_for_vfork_done(p, &vfork)) | 
|  | 1615 | ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1616 | } | 
|  | 1617 | } else { | 
| Eric W. Biederman | 92476d7 | 2006-03-31 02:31:42 -0800 | [diff] [blame] | 1618 | nr = PTR_ERR(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1619 | } | 
| Eric W. Biederman | 92476d7 | 2006-03-31 02:31:42 -0800 | [diff] [blame] | 1620 | return nr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1621 | } | 
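|  |  | /* | 
|  |  | * Illustrative sketch, not part of this file: architecture entry points | 
|  |  | * typically wrap do_fork() along these lines, e.g. for a plain fork(2): | 
|  |  | * | 
|  |  | *	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | 
|  |  | * | 
|  |  | * while kernel_thread() passes CLONE_VM | CLONE_UNTRACED so that no | 
|  |  | * ptrace event is reported for kernel threads (see the trace logic above). | 
|  |  | */ | 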
|  | 1622 |  | 
| Ravikiran G Thirumalai | 5fd63b3 | 2006-01-11 22:46:15 +0100 | [diff] [blame] | 1623 | #ifndef ARCH_MIN_MMSTRUCT_ALIGN | 
|  | 1624 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 | 
|  | 1625 | #endif | 
|  | 1626 |  | 
| Alexey Dobriyan | 51cc506 | 2008-07-25 19:45:34 -0700 | [diff] [blame] | 1627 | static void sighand_ctor(void *data) | 
| Oleg Nesterov | aa1757f | 2006-03-28 16:11:12 -0800 | [diff] [blame] | 1628 | { | 
|  | 1629 | struct sighand_struct *sighand = data; | 
|  | 1630 |  | 
| Christoph Lameter | a35afb8 | 2007-05-16 22:10:57 -0700 | [diff] [blame] | 1631 | spin_lock_init(&sighand->siglock); | 
| Davide Libenzi | b8fceee | 2007-09-20 12:40:16 -0700 | [diff] [blame] | 1632 | init_waitqueue_head(&sighand->signalfd_wqh); | 
| Oleg Nesterov | aa1757f | 2006-03-28 16:11:12 -0800 | [diff] [blame] | 1633 | } | 
|  | 1634 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1635 | void __init proc_caches_init(void) | 
|  | 1636 | { | 
|  | 1637 | sighand_cachep = kmem_cache_create("sighand_cache", | 
|  | 1638 | sizeof(struct sighand_struct), 0, | 
| Vegard Nossum | 2dff440 | 2008-05-31 15:56:17 +0200 | [diff] [blame] | 1639 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| | 
|  | 1640 | SLAB_NOTRACK, sighand_ctor); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1641 | signal_cachep = kmem_cache_create("signal_cache", | 
|  | 1642 | sizeof(struct signal_struct), 0, | 
| Vegard Nossum | 2dff440 | 2008-05-31 15:56:17 +0200 | [diff] [blame] | 1643 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); | 
| Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 1644 | files_cachep = kmem_cache_create("files_cache", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1645 | sizeof(struct files_struct), 0, | 
| Vegard Nossum | 2dff440 | 2008-05-31 15:56:17 +0200 | [diff] [blame] | 1646 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); | 
| Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 1647 | fs_cachep = kmem_cache_create("fs_cache", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1648 | sizeof(struct fs_struct), 0, | 
| Vegard Nossum | 2dff440 | 2008-05-31 15:56:17 +0200 | [diff] [blame] | 1649 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); | 
| Linus Torvalds | 6345d24 | 2011-05-29 11:32:28 -0700 | [diff] [blame] | 1650 | /* | 
|  | 1651 | * FIXME! The "sizeof(struct mm_struct)" currently includes the | 
|  | 1652 | * whole struct cpumask for the OFFSTACK case. We could change | 
|  | 1653 | * this to *only* allocate as much of it as required by the | 
|  | 1654 | * maximum number of CPUs we can ever have.  The cpumask_allocation | 
|  | 1655 | * is at the end of the structure, exactly for that reason. | 
|  | 1656 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1657 | mm_cachep = kmem_cache_create("mm_struct", | 
| Ravikiran G Thirumalai | 5fd63b3 | 2006-01-11 22:46:15 +0100 | [diff] [blame] | 1658 | sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, | 
| Vegard Nossum | 2dff440 | 2008-05-31 15:56:17 +0200 | [diff] [blame] | 1659 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); | 
| David Howells | 33e5d769 | 2009-04-02 16:56:32 -0700 | [diff] [blame] | 1660 | vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); | 
| David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1661 | mmap_init(); | 
| Al Viro | 6657719 | 2011-06-28 15:41:10 -0400 | [diff] [blame] | 1662 | nsproxy_cache_init(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | } | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1664 |  | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1665 | /* | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1666 | * Check constraints on flags passed to the unshare system call. | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1667 | */ | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1668 | static int check_unshare_flags(unsigned long unshare_flags) | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1669 | { | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1670 | if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| | 
|  | 1671 | CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| | 
|  | 1672 | CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET)) | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1673 | return -EINVAL; | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1674 | /* | 
|  | 1675 | * Not implemented, but pretend it works if there is nothing to | 
|  | 1676 | * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND | 
|  | 1677 | * needs to unshare vm. | 
|  | 1678 | */ | 
|  | 1679 | if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { | 
|  | 1680 | /* FIXME: get_task_mm() increments ->mm_users */ | 
|  | 1681 | if (atomic_read(&current->mm->mm_users) > 1) | 
|  | 1682 | return -EINVAL; | 
|  | 1683 | } | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1684 |  | 
|  | 1685 | return 0; | 
|  | 1686 | } | 
|  | 1687 |  | 
|  | 1688 | /* | 
| JANAK DESAI | 99d1419 | 2006-02-07 12:58:59 -0800 | [diff] [blame] | 1689 | * Unshare the filesystem structure if it is being shared | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1690 | */ | 
|  | 1691 | static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) | 
|  | 1692 | { | 
|  | 1693 | struct fs_struct *fs = current->fs; | 
|  | 1694 |  | 
| Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1695 | if (!(unshare_flags & CLONE_FS) || !fs) | 
|  | 1696 | return 0; | 
|  | 1697 |  | 
|  | 1698 | /* don't need lock here; in the worst case we'll do useless copy */ | 
|  | 1699 | if (fs->users == 1) | 
|  | 1700 | return 0; | 
|  | 1701 |  | 
|  | 1702 | *new_fsp = copy_fs_struct(fs); | 
|  | 1703 | if (!*new_fsp) | 
|  | 1704 | return -ENOMEM; | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1705 |  | 
|  | 1706 | return 0; | 
|  | 1707 | } | 
|  | 1708 |  | 
|  | 1709 | /* | 
| JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 1710 | * Unshare file descriptor table if it is being shared | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1711 | */ | 
|  | 1712 | static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) | 
|  | 1713 | { | 
|  | 1714 | struct files_struct *fd = current->files; | 
| JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 1715 | int error = 0; | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1716 |  | 
|  | 1717 | if ((unshare_flags & CLONE_FILES) && | 
| JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 1718 | (fd && atomic_read(&fd->count) > 1)) { | 
|  | 1719 | *new_fdp = dup_fd(fd, &error); | 
|  | 1720 | if (!*new_fdp) | 
|  | 1721 | return error; | 
|  | 1722 | } | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1723 |  | 
|  | 1724 | return 0; | 
|  | 1725 | } | 
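/*
 * Illustrative sketch, not part of fork.c: threads also share one
 * files_struct (fd->count > 1), so unshare_fd() above duplicates the
 * descriptor table.  A descriptor opened after the unshare lives only in the
 * unsharing thread's private table and is invalid (EBADF) for its siblings.
 * Hypothetical demo; build with -pthread.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

static int private_fd = -1;

static void *worker(void *arg)
{
	unshare(CLONE_FILES);			/* private fd table from here on */
	private_fd = open("/dev/null", O_RDONLY);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	if (fcntl(private_fd, F_GETFD) == -1 && errno == EBADF)
		printf("fd %d exists only in the worker's private table\n",
		       private_fd);
	return 0;
}
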
|  | 1726 |  | 
|  | 1727 | /* | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1728 | * unshare allows a process to 'unshare' part of the process | 
|  | 1729 | * context which was originally shared using clone.  copy_* | 
|  | 1730 | * functions used by do_fork() cannot be used here directly | 
|  | 1731 | * because they modify an inactive task_struct that is being | 
|  | 1732 | * constructed. Here we are modifying the current, active | 
|  | 1733 | * task_struct. | 
|  | 1734 | */ | 
| Heiko Carstens | 6559eed8 | 2009-01-14 14:14:32 +0100 | [diff] [blame] | 1735 | SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1736 | { | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1737 | struct fs_struct *fs, *new_fs = NULL; | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1738 | struct files_struct *fd, *new_fd = NULL; | 
| Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 1739 | struct nsproxy *new_nsproxy = NULL; | 
| Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 1740 | int do_sysvsem = 0; | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1741 | int err; | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1742 |  | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1743 | err = check_unshare_flags(unshare_flags); | 
|  | 1744 | if (err) | 
| Eric W. Biederman | 06f9d4f | 2006-03-22 00:07:40 -0800 | [diff] [blame] | 1745 | goto bad_unshare_out; | 
|  | 1746 |  | 
| Manfred Spraul | 6013f67 | 2008-04-29 01:00:59 -0700 | [diff] [blame] | 1747 | /* | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1748 | * If unsharing a mount namespace, we must also unshare filesystem information. | 
|  | 1749 | */ | 
|  | 1750 | if (unshare_flags & CLONE_NEWNS) | 
|  | 1751 | unshare_flags |= CLONE_FS; | 
|  | 1752 | /* | 
| Manfred Spraul | 6013f67 | 2008-04-29 01:00:59 -0700 | [diff] [blame] | 1753 | * CLONE_NEWIPC must also detach from the undolist: after switching | 
|  | 1754 | * to a new ipc namespace, the semaphore arrays from the old | 
|  | 1755 | * namespace are unreachable. | 
|  | 1756 | */ | 
|  | 1757 | if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) | 
| Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 1758 | do_sysvsem = 1; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1759 | err = unshare_fs(unshare_flags, &new_fs); | 
|  | 1760 | if (err) | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1761 | goto bad_unshare_out; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1762 | err = unshare_fd(unshare_flags, &new_fd); | 
|  | 1763 | if (err) | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1764 | goto bad_unshare_cleanup_fs; | 
| Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1765 | err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs); | 
|  | 1766 | if (err) | 
| Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 1767 | goto bad_unshare_cleanup_fd; | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1768 |  | 
| Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 1769 | if (new_fs || new_fd || do_sysvsem || new_nsproxy) { | 
| Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 1770 | if (do_sysvsem) { | 
|  | 1771 | /* | 
|  | 1772 | * As far as the SysV semaphore undo lists are concerned, CLONE_SYSVSEM is equivalent to sys_exit(). | 
|  | 1773 | */ | 
|  | 1774 | exit_sem(current); | 
|  | 1775 | } | 
| Serge E. Hallyn | ab51601 | 2006-10-02 02:18:06 -0700 | [diff] [blame] | 1776 |  | 
| Serge Hallyn | c0b2fc3 | 2006-10-02 02:18:18 -0700 | [diff] [blame] | 1777 | if (new_nsproxy) { | 
| Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 1778 | switch_task_namespaces(current, new_nsproxy); | 
|  | 1779 | new_nsproxy = NULL; | 
| Serge Hallyn | c0b2fc3 | 2006-10-02 02:18:18 -0700 | [diff] [blame] | 1780 | } | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1781 |  | 
| Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 1782 | task_lock(current); | 
|  | 1783 |  | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1784 | if (new_fs) { | 
|  | 1785 | fs = current->fs; | 
| Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 1786 | spin_lock(&fs->lock); | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1787 | current->fs = new_fs; | 
| Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1788 | if (--fs->users) | 
|  | 1789 | new_fs = NULL; | 
|  | 1790 | else | 
|  | 1791 | new_fs = fs; | 
| Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 1792 | spin_unlock(&fs->lock); | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1793 | } | 
|  | 1794 |  | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1795 | if (new_fd) { | 
|  | 1796 | fd = current->files; | 
|  | 1797 | current->files = new_fd; | 
|  | 1798 | new_fd = fd; | 
|  | 1799 | } | 
|  | 1800 |  | 
|  | 1801 | task_unlock(current); | 
|  | 1802 | } | 
|  | 1803 |  | 
| Serge Hallyn | c0b2fc3 | 2006-10-02 02:18:18 -0700 | [diff] [blame] | 1804 | if (new_nsproxy) | 
| Linus Torvalds | 444f378 | 2007-01-30 13:35:18 -0800 | [diff] [blame] | 1805 | put_nsproxy(new_nsproxy); | 
| Serge Hallyn | c0b2fc3 | 2006-10-02 02:18:18 -0700 | [diff] [blame] | 1806 |  | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1807 | bad_unshare_cleanup_fd: | 
|  | 1808 | if (new_fd) | 
|  | 1809 | put_files_struct(new_fd); | 
|  | 1810 |  | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1811 | bad_unshare_cleanup_fs: | 
|  | 1812 | if (new_fs) | 
| Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1813 | free_fs_struct(new_fs); | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1814 |  | 
| JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 1815 | bad_unshare_out: | 
|  | 1816 | return err; | 
|  | 1817 | } | 
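/*
 * Illustrative sketch, not part of fork.c: the common user-space pattern for
 * the system call defined above.  Unsharing the mount or UTS namespace needs
 * CAP_SYS_ADMIN, and, as the code above shows, CLONE_NEWNS silently pulls in
 * CLONE_FS as well.  Hypothetical example program.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	if (unshare(CLONE_NEWNS | CLONE_NEWUTS) == -1) {
		fprintf(stderr, "unshare: %s\n", strerror(errno));
		return 1;
	}
	/* Mount and hostname changes made from here on stay private to this
	 * process (and to any children it forks later). */
	printf("now running in private mount and UTS namespaces\n");
	return 0;
}
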
| Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 1818 |  | 
|  | 1819 | /* | 
|  | 1820 | *	Helper to unshare the files of the current task. | 
|  | 1821 | *	We don't want to expose copy_files internals to | 
|  | 1822 | *	the exec layer of the kernel. | 
|  | 1823 | */ | 
|  | 1824 |  | 
|  | 1825 | int unshare_files(struct files_struct **displaced) | 
|  | 1826 | { | 
|  | 1827 | struct task_struct *task = current; | 
| Al Viro | 5070451 | 2008-04-26 05:25:00 +0100 | [diff] [blame] | 1828 | struct files_struct *copy = NULL; | 
| Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 1829 | int error; | 
|  | 1830 |  | 
|  | 1831 | error = unshare_fd(CLONE_FILES, &copy); | 
|  | 1832 | if (error || !copy) { | 
|  | 1833 | *displaced = NULL; | 
|  | 1834 | return error; | 
|  | 1835 | } | 
|  | 1836 | *displaced = task->files; | 
|  | 1837 | task_lock(task); | 
|  | 1838 | task->files = copy; | 
|  | 1839 | task_unlock(task); | 
|  | 1840 | return 0; | 
|  | 1841 | } |
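
/*
 * Illustrative sketch, not part of fork.c: roughly how the exec layer is
 * expected to use unshare_files() (the exact code in fs/exec.c may differ).
 * The displaced table is only dropped once the new image is committed, so a
 * failed exec keeps running on the original descriptors.
 */
static int exec_like_caller(void)
{
	struct files_struct *displaced;
	int retval;

	retval = unshare_files(&displaced);	/* give current a private table */
	if (retval)
		return retval;

	/* ... load the new binary using current->files ... */

	if (displaced)
		put_files_struct(displaced);	/* release the old, shared table */
	return 0;
}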