/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

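/*
 * The initial user namespace, used by every task at boot.  It is
 * statically allocated; the reference count starts at 2 so that the
 * usual kref path never tears it down.
 */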
struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

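/*
 * __uidhashfn() folds the high bits of the UID onto the low bits before
 * masking, so UIDs that differ only in their upper bits still spread
 * across the hash table.
 */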
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.  So everything here must
 * use the irq-safe spin_lock_irq*() variants instead of the bh ones.
 */
static DEFINE_SPINLOCK(uidhash_lock);

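/*
 * The user_struct for UID 0.  init runs as root, so this is inserted
 * into the hash table directly in uid_cache_init().
 */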
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

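/*
 * Look up a user_struct by UID on the given hash chain.  On success the
 * reference count is bumped, so the caller owns a reference that must
 * eventually be dropped with free_uid().
 */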
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

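/*
 * With CONFIG_USER_SCHED every user gets a scheduler task group of its
 * own, created under the root task group; tasks are moved into their
 * user's group on setuid().  Without it, these hooks compile away.
 */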
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_USER_SCHED */

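/*
 * uids_mutex serializes hash lookup/insertion with the creation and
 * removal of the per-uid sysfs directories, so that a uid cannot be
 * re-added while its directory is still being torn down.
 */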
#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
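/*
 * Writing to this file adjusts the CPU weight of the user's task group
 * relative to other users, e.g.:
 *
 *	echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * gives uid 1000 twice the default weight of 1024.
 */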
#endif

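/*
 * The cpu_rt_runtime and cpu_rt_period attributes below expose the
 * realtime bandwidth of the user's task group; the group scheduler
 * interprets both values in microseconds.
 */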
#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%lu", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the kobject core (for now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/* create the /sys/kernel/uids/<uid>/ directory and its attribute files
 * for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 * 	"/sys/kernel/uids" directory
 * 	"/sys/kernel/uids/0" directory (for root user)
 * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* Work function that removes the sysfs directory for a user and frees
 * the corresponding structures.  It runs from a workqueue because
 * sysfs removal may sleep, which free_uid() cannot do with interrupts
 * disabled.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	/* free_user() restored the reference it gave up, so drop it again;
	 * somebody may have looked the uid up again in the meantime. */
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
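/*
 * Typical usage (a sketch, not a call site from this file):
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... inspect or update the per-user counters ...
 *		free_uid(user);
 *	}
 */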
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

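/*
 * Return the user_struct for @uid in @ns, allocating and hashing a new
 * one if none exists yet.  The returned structure carries a reference
 * that the caller must drop with free_uid().  Returns NULL if the
 * allocation or any of the setup steps fail.
 */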
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0)
			goto out_free_user;

		if (sched_create_user(new) < 0)
			goto out_put_keys;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);

	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_put_keys:
	key_put(new->uid_keyring);
	key_put(new->session_keyring);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

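/*
 * Point the calling task at @new_user: transfer the process count,
 * switch the keyrings and the scheduler task group, then drop the
 * task's reference on the old user_struct.
 */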
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).  If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

#ifdef CONFIG_USER_NS
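/*
 * Called when a user namespace is torn down: unhash every user_struct
 * that belongs to it and drop the namespace's reference on its root
 * user.
 */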
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * Collapse the chains: the user_structs stay alive but are no
	 * longer hashed; subsequent free_uid() calls will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);