/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
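/*
 * Worked example of the hash (illustrative; assumes UIDHASH_BITS == 8,
 * i.e. UIDHASH_SZ == 256): uid 1000 is 0x3e8, so
 * __uidhashfn(1000) == ((0x3e8 >> 8) + 0x3e8) & 0xff == 0x3eb & 0xff == 0xeb,
 * i.e. uid 1000 lands in bucket 235 of ns->uidhash_table.
 */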

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
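/*
 * Illustrative locking pattern for a process-context caller (this is
 * what find_user() below does); interrupts must be disabled across the
 * critical section because the lock is also taken from softirq context:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... look up or modify the hash ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 */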

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

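/*
 * On a successful lookup a reference is taken on the user_struct (the
 * atomic_inc() of ->__count below); the caller must drop it again with
 * free_uid().
 */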
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_FAIR_USER_SCHED

static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
static DEFINE_MUTEX(uids_mutex);

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* return cpu shares held by the user */
ssize_t cpu_shares_show(struct kset *kset, char *buffer)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);

	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
}

/* modify cpu shares held by the user */
ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);
	unsigned long shares;
	int rc;

	sscanf(buffer, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}
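/*
 * Illustrative use from user space (hypothetical shell session, assuming
 * CONFIG_FAIR_USER_SCHED and a user with uid 1000):
 *
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * A larger cpu_share gives that user's tasks proportionally more CPU
 * time relative to other users when the CPU is contended.
 */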

static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
{
	sa->attr.name = name;
	sa->attr.mode = mode;
	sa->show = cpu_shares_show;
	sa->store = cpu_shares_store;
}

/* Create "/sys/kernel/uids/<uid>" directory and
 *  "/sys/kernel/uids/<uid>/cpu_share" file for this user.
 */
static int user_kobject_create(struct user_struct *up)
{
	struct kset *kset = &up->kset;
	struct kobject *kobj = &kset->kobj;
	int error;

	memset(kset, 0, sizeof(struct kset));
	kobj->parent = &uids_kobject;	/* create under /sys/kernel/uids dir */
	kobject_set_name(kobj, "%d", up->uid);
	kset_init(kset);
	user_attr_init(&up->user_attr, "cpu_share", 0644);

	error = kobject_add(kobj);
	if (error)
		goto done;

	error = sysfs_create_file(kobj, &up->user_attr.attr);
	if (error)
		kobject_del(kobj);

	kobject_uevent(kobj, KOBJ_ADD);

done:
	return error;
}

/* Create these entries in the sysfs filesystem:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_kobject_init(void)
{
	int error;

	/* create under /sys/kernel dir */
	uids_kobject.parent = &kernel_subsys.kobj;
	uids_kobject.kset = &kernel_subsys;
	kobject_set_name(&uids_kobject, "uids");
	kobject_init(&uids_kobject);

	error = kobject_add(&uids_kobject);
	if (!error)
		error = user_kobject_create(&root_user);

	return error;
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	struct kobject *kobj = &up->kset.kobj;
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	sysfs_remove_file(kobj, &up->user_attr.attr);
	kobject_uevent(kobj, KOBJ_REMOVE);
	kobject_del(kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 *
 * The actual teardown (sysfs file and kobject removal) can sleep, so it
 * is deferred to a workqueue rather than being done here, where the
 * caller may be in atomic context.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the count; remove_user_sysfs_dir() will drop it again */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }
static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_FAIR_USER_SCHED */

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
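/*
 * Illustrative caller pattern (hypothetical code, not part of this file):
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... inspect user->processes, user->files, etc. ...
 *		free_uid(user);
 *	}
 */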

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	/*
	 * atomic_dec_and_lock() takes uidhash_lock only if the count
	 * drops to zero; free_user() then releases it again.
	 */
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new) {
			uids_mutex_unlock();
			return NULL;
		}
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (sched_create_user(new) < 0) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (user_kobject_create(new)) {
			sched_destroy_user(new);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);

	}

	uids_mutex_unlock();

	return up;
}

void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * be still alive, but not in hashes. subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);