/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

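/*
 * The table has 8 buckets when CONFIG_BASE_SMALL is set and 256
 * otherwise.  __uidhashfn() folds the bits above UIDHASH_BITS back
 * onto the low bits, so UIDs that differ only in their high bits
 * still spread across the table.
 */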
#define UIDHASH_BITS		(CONFIG_BASE_SMALL ? 3 : 8)
#define UIDHASH_SZ		(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];

 | 32 | /* | 
 | 33 |  * The uidhash_lock is mostly taken from process context, but it is | 
 | 34 |  * occasionally also taken from softirq/tasklet context, when | 
 | 35 |  * task-structs get RCU-freed. Hence all locking must be softirq-safe. | 
| Andrew Morton | 3fa97c9 | 2006-01-31 16:34:26 -0800 | [diff] [blame] | 36 |  * But free_uid() is also called with local interrupts disabled, and running | 
 | 37 |  * local_bh_enable() with local interrupts disabled is an error - we'll run | 
 | 38 |  * softirq callbacks, and they can unconditionally enable interrupts, and | 
 | 39 |  * the caller of free_uid() didn't expect that.. | 
| Ingo Molnar | 4021cb2 | 2006-01-25 15:23:07 +0100 | [diff] [blame] | 40 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 41 | static DEFINE_SPINLOCK(uidhash_lock); | 
 | 42 |  | 
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

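/*
 * Look up a user_struct on the given hash chain.  On success the
 * reference count is bumped, so a non-NULL result must eventually be
 * dropped with free_uid().
 */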
static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);

		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

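/*
 * Drop a reference on a user_struct and free it once the count hits
 * zero.  Interrupts are disabled by hand around atomic_dec_and_lock()
 * so that uidhash_lock is always taken with interrupts off, matching
 * its other users - see the comment above uidhash_lock for why
 * local_bh_disable() would not be safe here.
 */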
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		spin_unlock_irqrestore(&uidhash_lock, flags);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
	} else {
		local_irq_restore(flags);
	}
}

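/*
 * Find the user_struct for the given UID, creating it if necessary.
 * The allocation is done outside the spinlock, so after allocating we
 * look the UID up again under the lock and back out if another task
 * won the race.  Returns NULL on allocation failure.
 */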
struct user_struct *alloc_uid(uid_t uid)
{
	struct list_head *hashent = uidhashentry(uid);
	struct user_struct *up;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif

		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	return up;
}

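/*
 * Re-point current->user at new_user, moving the process count from
 * the old user to the new one and dropping the reference that
 * current->user held on the old user_struct.
 */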
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

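/*
 * Set up the slab cache for user_struct objects and the UID hash
 * table at boot, then hash in root_user so UID 0 lookups hit the
 * statically allocated structure.
 */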
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);