/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
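/*
 * Userspace contract, as an illustrative sketch (see futex(2); the names
 * below are examples, not kernel API). A waiter only sleeps while the
 * futex word still holds the value it expects:
 *
 *	while (futex_word == LOCKED)
 *		futex(&futex_word, FUTEX_WAIT, LOCKED, ...);
 *
 * A waker stores the new value first and then calls
 * futex(&futex_word, FUTEX_WAKE, nr_wake, ...). FUTEX_WAIT returns
 * immediately when the word no longer matches the expected value, which
 * closes the race between the userspace check and going to sleep.
 */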
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
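/* i.e. 16 hash buckets on CONFIG_BASE_SMALL systems, 256 otherwise. */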

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash
 * list via the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me() */
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};
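/*
 * Waiters place a futex_q on their own stack and start from the template
 * above, e.g. "struct futex_q q = futex_q_init;" followed by
 * queue_me(&q, hb) once the hash bucket lock is held.
 */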

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32 *)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held (iput() can sleep).
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *page_head;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	if (err < 0)
		return err;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page_head = page;
	if (unlikely(PageTail(page))) {
		put_page(page);
		/* serialize against __split_huge_page_splitting() */
		local_irq_disable();
		if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
			page_head = compound_head(page);
			/*
			 * page_head is a valid pointer but we must pin
			 * it before taking the PG_lock and/or
			 * PG_compound_lock. The moment we re-enable
			 * irqs __split_huge_page_splitting() can
			 * return and the head page can be freed from
			 * under us. We can't take the PG_lock and/or
			 * PG_compound_lock on a page that could be
			 * freed from under us.
			 */
			if (page != page_head) {
				get_page(page_head);
				put_page(page);
			}
			local_irq_enable();
		} else {
			local_irq_enable();
			goto again;
		}
	}
#else
	page_head = compound_head(page);
	if (page != page_head) {
		get_page(page_head);
		put_page(page);
	}
#endif

	lock_page(page_head);
	if (!page_head->mapping) {
		unlock_page(page_head);
		put_page(page_head);
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object, not the particular process.
	 */
	if (PageAnon(page_head)) {
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page_head->mapping->host;
		key->shared.pgoff = page_head->index;
	}

	get_futex_key_refs(key);

	unlock_page(page_head);
	put_page(page_head);
	return 0;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fix up the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we might as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, (unsigned long)uaddr,
			     1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

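/*
 * The *_locked helpers below run with a hash bucket lock held, so they
 * must not sleep: page faults are disabled around the user access and
 * any fault is reported to the caller, which drops the lock and faults
 * the page in (see fault_in_user_writeable() above) before retrying.
 */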
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already.
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * about waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
			 * When pi_state->owner is NULL then the owner died
			 * and another waiter is on the fly. pi_state->owner
			 * is fixed up by the task which acquires
			 * pi_state->rt_mutex.
			 *
			 * We do not check for pid == 0 which can happen when
			 * the owner died and robust_list_exit() cleared the
			 * TID.
			 */
			if (pid && pi_state->owner) {
				/*
				 * Bail out if user space manipulated the
				 * futex value.
				 */
				if (pid != task_pid_vnr(pi_state->owner))
					return -EINVAL;
			}

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

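/*
 * PI futex word encoding used below (see FUTEX_TID_MASK, FUTEX_WAITERS
 * and FUTEX_OWNER_DIED in <linux/futex.h>):
 *
 *	0			- futex is unlocked
 *	TID			- locked by TID, no waiters
 *	TID | FUTEX_WAITERS	- locked, waiters queued in the kernel
 *	... | FUTEX_OWNER_DIED	- previous owner died holding the lock
 */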
/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	int lock_taken, ret, ownerdied = 0;
	u32 uval, newval, curval, vpid = task_pid_vnr(task);

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = vpid;
	if (set_waiters)
		newval |= FUTEX_WAITERS;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * We unconditionally take over the futex in two cases: the
	 * owner TID is 0 (the owner died and the robust list cleanup
	 * cleared the TID), or 'ownerdied' is set because a previous
	 * owner lookup found FUTEX_OWNER_DIED.
	 *
	 * This is safe as we are protected by the hash bucket lock!
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | vpid;
		ownerdied = 0;
		lock_taken = 1;
	}

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to an owner-died take over.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	struct task_struct *p = q->task;

	/*
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
	 * a non-futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non-existing task
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
	 */
	get_task_struct(p);

	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	raw_spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);

	raw_spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock_irq(&new_owner->pi_lock);

	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The OWNER_DIED
	 * bit need not be preserved here. We are the owner:
	 */
	if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
		return -EFAULT;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep and take the two hash
 * bucket locks in a fixed (address) order, which avoids ABBA deadlocks
 * between paths that lock two buckets:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}
 | 919 |  | 
| Darren Hart | 5eb3dc6 | 2009-03-12 00:55:52 -0700 | [diff] [blame] | 920 | static inline void | 
 | 921 | double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) | 
 | 922 | { | 
| Darren Hart | f061d35 | 2009-03-12 15:11:18 -0700 | [diff] [blame] | 923 | 	spin_unlock(&hb1->lock); | 
| Ingo Molnar | 88f502f | 2009-03-13 10:32:07 +0100 | [diff] [blame] | 924 | 	if (hb1 != hb2) | 
 | 925 | 		spin_unlock(&hb2->lock); | 
| Darren Hart | 5eb3dc6 | 2009-03-12 00:55:52 -0700 | [diff] [blame] | 926 | } | 
 | 927 |  | 
| Ingo Molnar | 8b8f319 | 2006-07-03 00:25:05 -0700 | [diff] [blame] | 928 | /* | 
| Darren Hart | b2d0994 | 2009-03-12 00:55:37 -0700 | [diff] [blame] | 929 |  * Wake up waiters matching bitset queued on this futex (uaddr). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 930 |  */ | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 931 | static int | 
 | 932 | futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 933 | { | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 934 | 	struct futex_hash_bucket *hb; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 935 | 	struct futex_q *this, *next; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 936 | 	struct plist_head *head; | 
| Peter Zijlstra | 38d47c1 | 2008-09-26 19:32:20 +0200 | [diff] [blame] | 937 | 	union futex_key key = FUTEX_KEY_INIT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 938 | 	int ret; | 
 | 939 |  | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 940 | 	if (!bitset) | 
 | 941 | 		return -EINVAL; | 
 | 942 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 943 | 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | 	if (unlikely(ret != 0)) | 
 | 945 | 		goto out; | 
 | 946 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 947 | 	hb = hash_futex(&key); | 
 | 948 | 	spin_lock(&hb->lock); | 
 | 949 | 	head = &hb->chain; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 950 |  | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 951 | 	plist_for_each_entry_safe(this, next, head, list) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 | 		if (match_futex (&this->key, &key)) { | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 953 | 			if (this->pi_state || this->rt_waiter) { | 
| Ingo Molnar | ed6f7b1 | 2006-07-01 04:35:46 -0700 | [diff] [blame] | 954 | 				ret = -EINVAL; | 
 | 955 | 				break; | 
 | 956 | 			} | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 957 |  | 
 | 958 | 			/* Check if one of the bits is set in both bitsets */ | 
 | 959 | 			if (!(this->bitset & bitset)) | 
 | 960 | 				continue; | 
 | 961 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | 			wake_futex(this); | 
 | 963 | 			if (++ret >= nr_wake) | 
 | 964 | 				break; | 
 | 965 | 		} | 
 | 966 | 	} | 
 | 967 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 968 | 	spin_unlock(&hb->lock); | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 969 | 	put_futex_key(&key); | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 970 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 | 	return ret; | 
 | 972 | } | 
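/*
 * Editor's illustrative sketch, not part of the original file: how userspace
 * typically reaches futex_wake(). FUTEX_WAKE_BITSET passes the bitset in val3;
 * plain FUTEX_WAKE behaves as if the bitset were FUTEX_BITSET_MATCH_ANY.
 * futex_wake_usr() is a hypothetical wrapper name.
 */
#if 0	/* example only: userspace code, does not build in the kernel */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long futex_wake_usr(int *uaddr, int nr_wake, unsigned int bitset)
{
	/* a zero bitset is rejected with -EINVAL, as seen above */
	return syscall(SYS_futex, uaddr, FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG,
		       nr_wake, NULL, NULL, bitset);
}
#endif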
 | 973 |  | 
 | 974 | /* | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 975 |  * Wake up all waiters hashed on the physical page that is mapped | 
 | 976 |  * to this virtual address: | 
 | 977 |  */ | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 978 | static int | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 979 | futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 980 | 	      int nr_wake, int nr_wake2, int op) | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 981 | { | 
| Peter Zijlstra | 38d47c1 | 2008-09-26 19:32:20 +0200 | [diff] [blame] | 982 | 	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 983 | 	struct futex_hash_bucket *hb1, *hb2; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 984 | 	struct plist_head *head; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 985 | 	struct futex_q *this, *next; | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 986 | 	int ret, op_ret; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 987 |  | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 988 | retry: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 989 | 	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1); | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 990 | 	if (unlikely(ret != 0)) | 
 | 991 | 		goto out; | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 992 | 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 993 | 	if (unlikely(ret != 0)) | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 994 | 		goto out_put_key1; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 995 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 996 | 	hb1 = hash_futex(&key1); | 
 | 997 | 	hb2 = hash_futex(&key2); | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 998 |  | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 999 | retry_private: | 
| Thomas Gleixner | eaaea80 | 2009-10-04 09:34:17 +0200 | [diff] [blame] | 1000 | 	double_lock_hb(hb1, hb2); | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1001 | 	op_ret = futex_atomic_op_inuser(op, uaddr2); | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1002 | 	if (unlikely(op_ret < 0)) { | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1003 |  | 
| Darren Hart | 5eb3dc6 | 2009-03-12 00:55:52 -0700 | [diff] [blame] | 1004 | 		double_unlock_hb(hb1, hb2); | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1005 |  | 
| David Howells | 7ee1dd3 | 2006-01-06 00:11:44 -0800 | [diff] [blame] | 1006 | #ifndef CONFIG_MMU | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1007 | 		/* | 
 | 1008 | 		 * we don't get EFAULT from MMU faults if we don't have an MMU, | 
 | 1009 | 		 * but we might get them from range checking | 
 | 1010 | 		 */ | 
| David Howells | 7ee1dd3 | 2006-01-06 00:11:44 -0800 | [diff] [blame] | 1011 | 		ret = op_ret; | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1012 | 		goto out_put_keys; | 
| David Howells | 7ee1dd3 | 2006-01-06 00:11:44 -0800 | [diff] [blame] | 1013 | #endif | 
 | 1014 |  | 
| David Gibson | 796f8d9 | 2005-11-07 00:59:33 -0800 | [diff] [blame] | 1015 | 		if (unlikely(op_ret != -EFAULT)) { | 
 | 1016 | 			ret = op_ret; | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1017 | 			goto out_put_keys; | 
| David Gibson | 796f8d9 | 2005-11-07 00:59:33 -0800 | [diff] [blame] | 1018 | 		} | 
 | 1019 |  | 
| Thomas Gleixner | d072599 | 2009-06-11 23:15:43 +0200 | [diff] [blame] | 1020 | 		ret = fault_in_user_writeable(uaddr2); | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1021 | 		if (ret) | 
| Darren Hart | de87fcc | 2009-03-12 00:55:46 -0700 | [diff] [blame] | 1022 | 			goto out_put_keys; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1023 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1024 | 		if (!(flags & FLAGS_SHARED)) | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 1025 | 			goto retry_private; | 
 | 1026 |  | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1027 | 		put_futex_key(&key2); | 
 | 1028 | 		put_futex_key(&key1); | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 1029 | 		goto retry; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1030 | 	} | 
 | 1031 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1032 | 	head = &hb1->chain; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1033 |  | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1034 | 	plist_for_each_entry_safe(this, next, head, list) { | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1035 | 		if (match_futex(&this->key, &key1)) { | 
 | 1036 | 			wake_futex(this); | 
 | 1037 | 			if (++ret >= nr_wake) | 
 | 1038 | 				break; | 
 | 1039 | 		} | 
 | 1040 | 	} | 
 | 1041 |  | 
 | 1042 | 	if (op_ret > 0) { | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1043 | 		head = &hb2->chain; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1044 |  | 
 | 1045 | 		op_ret = 0; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1046 | 		plist_for_each_entry_safe(this, next, head, list) { | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1047 | 			if (match_futex(&this->key, &key2)) { | 
 | 1048 | 				wake_futex(this); | 
 | 1049 | 				if (++op_ret >= nr_wake2) | 
 | 1050 | 					break; | 
 | 1051 | 			} | 
 | 1052 | 		} | 
 | 1053 | 		ret += op_ret; | 
 | 1054 | 	} | 
 | 1055 |  | 
| Darren Hart | 5eb3dc6 | 2009-03-12 00:55:52 -0700 | [diff] [blame] | 1056 | 	double_unlock_hb(hb1, hb2); | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1057 | out_put_keys: | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1058 | 	put_futex_key(&key2); | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1059 | out_put_key1: | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1060 | 	put_futex_key(&key1); | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1061 | out: | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 1062 | 	return ret; | 
 | 1063 | } | 
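/*
 * Editor's illustrative sketch, not part of the original file: the userspace
 * view of futex_wake_op(). The op argument encodes both an atomic update of
 * *uaddr2 and a comparison against its old value; nr_wake2 travels in the
 * timeout slot of the syscall. wake_op_example() is a hypothetical name.
 */
#if 0	/* example only: userspace code, does not build in the kernel */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long wake_op_example(int *uaddr1, int *uaddr2)
{
	/* *uaddr2 = 1; and if the old *uaddr2 == 0, also wake on uaddr2 */
	unsigned int op = FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0);

	return syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1 /* nr_wake */,
		       (void *)1UL /* nr_wake2 */, uaddr2, op);
}
#endif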
 | 1064 |  | 
| Darren Hart | 9121e47 | 2009-04-03 13:40:31 -0700 | [diff] [blame] | 1065 | /** | 
 | 1066 |  * requeue_futex() - Requeue a futex_q from one hb to another | 
 | 1067 |  * @q:		the futex_q to requeue | 
 | 1068 |  * @hb1:	the source hash_bucket | 
 | 1069 |  * @hb2:	the target hash_bucket | 
 | 1070 |  * @key2:	the new key for the requeued futex_q | 
 | 1071 |  */ | 
 | 1072 | static inline | 
 | 1073 | void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, | 
 | 1074 | 		   struct futex_hash_bucket *hb2, union futex_key *key2) | 
 | 1075 | { | 
 | 1076 |  | 
 | 1077 | 	/* | 
 | 1078 | 	 * If key1 and key2 hash to the same bucket, no need to | 
 | 1079 | 	 * requeue. | 
 | 1080 | 	 */ | 
 | 1081 | 	if (likely(&hb1->chain != &hb2->chain)) { | 
 | 1082 | 		plist_del(&q->list, &hb1->chain); | 
 | 1083 | 		plist_add(&q->list, &hb2->chain); | 
 | 1084 | 		q->lock_ptr = &hb2->lock; | 
| Darren Hart | 9121e47 | 2009-04-03 13:40:31 -0700 | [diff] [blame] | 1085 | 	} | 
 | 1086 | 	get_futex_key_refs(key2); | 
 | 1087 | 	q->key = *key2; | 
 | 1088 | } | 
 | 1089 |  | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1090 | /** | 
 | 1091 |  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue | 
| Darren Hart | d96ee56 | 2009-09-21 22:30:22 -0700 | [diff] [blame] | 1092 |  * @q:		the futex_q | 
 | 1093 |  * @key:	the key of the requeue target futex | 
 | 1094 |  * @hb:		the hash_bucket of the requeue target futex | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1095 |  * | 
 | 1096 |  * During futex_requeue, with requeue_pi=1, it is possible to acquire the | 
 | 1097 |  * target futex if it is uncontended or via a lock steal.  Set the futex_q key | 
 | 1098 |  * to the requeue target futex so the waiter can detect the wakeup on the right | 
 | 1099 |  * futex, but remove it from the hb and NULL the rt_waiter so it can detect | 
| Darren Hart | beda2c7 | 2009-08-09 15:34:39 -0700 | [diff] [blame] | 1100 |  *  atomic lock acquisition.  Set q->lock_ptr to the requeue target hb->lock | 
  | 1101 |  *  to protect access to the pi_state when fixing up the owner later.  Must be | 
  | 1102 |  *  called with both q->lock_ptr and hb->lock held. | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1103 |  */ | 
 | 1104 | static inline | 
| Darren Hart | beda2c7 | 2009-08-09 15:34:39 -0700 | [diff] [blame] | 1105 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, | 
 | 1106 | 			   struct futex_hash_bucket *hb) | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1107 | { | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1108 | 	get_futex_key_refs(key); | 
 | 1109 | 	q->key = *key; | 
 | 1110 |  | 
| Lai Jiangshan | 2e12978 | 2010-12-22 14:18:50 +0800 | [diff] [blame] | 1111 | 	__unqueue_futex(q); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1112 |  | 
 | 1113 | 	WARN_ON(!q->rt_waiter); | 
 | 1114 | 	q->rt_waiter = NULL; | 
 | 1115 |  | 
| Darren Hart | beda2c7 | 2009-08-09 15:34:39 -0700 | [diff] [blame] | 1116 | 	q->lock_ptr = &hb->lock; | 
| Darren Hart | beda2c7 | 2009-08-09 15:34:39 -0700 | [diff] [blame] | 1117 |  | 
| Thomas Gleixner | f1a11e0 | 2009-05-05 19:21:40 +0200 | [diff] [blame] | 1118 | 	wake_up_state(q->task, TASK_NORMAL); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1119 | } | 
 | 1120 |  | 
 | 1121 | /** | 
 | 1122 |  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1123 |  * @pifutex:		the user address of the to futex | 
 | 1124 |  * @hb1:		the from futex hash bucket, must be locked by the caller | 
 | 1125 |  * @hb2:		the to futex hash bucket, must be locked by the caller | 
 | 1126 |  * @key1:		the from futex key | 
 | 1127 |  * @key2:		the to futex key | 
 | 1128 |  * @ps:			address to store the pi_state pointer | 
 | 1129 |  * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0) | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1130 |  * | 
  | 1131 |  * Try to get the lock on behalf of the top waiter if we can do it atomically. | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1132 |  * Wake the top waiter if we succeed.  If the caller specified set_waiters, | 
 | 1133 |  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. | 
 | 1134 |  * hb1 and hb2 must be held by the caller. | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1135 |  * | 
 | 1136 |  * Returns: | 
  | 1137 |  *  0 - failed to acquire the lock atomically | 
 | 1138 |  *  1 - acquired the lock | 
 | 1139 |  * <0 - error | 
 | 1140 |  */ | 
 | 1141 | static int futex_proxy_trylock_atomic(u32 __user *pifutex, | 
 | 1142 | 				 struct futex_hash_bucket *hb1, | 
 | 1143 | 				 struct futex_hash_bucket *hb2, | 
 | 1144 | 				 union futex_key *key1, union futex_key *key2, | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1145 | 				 struct futex_pi_state **ps, int set_waiters) | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1146 | { | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1147 | 	struct futex_q *top_waiter = NULL; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1148 | 	u32 curval; | 
 | 1149 | 	int ret; | 
 | 1150 |  | 
 | 1151 | 	if (get_futex_value_locked(&curval, pifutex)) | 
 | 1152 | 		return -EFAULT; | 
 | 1153 |  | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1154 | 	/* | 
 | 1155 | 	 * Find the top_waiter and determine if there are additional waiters. | 
 | 1156 | 	 * If the caller intends to requeue more than 1 waiter to pifutex, | 
 | 1157 | 	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, | 
 | 1158 | 	 * as we have means to handle the possible fault.  If not, don't set | 
  | 1159 | 	 * the bit unnecessarily as it will force the subsequent unlock to enter | 
 | 1160 | 	 * the kernel. | 
 | 1161 | 	 */ | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1162 | 	top_waiter = futex_top_waiter(hb1, key1); | 
 | 1163 |  | 
 | 1164 | 	/* There are no waiters, nothing for us to do. */ | 
 | 1165 | 	if (!top_waiter) | 
 | 1166 | 		return 0; | 
 | 1167 |  | 
| Darren Hart | 84bc4af | 2009-08-13 17:36:53 -0700 | [diff] [blame] | 1168 | 	/* Ensure we requeue to the expected futex. */ | 
 | 1169 | 	if (!match_futex(top_waiter->requeue_pi_key, key2)) | 
 | 1170 | 		return -EINVAL; | 
 | 1171 |  | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1172 | 	/* | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1173 | 	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in | 
 | 1174 | 	 * the contended case or if set_waiters is 1.  The pi_state is returned | 
 | 1175 | 	 * in ps in contended cases. | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1176 | 	 */ | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1177 | 	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, | 
 | 1178 | 				   set_waiters); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1179 | 	if (ret == 1) | 
| Darren Hart | beda2c7 | 2009-08-09 15:34:39 -0700 | [diff] [blame] | 1180 | 		requeue_pi_wake_futex(top_waiter, key2, hb2); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1181 |  | 
 | 1182 | 	return ret; | 
 | 1183 | } | 
 | 1184 |  | 
 | 1185 | /** | 
 | 1186 |  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 | 
| Randy Dunlap | fb62db2 | 2010-10-13 11:02:34 -0700 | [diff] [blame] | 1187 |  * @uaddr1:	source futex user address | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1188 |  * @flags:	futex flags (FLAGS_SHARED, etc.) | 
| Randy Dunlap | fb62db2 | 2010-10-13 11:02:34 -0700 | [diff] [blame] | 1189 |  * @uaddr2:	target futex user address | 
 | 1190 |  * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi) | 
 | 1191 |  * @nr_requeue:	number of waiters to requeue (0-INT_MAX) | 
 | 1192 |  * @cmpval:	@uaddr1 expected value (or %NULL) | 
 | 1193 |  * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1194 |  *		pi futex (pi to pi requeue is not supported) | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1195 |  * | 
 | 1196 |  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire | 
 | 1197 |  * uaddr2 atomically on behalf of the top waiter. | 
 | 1198 |  * | 
 | 1199 |  * Returns: | 
 | 1200 |  * >=0 - on success, the number of tasks requeued or woken | 
 | 1201 |  *  <0 - on error | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 |  */ | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1203 | static int futex_requeue(u32 __user *uaddr1, unsigned int flags, | 
 | 1204 | 			 u32 __user *uaddr2, int nr_wake, int nr_requeue, | 
 | 1205 | 			 u32 *cmpval, int requeue_pi) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | { | 
| Peter Zijlstra | 38d47c1 | 2008-09-26 19:32:20 +0200 | [diff] [blame] | 1207 | 	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1208 | 	int drop_count = 0, task_count = 0, ret; | 
 | 1209 | 	struct futex_pi_state *pi_state = NULL; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1210 | 	struct futex_hash_bucket *hb1, *hb2; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1211 | 	struct plist_head *head1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | 	struct futex_q *this, *next; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1213 | 	u32 curval2; | 
 | 1214 |  | 
 | 1215 | 	if (requeue_pi) { | 
 | 1216 | 		/* | 
 | 1217 | 		 * requeue_pi requires a pi_state, try to allocate it now | 
 | 1218 | 		 * without any locks in case it fails. | 
 | 1219 | 		 */ | 
 | 1220 | 		if (refill_pi_state_cache()) | 
 | 1221 | 			return -ENOMEM; | 
 | 1222 | 		/* | 
 | 1223 | 		 * requeue_pi must wake as many tasks as it can, up to nr_wake | 
 | 1224 | 		 * + nr_requeue, since it acquires the rt_mutex prior to | 
 | 1225 | 		 * returning to userspace, so as to not leave the rt_mutex with | 
 | 1226 | 		 * waiters and no owner.  However, second and third wake-ups | 
 | 1227 | 		 * cannot be predicted as they involve race conditions with the | 
 | 1228 | 		 * first wake and a fault while looking up the pi_state.  Both | 
 | 1229 | 		 * pthread_cond_signal() and pthread_cond_broadcast() should | 
 | 1230 | 		 * use nr_wake=1. | 
 | 1231 | 		 */ | 
 | 1232 | 		if (nr_wake != 1) | 
 | 1233 | 			return -EINVAL; | 
 | 1234 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 |  | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1236 | retry: | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1237 | 	if (pi_state != NULL) { | 
 | 1238 | 		/* | 
 | 1239 | 		 * We will have to lookup the pi_state again, so free this one | 
 | 1240 | 		 * to keep the accounting correct. | 
 | 1241 | 		 */ | 
 | 1242 | 		free_pi_state(pi_state); | 
 | 1243 | 		pi_state = NULL; | 
 | 1244 | 	} | 
 | 1245 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1246 | 	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1247 | 	if (unlikely(ret != 0)) | 
 | 1248 | 		goto out; | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1249 | 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1250 | 	if (unlikely(ret != 0)) | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1251 | 		goto out_put_key1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1252 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1253 | 	hb1 = hash_futex(&key1); | 
 | 1254 | 	hb2 = hash_futex(&key2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1255 |  | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 1256 | retry_private: | 
| Ingo Molnar | 8b8f319 | 2006-07-03 00:25:05 -0700 | [diff] [blame] | 1257 | 	double_lock_hb(hb1, hb2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1258 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1259 | 	if (likely(cmpval != NULL)) { | 
 | 1260 | 		u32 curval; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1262 | 		ret = get_futex_value_locked(&curval, uaddr1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 |  | 
 | 1264 | 		if (unlikely(ret)) { | 
| Darren Hart | 5eb3dc6 | 2009-03-12 00:55:52 -0700 | [diff] [blame] | 1265 | 			double_unlock_hb(hb1, hb2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 |  | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 1267 | 			ret = get_user(curval, uaddr1); | 
 | 1268 | 			if (ret) | 
 | 1269 | 				goto out_put_keys; | 
 | 1270 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1271 | 			if (!(flags & FLAGS_SHARED)) | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 1272 | 				goto retry_private; | 
 | 1273 |  | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1274 | 			put_futex_key(&key2); | 
 | 1275 | 			put_futex_key(&key1); | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 1276 | 			goto retry; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1277 | 		} | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1278 | 		if (curval != *cmpval) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | 			ret = -EAGAIN; | 
 | 1280 | 			goto out_unlock; | 
 | 1281 | 		} | 
 | 1282 | 	} | 
 | 1283 |  | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1284 | 	if (requeue_pi && (task_count - nr_wake < nr_requeue)) { | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1285 | 		/* | 
 | 1286 | 		 * Attempt to acquire uaddr2 and wake the top waiter. If we | 
 | 1287 | 		 * intend to requeue waiters, force setting the FUTEX_WAITERS | 
 | 1288 | 		 * bit.  We force this here where we are able to easily handle | 
  | 1289 | 		 * faults rather than in the requeue loop below. | 
 | 1290 | 		 */ | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1291 | 		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1292 | 						 &key2, &pi_state, nr_requeue); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1293 |  | 
 | 1294 | 		/* | 
 | 1295 | 		 * At this point the top_waiter has either taken uaddr2 or is | 
 | 1296 | 		 * waiting on it.  If the former, then the pi_state will not | 
  | 1297 | 		 * exist yet; look it up one more time to ensure we have a | 
 | 1298 | 		 * reference to it. | 
 | 1299 | 		 */ | 
 | 1300 | 		if (ret == 1) { | 
 | 1301 | 			WARN_ON(pi_state); | 
| Darren Hart | 89061d3 | 2009-10-15 15:30:48 -0700 | [diff] [blame] | 1302 | 			drop_count++; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1303 | 			task_count++; | 
 | 1304 | 			ret = get_futex_value_locked(&curval2, uaddr2); | 
 | 1305 | 			if (!ret) | 
 | 1306 | 				ret = lookup_pi_state(curval2, hb2, &key2, | 
 | 1307 | 						      &pi_state); | 
 | 1308 | 		} | 
 | 1309 |  | 
 | 1310 | 		switch (ret) { | 
 | 1311 | 		case 0: | 
 | 1312 | 			break; | 
 | 1313 | 		case -EFAULT: | 
 | 1314 | 			double_unlock_hb(hb1, hb2); | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1315 | 			put_futex_key(&key2); | 
 | 1316 | 			put_futex_key(&key1); | 
| Thomas Gleixner | d072599 | 2009-06-11 23:15:43 +0200 | [diff] [blame] | 1317 | 			ret = fault_in_user_writeable(uaddr2); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1318 | 			if (!ret) | 
 | 1319 | 				goto retry; | 
 | 1320 | 			goto out; | 
 | 1321 | 		case -EAGAIN: | 
 | 1322 | 			/* The owner was exiting, try again. */ | 
 | 1323 | 			double_unlock_hb(hb1, hb2); | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1324 | 			put_futex_key(&key2); | 
 | 1325 | 			put_futex_key(&key1); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1326 | 			cond_resched(); | 
 | 1327 | 			goto retry; | 
 | 1328 | 		default: | 
 | 1329 | 			goto out_unlock; | 
 | 1330 | 		} | 
 | 1331 | 	} | 
 | 1332 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1333 | 	head1 = &hb1->chain; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1334 | 	plist_for_each_entry_safe(this, next, head1, list) { | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1335 | 		if (task_count - nr_wake >= nr_requeue) | 
 | 1336 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 |  | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1338 | 		if (!match_futex(&this->key, &key1)) | 
 | 1339 | 			continue; | 
 | 1340 |  | 
| Darren Hart | 392741e | 2009-08-07 15:20:48 -0700 | [diff] [blame] | 1341 | 		/* | 
 | 1342 | 		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always | 
 | 1343 | 		 * be paired with each other and no other futex ops. | 
 | 1344 | 		 */ | 
 | 1345 | 		if ((requeue_pi && !this->rt_waiter) || | 
 | 1346 | 		    (!requeue_pi && this->rt_waiter)) { | 
 | 1347 | 			ret = -EINVAL; | 
 | 1348 | 			break; | 
 | 1349 | 		} | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1350 |  | 
 | 1351 | 		/* | 
 | 1352 | 		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the | 
 | 1353 | 		 * lock, we already woke the top_waiter.  If not, it will be | 
 | 1354 | 		 * woken by futex_unlock_pi(). | 
 | 1355 | 		 */ | 
 | 1356 | 		if (++task_count <= nr_wake && !requeue_pi) { | 
 | 1357 | 			wake_futex(this); | 
 | 1358 | 			continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | 		} | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1360 |  | 
| Darren Hart | 84bc4af | 2009-08-13 17:36:53 -0700 | [diff] [blame] | 1361 | 		/* Ensure we requeue to the expected futex for requeue_pi. */ | 
 | 1362 | 		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) { | 
 | 1363 | 			ret = -EINVAL; | 
 | 1364 | 			break; | 
 | 1365 | 		} | 
 | 1366 |  | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1367 | 		/* | 
 | 1368 | 		 * Requeue nr_requeue waiters and possibly one more in the case | 
 | 1369 | 		 * of requeue_pi if we couldn't acquire the lock atomically. | 
 | 1370 | 		 */ | 
 | 1371 | 		if (requeue_pi) { | 
 | 1372 | 			/* Prepare the waiter to take the rt_mutex. */ | 
 | 1373 | 			atomic_inc(&pi_state->refcount); | 
 | 1374 | 			this->pi_state = pi_state; | 
 | 1375 | 			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, | 
 | 1376 | 							this->rt_waiter, | 
 | 1377 | 							this->task, 1); | 
 | 1378 | 			if (ret == 1) { | 
 | 1379 | 				/* We got the lock. */ | 
| Darren Hart | beda2c7 | 2009-08-09 15:34:39 -0700 | [diff] [blame] | 1380 | 				requeue_pi_wake_futex(this, &key2, hb2); | 
| Darren Hart | 89061d3 | 2009-10-15 15:30:48 -0700 | [diff] [blame] | 1381 | 				drop_count++; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1382 | 				continue; | 
 | 1383 | 			} else if (ret) { | 
 | 1384 | 				/* -EDEADLK */ | 
 | 1385 | 				this->pi_state = NULL; | 
 | 1386 | 				free_pi_state(pi_state); | 
 | 1387 | 				goto out_unlock; | 
 | 1388 | 			} | 
 | 1389 | 		} | 
 | 1390 | 		requeue_futex(this, hb1, hb2, &key2); | 
 | 1391 | 		drop_count++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | 	} | 
 | 1393 |  | 
 | 1394 | out_unlock: | 
| Darren Hart | 5eb3dc6 | 2009-03-12 00:55:52 -0700 | [diff] [blame] | 1395 | 	double_unlock_hb(hb1, hb2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 |  | 
| Darren Hart | cd84a42 | 2009-04-02 14:19:38 -0700 | [diff] [blame] | 1397 | 	/* | 
 | 1398 | 	 * drop_futex_key_refs() must be called outside the spinlocks. During | 
 | 1399 | 	 * the requeue we moved futex_q's from the hash bucket at key1 to the | 
 | 1400 | 	 * one at key2 and updated their key pointer.  We no longer need to | 
 | 1401 | 	 * hold the references to key1. | 
 | 1402 | 	 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | 	while (--drop_count >= 0) | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 1404 | 		drop_futex_key_refs(&key1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1405 |  | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1406 | out_put_keys: | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1407 | 	put_futex_key(&key2); | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1408 | out_put_key1: | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1409 | 	put_futex_key(&key1); | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1410 | out: | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 1411 | 	if (pi_state != NULL) | 
 | 1412 | 		free_pi_state(pi_state); | 
 | 1413 | 	return ret ? ret : task_count; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1414 | } | 
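/*
 * Editor's illustrative sketch, not part of the original file: the classic
 * consumer of futex_requeue() is a condvar broadcast that wakes one waiter
 * and moves the rest onto the mutex word, avoiding a thundering herd.
 * cond_broadcast_example() and its two words are hypothetical.
 */
#if 0	/* example only: userspace code, does not build in the kernel */
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long cond_broadcast_example(int *cond, int *mutex, int expected)
{
	/*
	 * Wake 1 waiter and requeue up to INT_MAX waiters from cond onto
	 * mutex, provided *cond still holds the expected value (else -EAGAIN).
	 */
	return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE, 1,
		       (void *)(unsigned long)INT_MAX, mutex, expected);
}
#endif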
 | 1415 |  | 
 | 1416 | /* The key must be already stored in q->key. */ | 
| Eric Sesterhenn | 82af7ac | 2008-01-25 10:40:46 +0100 | [diff] [blame] | 1417 | static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) | 
| Namhyung Kim | 15e408c | 2010-09-14 21:43:48 +0900 | [diff] [blame] | 1418 | 	__acquires(&hb->lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1419 | { | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1420 | 	struct futex_hash_bucket *hb; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1421 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1422 | 	hb = hash_futex(&q->key); | 
 | 1423 | 	q->lock_ptr = &hb->lock; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1424 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1425 | 	spin_lock(&hb->lock); | 
 | 1426 | 	return hb; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | } | 
 | 1428 |  | 
| Darren Hart | d40d65c | 2009-09-21 22:30:15 -0700 | [diff] [blame] | 1429 | static inline void | 
 | 1430 | queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) | 
| Namhyung Kim | 15e408c | 2010-09-14 21:43:48 +0900 | [diff] [blame] | 1431 | 	__releases(&hb->lock) | 
| Darren Hart | d40d65c | 2009-09-21 22:30:15 -0700 | [diff] [blame] | 1432 | { | 
 | 1433 | 	spin_unlock(&hb->lock); | 
| Darren Hart | d40d65c | 2009-09-21 22:30:15 -0700 | [diff] [blame] | 1434 | } | 
 | 1435 |  | 
 | 1436 | /** | 
 | 1437 |  * queue_me() - Enqueue the futex_q on the futex_hash_bucket | 
 | 1438 |  * @q:	The futex_q to enqueue | 
 | 1439 |  * @hb:	The destination hash bucket | 
 | 1440 |  * | 
 | 1441 |  * The hb->lock must be held by the caller, and is released here. A call to | 
 | 1442 |  * queue_me() is typically paired with exactly one call to unqueue_me().  The | 
 | 1443 |  * exceptions involve the PI related operations, which may use unqueue_me_pi() | 
 | 1444 |  * or nothing if the unqueue is done as part of the wake process and the unqueue | 
  | 1445 |  * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for | 
 | 1446 |  * an example). | 
 | 1447 |  */ | 
| Eric Sesterhenn | 82af7ac | 2008-01-25 10:40:46 +0100 | [diff] [blame] | 1448 | static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) | 
| Namhyung Kim | 15e408c | 2010-09-14 21:43:48 +0900 | [diff] [blame] | 1449 | 	__releases(&hb->lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1450 | { | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1451 | 	int prio; | 
 | 1452 |  | 
 | 1453 | 	/* | 
 | 1454 | 	 * The priority used to register this element is | 
 | 1455 | 	 * - either the real thread-priority for the real-time threads | 
 | 1456 | 	 * (i.e. threads with a priority lower than MAX_RT_PRIO) | 
 | 1457 | 	 * - or MAX_RT_PRIO for non-RT threads. | 
 | 1458 | 	 * Thus, all RT-threads are woken first in priority order, and | 
 | 1459 | 	 * the others are woken last, in FIFO order. | 
 | 1460 | 	 */ | 
 | 1461 | 	prio = min(current->normal_prio, MAX_RT_PRIO); | 
 | 1462 |  | 
 | 1463 | 	plist_node_init(&q->list, prio); | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1464 | 	plist_add(&q->list, &hb->chain); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1465 | 	q->task = current; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1466 | 	spin_unlock(&hb->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | } | 
 | 1468 |  | 
| Darren Hart | d40d65c | 2009-09-21 22:30:15 -0700 | [diff] [blame] | 1469 | /** | 
 | 1470 |  * unqueue_me() - Remove the futex_q from its futex_hash_bucket | 
 | 1471 |  * @q:	The futex_q to unqueue | 
 | 1472 |  * | 
 | 1473 |  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must | 
 | 1474 |  * be paired with exactly one earlier call to queue_me(). | 
 | 1475 |  * | 
 | 1476 |  * Returns: | 
  | 1477 |  *   1 - if the futex_q was still queued (and we unqueued it) | 
 | 1478 |  *   0 - if the futex_q was already removed by the waking thread | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1480 | static int unqueue_me(struct futex_q *q) | 
 | 1481 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | 	spinlock_t *lock_ptr; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1483 | 	int ret = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1484 |  | 
 | 1485 | 	/* In the common case we don't take the spinlock, which is nice. */ | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1486 | retry: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1487 | 	lock_ptr = q->lock_ptr; | 
| Christian Borntraeger | e91467e | 2006-08-05 12:13:52 -0700 | [diff] [blame] | 1488 | 	barrier(); | 
| Stephen Hemminger | c80544d | 2007-10-18 03:07:05 -0700 | [diff] [blame] | 1489 | 	if (lock_ptr != NULL) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1490 | 		spin_lock(lock_ptr); | 
 | 1491 | 		/* | 
 | 1492 | 		 * q->lock_ptr can change between reading it and | 
 | 1493 | 		 * spin_lock(), causing us to take the wrong lock.  This | 
 | 1494 | 		 * corrects the race condition. | 
 | 1495 | 		 * | 
 | 1496 | 		 * Reasoning goes like this: if we have the wrong lock, | 
 | 1497 | 		 * q->lock_ptr must have changed (maybe several times) | 
 | 1498 | 		 * between reading it and the spin_lock().  It can | 
 | 1499 | 		 * change again after the spin_lock() but only if it was | 
 | 1500 | 		 * already changed before the spin_lock().  It cannot, | 
 | 1501 | 		 * however, change back to the original value.  Therefore | 
 | 1502 | 		 * we can detect whether we acquired the correct lock. | 
 | 1503 | 		 */ | 
 | 1504 | 		if (unlikely(lock_ptr != q->lock_ptr)) { | 
 | 1505 | 			spin_unlock(lock_ptr); | 
 | 1506 | 			goto retry; | 
 | 1507 | 		} | 
| Lai Jiangshan | 2e12978 | 2010-12-22 14:18:50 +0800 | [diff] [blame] | 1508 | 		__unqueue_futex(q); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1509 |  | 
 | 1510 | 		BUG_ON(q->pi_state); | 
 | 1511 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | 		spin_unlock(lock_ptr); | 
 | 1513 | 		ret = 1; | 
 | 1514 | 	} | 
 | 1515 |  | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 1516 | 	drop_futex_key_refs(&q->key); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | 	return ret; | 
 | 1518 | } | 
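/*
 * Editor's illustrative sketch, not part of the original file: the lock_ptr
 * handling above is an instance of the "load pointer, lock, re-check" idiom
 * for objects whose lock can change while not held. struct obj is a
 * hypothetical stand-in for futex_q:
 */
#if 0	/* example only */
static spinlock_t *lock_movable(struct obj *o)
{
	spinlock_t *lk;

retry:
	lk = o->lock_ptr;		/* snapshot the movable pointer */
	barrier();			/* don't let the compiler re-read it */
	if (!lk)
		return NULL;		/* object already unhashed */
	spin_lock(lk);
	if (unlikely(lk != o->lock_ptr)) {
		spin_unlock(lk);	/* the lock moved under us: retry */
		goto retry;
	}
	return lk;			/* we hold the object's current lock */
}
#endif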
 | 1519 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1520 | /* | 
  | 1521 |  * PI futexes cannot be requeued and must remove themselves from the | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1522 |  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry | 
 | 1523 |  * and dropped here. | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1524 |  */ | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1525 | static void unqueue_me_pi(struct futex_q *q) | 
| Namhyung Kim | 15e408c | 2010-09-14 21:43:48 +0900 | [diff] [blame] | 1526 | 	__releases(q->lock_ptr) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1527 | { | 
| Lai Jiangshan | 2e12978 | 2010-12-22 14:18:50 +0800 | [diff] [blame] | 1528 | 	__unqueue_futex(q); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1529 |  | 
 | 1530 | 	BUG_ON(!q->pi_state); | 
 | 1531 | 	free_pi_state(q->pi_state); | 
 | 1532 | 	q->pi_state = NULL; | 
 | 1533 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1534 | 	spin_unlock(q->lock_ptr); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1535 | } | 
 | 1536 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1537 | /* | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1538 |  * Fixup the pi_state owner with the new owner. | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1539 |  * | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1540 |  * Must be called with hash bucket lock held and mm->sem held for non | 
 | 1541 |  * private futexes. | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1542 |  */ | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1543 | static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1544 | 				struct task_struct *newowner) | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1545 | { | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1546 | 	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1547 | 	struct futex_pi_state *pi_state = q->pi_state; | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1548 | 	struct task_struct *oldowner = pi_state->owner; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1549 | 	u32 uval, curval, newval; | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 1550 | 	int ret; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1551 |  | 
 | 1552 | 	/* Owner died? */ | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1553 | 	if (!pi_state->owner) | 
 | 1554 | 		newtid |= FUTEX_OWNER_DIED; | 
 | 1555 |  | 
 | 1556 | 	/* | 
 | 1557 | 	 * We are here either because we stole the rtmutex from the | 
| Lai Jiangshan | 8161239 | 2011-01-14 17:09:41 +0800 | [diff] [blame] | 1558 | 	 * previous highest priority waiter or we are the highest priority | 
 | 1559 | 	 * waiter but failed to get the rtmutex the first time. | 
 | 1560 | 	 * We have to replace the newowner TID in the user space variable. | 
 | 1561 | 	 * This must be atomic as we have to preserve the owner died bit here. | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1562 | 	 * | 
| Darren Hart | b2d0994 | 2009-03-12 00:55:37 -0700 | [diff] [blame] | 1563 | 	 * Note: We write the user space value _before_ changing the pi_state | 
 | 1564 | 	 * because we can fault here. Imagine swapped out pages or a fork | 
 | 1565 | 	 * that marked all the anonymous memory readonly for cow. | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1566 | 	 * | 
 | 1567 | 	 * Modifying pi_state _before_ the user space value would | 
 | 1568 | 	 * leave the pi_state in an inconsistent state when we fault | 
 | 1569 | 	 * here, because we need to drop the hash bucket lock to | 
 | 1570 | 	 * handle the fault. This might be observed in the PID check | 
 | 1571 | 	 * in lookup_pi_state. | 
 | 1572 | 	 */ | 
 | 1573 | retry: | 
 | 1574 | 	if (get_futex_value_locked(&uval, uaddr)) | 
 | 1575 | 		goto handle_fault; | 
 | 1576 |  | 
 | 1577 | 	while (1) { | 
 | 1578 | 		newval = (uval & FUTEX_OWNER_DIED) | newtid; | 
 | 1579 |  | 
| Michel Lespinasse | 37a9d91 | 2011-03-10 18:48:51 -0800 | [diff] [blame] | 1580 | 		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1581 | 			goto handle_fault; | 
 | 1582 | 		if (curval == uval) | 
 | 1583 | 			break; | 
 | 1584 | 		uval = curval; | 
 | 1585 | 	} | 
 | 1586 |  | 
 | 1587 | 	/* | 
 | 1588 | 	 * We fixed up user space. Now we need to fix the pi_state | 
 | 1589 | 	 * itself. | 
 | 1590 | 	 */ | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1591 | 	if (pi_state->owner != NULL) { | 
| Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 1592 | 		raw_spin_lock_irq(&pi_state->owner->pi_lock); | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1593 | 		WARN_ON(list_empty(&pi_state->list)); | 
 | 1594 | 		list_del_init(&pi_state->list); | 
| Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 1595 | 		raw_spin_unlock_irq(&pi_state->owner->pi_lock); | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1596 | 	} | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1597 |  | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1598 | 	pi_state->owner = newowner; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1599 |  | 
| Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 1600 | 	raw_spin_lock_irq(&newowner->pi_lock); | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1601 | 	WARN_ON(!list_empty(&pi_state->list)); | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1602 | 	list_add(&pi_state->list, &newowner->pi_state_list); | 
| Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 1603 | 	raw_spin_unlock_irq(&newowner->pi_lock); | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1604 | 	return 0; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1605 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1606 | 	/* | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1607 | 	 * To handle the page fault we need to drop the hash bucket | 
| Lai Jiangshan | 8161239 | 2011-01-14 17:09:41 +0800 | [diff] [blame] | 1608 | 	 * lock here. That gives the other task (either the highest priority | 
 | 1609 | 	 * waiter itself or the task which stole the rtmutex) the | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1610 | 	 * chance to try the fixup of the pi_state. So once we are | 
 | 1611 | 	 * back from handling the fault we need to check the pi_state | 
 | 1612 | 	 * after reacquiring the hash bucket lock and before trying to | 
 | 1613 | 	 * do another fixup. When the fixup has been done already we | 
 | 1614 | 	 * simply return. | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1615 | 	 */ | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1616 | handle_fault: | 
 | 1617 | 	spin_unlock(q->lock_ptr); | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1618 |  | 
| Thomas Gleixner | d072599 | 2009-06-11 23:15:43 +0200 | [diff] [blame] | 1619 | 	ret = fault_in_user_writeable(uaddr); | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1620 |  | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1621 | 	spin_lock(q->lock_ptr); | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1622 |  | 
| Thomas Gleixner | 1b7558e | 2008-06-23 11:21:58 +0200 | [diff] [blame] | 1623 | 	/* | 
 | 1624 | 	 * Check if someone else fixed it for us: | 
 | 1625 | 	 */ | 
 | 1626 | 	if (pi_state->owner != oldowner) | 
 | 1627 | 		return 0; | 
 | 1628 |  | 
 | 1629 | 	if (ret) | 
 | 1630 | 		return ret; | 
 | 1631 |  | 
 | 1632 | 	goto retry; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1633 | } | 
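/*
 * Editor's illustrative sketch, not part of the original file: the retry loop
 * in fixup_pi_state_owner() is the standard compare-and-exchange pattern for
 * rewriting some bits of a word that other CPUs may change concurrently --
 * here, installing a new TID while preserving FUTEX_OWNER_DIED. Shown in
 * isolation under a hypothetical name; the fault handling above is elided:
 */
#if 0	/* example only */
static int set_tid_preserve_died(u32 __user *uaddr, u32 newtid)
{
	u32 uval, curval, newval;

	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;
	for (;;) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;
		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			return -EFAULT;		/* faulted on the cmpxchg */
		if (curval == uval)
			return 0;		/* exchange succeeded */
		uval = curval;			/* lost a race: retry */
	}
}
#endif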
 | 1634 |  | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1635 | static long futex_wait_restart(struct restart_block *restart); | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1636 |  | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1637 | /** | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1638 |  * fixup_owner() - Post lock pi_state and corner case management | 
 | 1639 |  * @uaddr:	user address of the futex | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1640 |  * @q:		futex_q (contains pi_state and access to the rt_mutex) | 
 | 1641 |  * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0) | 
 | 1642 |  * | 
 | 1643 |  * After attempting to lock an rt_mutex, this function is called to cleanup | 
 | 1644 |  * the pi_state owner as well as handle race conditions that may allow us to | 
 | 1645 |  * acquire the lock. Must be called with the hb lock held. | 
 | 1646 |  * | 
 | 1647 |  * Returns: | 
 | 1648 |  *  1 - success, lock taken | 
 | 1649 |  *  0 - success, lock not taken | 
 | 1650 |  * <0 - on error (-EFAULT) | 
 | 1651 |  */ | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1652 | static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1653 | { | 
 | 1654 | 	struct task_struct *owner; | 
 | 1655 | 	int ret = 0; | 
 | 1656 |  | 
 | 1657 | 	if (locked) { | 
 | 1658 | 		/* | 
 | 1659 | 		 * Got the lock. We might not be the anticipated owner if we | 
 | 1660 | 		 * did a lock-steal - fix up the PI-state in that case: | 
 | 1661 | 		 */ | 
 | 1662 | 		if (q->pi_state->owner != current) | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1663 | 			ret = fixup_pi_state_owner(uaddr, q, current); | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1664 | 		goto out; | 
 | 1665 | 	} | 
 | 1666 |  | 
 | 1667 | 	/* | 
 | 1668 | 	 * Catch the rare case, where the lock was released when we were on the | 
 | 1669 | 	 * way back before we locked the hash bucket. | 
 | 1670 | 	 */ | 
 | 1671 | 	if (q->pi_state->owner == current) { | 
 | 1672 | 		/* | 
 | 1673 | 		 * Try to get the rt_mutex now. This might fail as some other | 
  | 1674 | 		 * task acquired the rt_mutex after we removed ourselves from the | 
 | 1675 | 		 * rt_mutex waiters list. | 
 | 1676 | 		 */ | 
 | 1677 | 		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { | 
 | 1678 | 			locked = 1; | 
 | 1679 | 			goto out; | 
 | 1680 | 		} | 
 | 1681 |  | 
 | 1682 | 		/* | 
 | 1683 | 		 * pi_state is incorrect, some other task did a lock steal and | 
 | 1684 | 		 * we returned due to timeout or signal without taking the | 
| Lai Jiangshan | 8161239 | 2011-01-14 17:09:41 +0800 | [diff] [blame] | 1685 | 		 * rt_mutex. Too late. | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1686 | 		 */ | 
| Lai Jiangshan | 8161239 | 2011-01-14 17:09:41 +0800 | [diff] [blame] | 1687 | 		raw_spin_lock(&q->pi_state->pi_mutex.wait_lock); | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1688 | 		owner = rt_mutex_owner(&q->pi_state->pi_mutex); | 
| Lai Jiangshan | 8161239 | 2011-01-14 17:09:41 +0800 | [diff] [blame] | 1689 | 		if (!owner) | 
 | 1690 | 			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); | 
 | 1691 | 		raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock); | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1692 | 		ret = fixup_pi_state_owner(uaddr, q, owner); | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1693 | 		goto out; | 
 | 1694 | 	} | 
 | 1695 |  | 
 | 1696 | 	/* | 
 | 1697 | 	 * Paranoia check. If we did not take the lock, then we should not be | 
| Lai Jiangshan | 8161239 | 2011-01-14 17:09:41 +0800 | [diff] [blame] | 1698 | 	 * the owner of the rt_mutex. | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1699 | 	 */ | 
 | 1700 | 	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) | 
 | 1701 | 		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " | 
 | 1702 | 				"pi-state %p\n", ret, | 
 | 1703 | 				q->pi_state->pi_mutex.owner, | 
 | 1704 | 				q->pi_state->owner); | 
 | 1705 |  | 
 | 1706 | out: | 
 | 1707 | 	return ret ? ret : locked; | 
 | 1708 | } | 
 | 1709 |  | 
 | 1710 | /** | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1711 |  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal | 
 | 1712 |  * @hb:		the futex hash bucket, must be locked by the caller | 
 | 1713 |  * @q:		the futex_q to queue up on | 
 | 1714 |  * @timeout:	the prepared hrtimer_sleeper, or null for no timeout | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1715 |  */ | 
 | 1716 | static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, | 
| Thomas Gleixner | f1a11e0 | 2009-05-05 19:21:40 +0200 | [diff] [blame] | 1717 | 				struct hrtimer_sleeper *timeout) | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1718 | { | 
| Darren Hart | 9beba3c | 2009-09-24 11:54:47 -0700 | [diff] [blame] | 1719 | 	/* | 
 | 1720 | 	 * The task state is guaranteed to be set before another task can | 
 | 1721 | 	 * wake it. set_current_state() is implemented using set_mb() and | 
 | 1722 | 	 * queue_me() calls spin_unlock() upon completion, both serializing | 
 | 1723 | 	 * access to the hash list and forcing another memory barrier. | 
 | 1724 | 	 */ | 
| Thomas Gleixner | f1a11e0 | 2009-05-05 19:21:40 +0200 | [diff] [blame] | 1725 | 	set_current_state(TASK_INTERRUPTIBLE); | 
| Darren Hart | 0729e19 | 2009-09-21 22:30:38 -0700 | [diff] [blame] | 1726 | 	queue_me(q, hb); | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1727 |  | 
 | 1728 | 	/* Arm the timer */ | 
 | 1729 | 	if (timeout) { | 
 | 1730 | 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); | 
 | 1731 | 		if (!hrtimer_active(&timeout->timer)) | 
 | 1732 | 			timeout->task = NULL; | 
 | 1733 | 	} | 
 | 1734 |  | 
 | 1735 | 	/* | 
| Darren Hart | 0729e19 | 2009-09-21 22:30:38 -0700 | [diff] [blame] | 1736 | 	 * If we have been removed from the hash list, then another task | 
 | 1737 | 	 * has tried to wake us, and we can skip the call to schedule(). | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1738 | 	 */ | 
 | 1739 | 	if (likely(!plist_node_empty(&q->list))) { | 
 | 1740 | 		/* | 
 | 1741 | 		 * If the timer has already expired, current will already be | 
 | 1742 | 		 * flagged for rescheduling. Only call schedule if there | 
 | 1743 | 		 * is no timeout, or if it has yet to expire. | 
 | 1744 | 		 */ | 
 | 1745 | 		if (!timeout || timeout->task) | 
 | 1746 | 			schedule(); | 
 | 1747 | 	} | 
 | 1748 | 	__set_current_state(TASK_RUNNING); | 
 | 1749 | } | 
 | 1750 |  | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1751 | /** | 
 | 1752 |  * futex_wait_setup() - Prepare to wait on a futex | 
 | 1753 |  * @uaddr:	the futex userspace address | 
 | 1754 |  * @val:	the expected value | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1755 |  * @flags:	futex flags (FLAGS_SHARED, etc.) | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1756 |  * @q:		the associated futex_q | 
 | 1757 |  * @hb:		storage for hash_bucket pointer to be returned to caller | 
 | 1758 |  * | 
 | 1759 |  * Setup the futex_q and locate the hash_bucket.  Get the futex value and | 
 | 1760 |  * compare it with the expected value.  Handle atomic faults internally. | 
 | 1761 |  * Return with the hb lock held and a q.key reference on success, and unlocked | 
 | 1762 |  * with no q.key reference on failure. | 
 | 1763 |  * | 
 | 1764 |  * Returns: | 
 | 1765 |  *  0 - uaddr contains val and hb has been locked | 
  | 1766 |  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked | 
 | 1767 |  */ | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1768 | static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1769 | 			   struct futex_q *q, struct futex_hash_bucket **hb) | 
 | 1770 | { | 
 | 1771 | 	u32 uval; | 
 | 1772 | 	int ret; | 
 | 1773 |  | 
 | 1774 | 	/* | 
 | 1775 | 	 * Access the page AFTER the hash-bucket is locked. | 
 | 1776 | 	 * Order is important: | 
 | 1777 | 	 * | 
 | 1778 | 	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); | 
 | 1779 | 	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); } | 
 | 1780 | 	 * | 
 | 1781 | 	 * The basic logical guarantee of a futex is that it blocks ONLY | 
 | 1782 | 	 * if cond(var) is known to be true at the time of blocking, for | 
| Michel Lespinasse | 8fe8f54 | 2011-03-06 18:07:50 -0800 | [diff] [blame] | 1783 | 	 * any cond.  If we locked the hash-bucket after testing *uaddr, that | 
 | 1784 | 	 * would open a race condition where we could block indefinitely with | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1785 | 	 * cond(var) false, which would violate the guarantee. | 
 | 1786 | 	 * | 
| Michel Lespinasse | 8fe8f54 | 2011-03-06 18:07:50 -0800 | [diff] [blame] | 1787 | 	 * On the other hand, we insert q and release the hash-bucket only | 
 | 1788 | 	 * after testing *uaddr.  This guarantees that futex_wait() will NOT | 
  | 1789 |  * absorb a wakeup if *uaddr does not match the desired value | 
 | 1790 | 	 * while the syscall executes. | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1791 | 	 */ | 
 | 1792 | retry: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1793 | 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key); | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1794 | 	if (unlikely(ret != 0)) | 
| Darren Hart | a5a2a0c | 2009-04-10 09:50:05 -0700 | [diff] [blame] | 1795 | 		return ret; | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1796 |  | 
 | 1797 | retry_private: | 
 | 1798 | 	*hb = queue_lock(q); | 
 | 1799 |  | 
 | 1800 | 	ret = get_futex_value_locked(&uval, uaddr); | 
 | 1801 |  | 
 | 1802 | 	if (ret) { | 
 | 1803 | 		queue_unlock(q, *hb); | 
 | 1804 |  | 
 | 1805 | 		ret = get_user(uval, uaddr); | 
 | 1806 | 		if (ret) | 
 | 1807 | 			goto out; | 
 | 1808 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1809 | 		if (!(flags & FLAGS_SHARED)) | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1810 | 			goto retry_private; | 
 | 1811 |  | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1812 | 		put_futex_key(&q->key); | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1813 | 		goto retry; | 
 | 1814 | 	} | 
 | 1815 |  | 
 | 1816 | 	if (uval != val) { | 
 | 1817 | 		queue_unlock(q, *hb); | 
 | 1818 | 		ret = -EWOULDBLOCK; | 
 | 1819 | 	} | 
 | 1820 |  | 
 | 1821 | out: | 
 | 1822 | 	if (ret) | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1823 | 		put_futex_key(&q->key); | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1824 | 	return ret; | 
 | 1825 | } | 
 | 1826 |  | 
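 |  | /* | 
 |  |  * Illustrative sketch (not kernel code): the userspace half of the | 
 |  |  * protocol that futex_wait() below implements; the names are | 
 |  |  * hypothetical: | 
 |  |  * | 
 |  |  *	while (atomic_load(&futex_var) == UNAVAILABLE) | 
 |  |  *		syscall(SYS_futex, &futex_var, FUTEX_WAIT, UNAVAILABLE, | 
 |  |  *			NULL, NULL, 0); | 
 |  |  * | 
 |  |  * The kernel rechecks *uaddr == val under the hash-bucket lock in | 
 |  |  * futex_wait_setup() above, so a wakeup that races with the gap | 
 |  |  * between the userspace load and the syscall is never lost. | 
 |  |  */ | 
 |  |  | 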
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1827 | static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, | 
 | 1828 | 		      ktime_t *abs_time, u32 bitset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1829 | { | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1830 | 	struct hrtimer_sleeper timeout, *to = NULL; | 
| Peter Zijlstra | 2fff78c7 | 2009-02-11 18:10:10 +0100 | [diff] [blame] | 1831 | 	struct restart_block *restart; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1832 | 	struct futex_hash_bucket *hb; | 
| Darren Hart | 5bdb05f | 2010-11-08 13:40:28 -0800 | [diff] [blame] | 1833 | 	struct futex_q q = futex_q_init; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1834 | 	int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1835 |  | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 1836 | 	if (!bitset) | 
 | 1837 | 		return -EINVAL; | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 1838 | 	q.bitset = bitset; | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1839 |  | 
 | 1840 | 	if (abs_time) { | 
 | 1841 | 		to = &timeout; | 
 | 1842 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1843 | 		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? | 
 | 1844 | 				      CLOCK_REALTIME : CLOCK_MONOTONIC, | 
 | 1845 | 				      HRTIMER_MODE_ABS); | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1846 | 		hrtimer_init_sleeper(to, current); | 
 | 1847 | 		hrtimer_set_expires_range_ns(&to->timer, *abs_time, | 
 | 1848 | 					     current->timer_slack_ns); | 
 | 1849 | 	} | 
 | 1850 |  | 
| Thomas Gleixner | d58e657 | 2009-10-13 20:40:43 +0200 | [diff] [blame] | 1851 | retry: | 
| Darren Hart | 7ada876 | 2010-10-17 08:35:04 -0700 | [diff] [blame] | 1852 | 	/* | 
 | 1853 | 	 * Prepare to wait on uaddr. On success, holds hb lock and increments | 
 | 1854 | 	 * q.key refs. | 
 | 1855 | 	 */ | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1856 | 	ret = futex_wait_setup(uaddr, val, flags, &q, &hb); | 
| Darren Hart | f801073 | 2009-04-03 13:40:40 -0700 | [diff] [blame] | 1857 | 	if (ret) | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1858 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1859 |  | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1860 | 	/* queue_me and wait for wakeup, timeout, or a signal. */ | 
| Thomas Gleixner | f1a11e0 | 2009-05-05 19:21:40 +0200 | [diff] [blame] | 1861 | 	futex_wait_queue_me(hb, &q, to); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1862 |  | 
 | 1863 | 	/* If we were woken (and unqueued), we succeeded, whatever. */ | 
| Peter Zijlstra | 2fff78c7 | 2009-02-11 18:10:10 +0100 | [diff] [blame] | 1864 | 	ret = 0; | 
| Darren Hart | 7ada876 | 2010-10-17 08:35:04 -0700 | [diff] [blame] | 1865 | 	/* unqueue_me() drops q.key ref */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1866 | 	if (!unqueue_me(&q)) | 
| Darren Hart | 7ada876 | 2010-10-17 08:35:04 -0700 | [diff] [blame] | 1867 | 		goto out; | 
| Peter Zijlstra | 2fff78c7 | 2009-02-11 18:10:10 +0100 | [diff] [blame] | 1868 | 	ret = -ETIMEDOUT; | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1869 | 	if (to && !to->task) | 
| Darren Hart | 7ada876 | 2010-10-17 08:35:04 -0700 | [diff] [blame] | 1870 | 		goto out; | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1871 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1872 | 	/* | 
| Thomas Gleixner | d58e657 | 2009-10-13 20:40:43 +0200 | [diff] [blame] | 1873 | 	 * We expect signal_pending(current), but we might be the | 
 | 1874 | 	 * victim of a spurious wakeup as well. | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1875 | 	 */ | 
| Darren Hart | 7ada876 | 2010-10-17 08:35:04 -0700 | [diff] [blame] | 1876 | 	if (!signal_pending(current)) | 
| Thomas Gleixner | d58e657 | 2009-10-13 20:40:43 +0200 | [diff] [blame] | 1877 | 		goto retry; | 
| Thomas Gleixner | d58e657 | 2009-10-13 20:40:43 +0200 | [diff] [blame] | 1878 |  | 
| Peter Zijlstra | 2fff78c7 | 2009-02-11 18:10:10 +0100 | [diff] [blame] | 1879 | 	ret = -ERESTARTSYS; | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1880 | 	if (!abs_time) | 
| Darren Hart | 7ada876 | 2010-10-17 08:35:04 -0700 | [diff] [blame] | 1881 | 		goto out; | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1882 |  | 
| Peter Zijlstra | 2fff78c7 | 2009-02-11 18:10:10 +0100 | [diff] [blame] | 1883 | 	restart = &current_thread_info()->restart_block; | 
 | 1884 | 	restart->fn = futex_wait_restart; | 
| Namhyung Kim | a3c74c5 | 2010-09-14 21:43:47 +0900 | [diff] [blame] | 1885 | 	restart->futex.uaddr = uaddr; | 
| Peter Zijlstra | 2fff78c7 | 2009-02-11 18:10:10 +0100 | [diff] [blame] | 1886 | 	restart->futex.val = val; | 
 | 1887 | 	restart->futex.time = abs_time->tv64; | 
 | 1888 | 	restart->futex.bitset = bitset; | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1889 | 	restart->futex.flags = flags; | 
| Peter Zijlstra | 2fff78c7 | 2009-02-11 18:10:10 +0100 | [diff] [blame] | 1890 |  | 
 | 1891 | 	ret = -ERESTART_RESTARTBLOCK; | 
 | 1892 |  | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1893 | out: | 
| Darren Hart | ca5f952 | 2009-04-03 13:39:33 -0700 | [diff] [blame] | 1894 | 	if (to) { | 
 | 1895 | 		hrtimer_cancel(&to->timer); | 
 | 1896 | 		destroy_hrtimer_on_stack(&to->timer); | 
 | 1897 | 	} | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1898 | 	return ret; | 
 | 1899 | } | 
 | 1900 |  | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1901 |  | 
 | 1902 | static long futex_wait_restart(struct restart_block *restart) | 
 | 1903 | { | 
| Namhyung Kim | a3c74c5 | 2010-09-14 21:43:47 +0900 | [diff] [blame] | 1904 | 	u32 __user *uaddr = restart->futex.uaddr; | 
| Darren Hart | a72188d | 2009-04-03 13:40:22 -0700 | [diff] [blame] | 1905 | 	ktime_t t, *tp = NULL; | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1906 |  | 
| Darren Hart | a72188d | 2009-04-03 13:40:22 -0700 | [diff] [blame] | 1907 | 	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { | 
 | 1908 | 		t.tv64 = restart->futex.time; | 
 | 1909 | 		tp = &t; | 
 | 1910 | 	} | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1911 | 	restart->fn = do_no_restart_syscall; | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1912 |  | 
 | 1913 | 	return (long)futex_wait(uaddr, restart->futex.flags, | 
 | 1914 | 				restart->futex.val, tp, restart->futex.bitset); | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1915 | } | 
 | 1916 |  | 
 | 1917 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1918 | /* | 
 | 1919 |  * Userspace tried a 0 -> TID atomic transition of the futex value | 
 | 1920 |  * and failed. The kernel side here does the whole locking operation: | 
 | 1921 |  * if there are waiters then it will block, it does PI, etc. (Due to | 
 | 1922 |  * races the kernel might see a 0 value of the futex too.) | 
 | 1923 |  */ | 
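 |  | /* | 
 |  |  * Illustrative sketch (not kernel code) of the userspace fast path | 
 |  |  * whose failure brings us here; 'tid' is the caller's thread id and | 
 |  |  * the names are hypothetical: | 
 |  |  * | 
 |  |  *	if (cmpxchg(&futex_var, 0, tid) != 0) | 
 |  |  *		syscall(SYS_futex, &futex_var, FUTEX_LOCK_PI, 0, | 
 |  |  *			timeout, NULL, 0); | 
 |  |  */ | 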
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1924 | static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect, | 
 | 1925 | 			 ktime_t *time, int trylock) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1926 | { | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1927 | 	struct hrtimer_sleeper timeout, *to = NULL; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1928 | 	struct futex_hash_bucket *hb; | 
| Darren Hart | 5bdb05f | 2010-11-08 13:40:28 -0800 | [diff] [blame] | 1929 | 	struct futex_q q = futex_q_init; | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1930 | 	int res, ret; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1931 |  | 
 | 1932 | 	if (refill_pi_state_cache()) | 
 | 1933 | 		return -ENOMEM; | 
 | 1934 |  | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1935 | 	if (time) { | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1936 | 		to = &timeout; | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1937 | 		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, | 
 | 1938 | 				      HRTIMER_MODE_ABS); | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1939 | 		hrtimer_init_sleeper(to, current); | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1940 | 		hrtimer_set_expires(&to->timer, *time); | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1941 | 	} | 
 | 1942 |  | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1943 | retry: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 1944 | 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1945 | 	if (unlikely(ret != 0)) | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1946 | 		goto out; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1947 |  | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 1948 | retry_private: | 
| Eric Sesterhenn | 82af7ac | 2008-01-25 10:40:46 +0100 | [diff] [blame] | 1949 | 	hb = queue_lock(&q); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1950 |  | 
| Darren Hart | bab5bc9 | 2009-04-07 23:23:50 -0700 | [diff] [blame] | 1951 | 	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1952 | 	if (unlikely(ret)) { | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1953 | 		switch (ret) { | 
| Darren Hart | 1a52084 | 2009-04-03 13:39:52 -0700 | [diff] [blame] | 1954 | 		case 1: | 
 | 1955 | 			/* We got the lock. */ | 
 | 1956 | 			ret = 0; | 
 | 1957 | 			goto out_unlock_put_key; | 
 | 1958 | 		case -EFAULT: | 
 | 1959 | 			goto uaddr_faulted; | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1960 | 		case -EAGAIN: | 
 | 1961 | 			/* | 
 | 1962 | 			 * Task is exiting and we just wait for the | 
 | 1963 | 			 * exit to complete. | 
 | 1964 | 			 */ | 
 | 1965 | 			queue_unlock(&q, hb); | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1966 | 			put_futex_key(&q.key); | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1967 | 			cond_resched(); | 
 | 1968 | 			goto retry; | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1969 | 		default: | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 1970 | 			goto out_unlock_put_key; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1971 | 		} | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1972 | 	} | 
 | 1973 |  | 
 | 1974 | 	/* | 
 | 1975 | 	 * Only actually queue now that the atomic ops are done: | 
 | 1976 | 	 */ | 
| Eric Sesterhenn | 82af7ac | 2008-01-25 10:40:46 +0100 | [diff] [blame] | 1977 | 	queue_me(&q, hb); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1978 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1979 | 	WARN_ON(!q.pi_state); | 
 | 1980 | 	/* | 
 | 1981 | 	 * Block on the PI mutex: | 
 | 1982 | 	 */ | 
 | 1983 | 	if (!trylock) | 
 | 1984 | 		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1); | 
 | 1985 | 	else { | 
 | 1986 | 		ret = rt_mutex_trylock(&q.pi_state->pi_mutex); | 
 | 1987 | 		/* Fixup the trylock return value: */ | 
 | 1988 | 		ret = ret ? 0 : -EWOULDBLOCK; | 
 | 1989 | 	} | 
 | 1990 |  | 
| Vernon Mauery | a99e4e4 | 2006-07-01 04:35:42 -0700 | [diff] [blame] | 1991 | 	spin_lock(q.lock_ptr); | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1992 | 	/* | 
 | 1993 | 	 * Fixup the pi_state owner and possibly acquire the lock if we | 
 | 1994 | 	 * haven't already. | 
 | 1995 | 	 */ | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 1996 | 	res = fixup_owner(uaddr, &q, !ret); | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 1997 | 	/* | 
 | 1998 | 	 * If fixup_owner() returned an error, propagate that.  If it acquired | 
 | 1999 | 	 * the lock, clear our -ETIMEDOUT or -EINTR. | 
 | 2000 | 	 */ | 
 | 2001 | 	if (res) | 
 | 2002 | 		ret = (res < 0) ? res : 0; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2003 |  | 
| Darren Hart | e8f6386 | 2009-03-12 00:56:06 -0700 | [diff] [blame] | 2004 | 	/* | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 2005 | 	 * If fixup_owner() faulted and was unable to handle the fault, unlock | 
 | 2006 | 	 * it and return the fault to userspace. | 
| Darren Hart | e8f6386 | 2009-03-12 00:56:06 -0700 | [diff] [blame] | 2007 | 	 */ | 
 | 2008 | 	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) | 
 | 2009 | 		rt_mutex_unlock(&q.pi_state->pi_mutex); | 
 | 2010 |  | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 2011 | 	/* Unqueue and drop the lock */ | 
 | 2012 | 	unqueue_me_pi(&q); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2013 |  | 
| Mikael Pettersson | 5ecb01c | 2010-01-23 22:36:29 +0100 | [diff] [blame] | 2014 | 	goto out_put_key; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2015 |  | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 2016 | out_unlock_put_key: | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2017 | 	queue_unlock(&q, hb); | 
 | 2018 |  | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 2019 | out_put_key: | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 2020 | 	put_futex_key(&q.key); | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 2021 | out: | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 2022 | 	if (to) | 
 | 2023 | 		destroy_hrtimer_on_stack(&to->timer); | 
| Darren Hart | dd97399 | 2009-04-03 13:40:02 -0700 | [diff] [blame] | 2024 | 	return ret != -EINTR ? ret : -ERESTARTNOINTR; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2025 |  | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 2026 | uaddr_faulted: | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 2027 | 	queue_unlock(&q, hb); | 
 | 2028 |  | 
| Thomas Gleixner | d072599 | 2009-06-11 23:15:43 +0200 | [diff] [blame] | 2029 | 	ret = fault_in_user_writeable(uaddr); | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 2030 | 	if (ret) | 
 | 2031 | 		goto out_put_key; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2032 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2033 | 	if (!(flags & FLAGS_SHARED)) | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 2034 | 		goto retry_private; | 
 | 2035 |  | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 2036 | 	put_futex_key(&q.key); | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 2037 | 	goto retry; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2038 | } | 
 | 2039 |  | 
 | 2040 | /* | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2041 |  * Userspace attempted a TID -> 0 atomic transition, and failed. | 
 | 2042 |  * This is the in-kernel slowpath: we look up the PI state (if any), | 
 | 2043 |  * and do the rt-mutex unlock. | 
 | 2044 |  */ | 
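 |  | /* | 
 |  |  * Illustrative sketch (not kernel code) of the matching userspace | 
 |  |  * unlock fast path (names hypothetical).  It only fails, and thus | 
 |  |  * enters this slowpath, when FUTEX_WAITERS or FUTEX_OWNER_DIED is | 
 |  |  * set in the futex word: | 
 |  |  * | 
 |  |  *	if (cmpxchg(&futex_var, tid, 0) != tid) | 
 |  |  *		syscall(SYS_futex, &futex_var, FUTEX_UNLOCK_PI, 0, | 
 |  |  *			NULL, NULL, 0); | 
 |  |  */ | 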
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2045 | static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2046 | { | 
 | 2047 | 	struct futex_hash_bucket *hb; | 
 | 2048 | 	struct futex_q *this, *next; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 2049 | 	struct plist_head *head; | 
| Peter Zijlstra | 38d47c1 | 2008-09-26 19:32:20 +0200 | [diff] [blame] | 2050 | 	union futex_key key = FUTEX_KEY_INIT; | 
| Thomas Gleixner | c0c9ed1 | 2011-03-11 11:51:22 +0100 | [diff] [blame] | 2051 | 	u32 uval, vpid = task_pid_vnr(current); | 
| Darren Hart | e4dc5b7 | 2009-03-12 00:56:13 -0700 | [diff] [blame] | 2052 | 	int ret; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2053 |  | 
 | 2054 | retry: | 
 | 2055 | 	if (get_user(uval, uaddr)) | 
 | 2056 | 		return -EFAULT; | 
 | 2057 | 	/* | 
 | 2058 | 	 * We release only a lock we actually own: | 
 | 2059 | 	 */ | 
| Thomas Gleixner | c0c9ed1 | 2011-03-11 11:51:22 +0100 | [diff] [blame] | 2060 | 	if ((uval & FUTEX_TID_MASK) != vpid) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2061 | 		return -EPERM; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2062 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2063 | 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2064 | 	if (unlikely(ret != 0)) | 
 | 2065 | 		goto out; | 
 | 2066 |  | 
 | 2067 | 	hb = hash_futex(&key); | 
 | 2068 | 	spin_lock(&hb->lock); | 
 | 2069 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2070 | 	/* | 
 | 2071 | 	 * To avoid races, try to do the TID -> 0 atomic transition | 
 | 2072 | 	 * again. If it succeeds then we can return without waking | 
 | 2073 | 	 * anyone else up: | 
 | 2074 | 	 */ | 
| Michel Lespinasse | 37a9d91 | 2011-03-10 18:48:51 -0800 | [diff] [blame] | 2075 | 	if (!(uval & FUTEX_OWNER_DIED) && | 
 | 2076 | 	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2077 | 		goto pi_faulted; | 
 | 2078 | 	/* | 
 | 2079 | 	 * Rare case: we managed to release the lock atomically, | 
 | 2080 | 	 * no need to wake anyone else up: | 
 | 2081 | 	 */ | 
| Thomas Gleixner | c0c9ed1 | 2011-03-11 11:51:22 +0100 | [diff] [blame] | 2082 | 	if (unlikely(uval == vpid)) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2083 | 		goto out_unlock; | 
 | 2084 |  | 
 | 2085 | 	/* | 
 | 2086 | 	 * Ok, other tasks may need to be woken up - check waiters | 
 | 2087 | 	 * and do the wakeup if necessary: | 
 | 2088 | 	 */ | 
 | 2089 | 	head = &hb->chain; | 
 | 2090 |  | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 2091 | 	plist_for_each_entry_safe(this, next, head, list) { | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2092 | 		if (!match_futex(&this->key, &key)) | 
 | 2093 | 			continue; | 
 | 2094 | 		ret = wake_futex_pi(uaddr, uval, this); | 
 | 2095 | 		/* | 
 | 2096 | 		 * The atomic access to the futex value | 
 | 2097 | 		 * generated a pagefault, so retry the | 
 | 2098 | 		 * user-access and the wakeup: | 
 | 2099 | 		 */ | 
 | 2100 | 		if (ret == -EFAULT) | 
 | 2101 | 			goto pi_faulted; | 
 | 2102 | 		goto out_unlock; | 
 | 2103 | 	} | 
 | 2104 | 	/* | 
 | 2105 | 	 * No waiters - kernel unlocks the futex: | 
 | 2106 | 	 */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2107 | 	if (!(uval & FUTEX_OWNER_DIED)) { | 
 | 2108 | 		ret = unlock_futex_pi(uaddr, uval); | 
 | 2109 | 		if (ret == -EFAULT) | 
 | 2110 | 			goto pi_faulted; | 
 | 2111 | 	} | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2112 |  | 
 | 2113 | out_unlock: | 
 | 2114 | 	spin_unlock(&hb->lock); | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 2115 | 	put_futex_key(&key); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2116 |  | 
| Darren Hart | 42d35d4 | 2008-12-29 15:49:53 -0800 | [diff] [blame] | 2117 | out: | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2118 | 	return ret; | 
 | 2119 |  | 
 | 2120 | pi_faulted: | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 2121 | 	spin_unlock(&hb->lock); | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 2122 | 	put_futex_key(&key); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2123 |  | 
| Thomas Gleixner | d072599 | 2009-06-11 23:15:43 +0200 | [diff] [blame] | 2124 | 	ret = fault_in_user_writeable(uaddr); | 
| Darren Hart | b568636 | 2008-12-18 15:06:34 -0800 | [diff] [blame] | 2125 | 	if (!ret) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2126 | 		goto retry; | 
 | 2127 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2128 | 	return ret; | 
 | 2129 | } | 
 | 2130 |  | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2131 | /** | 
 | 2132 |  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex | 
 | 2133 |  * @hb:		the hash_bucket futex_q was originally enqueued on | 
 | 2134 |  * @q:		the futex_q woken while waiting to be requeued | 
 | 2135 |  * @key2:	the futex_key of the requeue target futex | 
 | 2136 |  * @timeout:	the timeout associated with the wait (NULL if none) | 
 | 2137 |  * | 
 | 2138 |  * Detect if the task was woken on the initial futex as opposed to the requeue | 
 | 2139 |  * target futex.  If so, determine if it was a timeout or a signal that caused | 
 | 2140 |  * the wakeup and return the appropriate error code to the caller.  Must be | 
 | 2141 |  * called with the hb lock held. | 
 | 2142 |  * | 
 | 2143 |  * Returns: | 
 | 2144 |  *  0 - no early wakeup detected | 
| Thomas Gleixner | 1c840c1 | 2009-05-20 09:22:40 +0200 | [diff] [blame] | 2145 |  * <0 - -ETIMEDOUT or -ERESTARTNOINTR | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2146 |  */ | 
 | 2147 | static inline | 
 | 2148 | int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | 
 | 2149 | 				   struct futex_q *q, union futex_key *key2, | 
 | 2150 | 				   struct hrtimer_sleeper *timeout) | 
 | 2151 | { | 
 | 2152 | 	int ret = 0; | 
 | 2153 |  | 
 | 2154 | 	/* | 
 | 2155 | 	 * With the hb lock held, we avoid races while we process the wakeup. | 
 | 2156 | 	 * We only need to hold hb (and not hb2) to ensure atomicity as the | 
 | 2157 | 	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. | 
 | 2158 | 	 * It can't be requeued from uaddr2 to something else since we don't | 
 | 2159 | 	 * support a PI aware source futex for requeue. | 
 | 2160 | 	 */ | 
 | 2161 | 	if (!match_futex(&q->key, key2)) { | 
 | 2162 | 		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); | 
 | 2163 | 		/* | 
 | 2164 | 		 * We were woken prior to requeue by a timeout or a signal. | 
 | 2165 | 		 * Unqueue the futex_q and determine which it was. | 
 | 2166 | 		 */ | 
| Lai Jiangshan | 2e12978 | 2010-12-22 14:18:50 +0800 | [diff] [blame] | 2167 | 		plist_del(&q->list, &hb->chain); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2168 |  | 
| Thomas Gleixner | d58e657 | 2009-10-13 20:40:43 +0200 | [diff] [blame] | 2169 | 		/* Handle spurious wakeups gracefully */ | 
| Thomas Gleixner | 11df6dd | 2009-10-28 20:26:48 +0100 | [diff] [blame] | 2170 | 		ret = -EWOULDBLOCK; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2171 | 		if (timeout && !timeout->task) | 
 | 2172 | 			ret = -ETIMEDOUT; | 
| Thomas Gleixner | d58e657 | 2009-10-13 20:40:43 +0200 | [diff] [blame] | 2173 | 		else if (signal_pending(current)) | 
| Thomas Gleixner | 1c840c1 | 2009-05-20 09:22:40 +0200 | [diff] [blame] | 2174 | 			ret = -ERESTARTNOINTR; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2175 | 	} | 
 | 2176 | 	return ret; | 
 | 2177 | } | 
 | 2178 |  | 
 | 2179 | /** | 
 | 2180 |  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 | 
| Darren Hart | 56ec160 | 2009-09-21 22:29:59 -0700 | [diff] [blame] | 2181 |  * @uaddr:	the futex we initially wait on (non-pi) | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2182 |  * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); uaddr and uaddr2 | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2183 |  * 		must be the same type, no requeueing from private to shared, etc. | 
 | 2184 |  * @val:	the expected value of uaddr | 
 | 2185 |  * @abs_time:	absolute timeout | 
| Darren Hart | 56ec160 | 2009-09-21 22:29:59 -0700 | [diff] [blame] | 2186 |  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all | 
 | 2188 |  * @uaddr2:	the pi futex we will take prior to returning to user-space | 
 | 2189 |  * | 
 | 2190 |  * The caller will wait on uaddr and will be requeued by futex_requeue() to | 
 | 2191 |  * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and | 
 | 2192 |  * complete the acquisition of the rt_mutex prior to returning to userspace. | 
 | 2193 |  * This ensures the rt_mutex maintains an owner when it has waiters; without | 
 | 2194 |  * one, the pi logic wouldn't know which task to boost/deboost, if there was a | 
 | 2195 |  * need to. | 
 | 2196 |  * | 
 | 2197 |  * We call schedule in futex_wait_queue_me() when we enqueue and return there | 
 | 2198 |  * via the following: | 
 | 2199 |  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() | 
| Darren Hart | cc6db4e | 2009-07-31 16:20:10 -0700 | [diff] [blame] | 2200 |  * 2) wakeup on uaddr2 after a requeue | 
 | 2201 |  * 3) signal | 
 | 2202 |  * 4) timeout | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2203 |  * | 
| Darren Hart | cc6db4e | 2009-07-31 16:20:10 -0700 | [diff] [blame] | 2204 |  * If 3, cleanup and return -ERESTARTNOINTR. | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2205 |  * | 
 | 2206 |  * If 2, we may then block on trying to take the rt_mutex and return via: | 
 | 2207 |  * 5) successful lock | 
 | 2208 |  * 6) signal | 
 | 2209 |  * 7) timeout | 
 | 2210 |  * 8) other lock acquisition failure | 
 | 2211 |  * | 
| Darren Hart | cc6db4e | 2009-07-31 16:20:10 -0700 | [diff] [blame] | 2212 |  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2213 |  * | 
 | 2214 |  * If 4 or 7, we cleanup and return with -ETIMEDOUT. | 
 | 2215 |  * | 
 | 2216 |  * Returns: | 
 | 2217 |  *  0 - On success | 
 | 2218 |  * <0 - On error | 
 | 2219 |  */ | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2220 | static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2221 | 				 u32 val, ktime_t *abs_time, u32 bitset, | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2222 | 				 u32 __user *uaddr2) | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2223 | { | 
 | 2224 | 	struct hrtimer_sleeper timeout, *to = NULL; | 
 | 2225 | 	struct rt_mutex_waiter rt_waiter; | 
 | 2226 | 	struct rt_mutex *pi_mutex = NULL; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2227 | 	struct futex_hash_bucket *hb; | 
| Darren Hart | 5bdb05f | 2010-11-08 13:40:28 -0800 | [diff] [blame] | 2228 | 	union futex_key key2 = FUTEX_KEY_INIT; | 
 | 2229 | 	struct futex_q q = futex_q_init; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2230 | 	int res, ret; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2231 |  | 
 | 2232 | 	if (!bitset) | 
 | 2233 | 		return -EINVAL; | 
 | 2234 |  | 
 | 2235 | 	if (abs_time) { | 
 | 2236 | 		to = &timeout; | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2237 | 		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? | 
 | 2238 | 				      CLOCK_REALTIME : CLOCK_MONOTONIC, | 
 | 2239 | 				      HRTIMER_MODE_ABS); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2240 | 		hrtimer_init_sleeper(to, current); | 
 | 2241 | 		hrtimer_set_expires_range_ns(&to->timer, *abs_time, | 
 | 2242 | 					     current->timer_slack_ns); | 
 | 2243 | 	} | 
 | 2244 |  | 
 | 2245 | 	/* | 
 | 2246 | 	 * The waiter is allocated on our stack, manipulated by the requeue | 
 | 2247 | 	 * code while we sleep on uaddr. | 
 | 2248 | 	 */ | 
 | 2249 | 	debug_rt_mutex_init_waiter(&rt_waiter); | 
 | 2250 | 	rt_waiter.task = NULL; | 
 | 2251 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2252 | 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2253 | 	if (unlikely(ret != 0)) | 
 | 2254 | 		goto out; | 
 | 2255 |  | 
| Darren Hart | 84bc4af | 2009-08-13 17:36:53 -0700 | [diff] [blame] | 2256 | 	q.bitset = bitset; | 
 | 2257 | 	q.rt_waiter = &rt_waiter; | 
 | 2258 | 	q.requeue_pi_key = &key2; | 
 | 2259 |  | 
| Darren Hart | 7ada876 | 2010-10-17 08:35:04 -0700 | [diff] [blame] | 2260 | 	/* | 
 | 2261 | 	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref | 
 | 2262 | 	 * count. | 
 | 2263 | 	 */ | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2264 | 	ret = futex_wait_setup(uaddr, val, flags, &q, &hb); | 
| Thomas Gleixner | c8b15a7 | 2009-05-20 09:18:50 +0200 | [diff] [blame] | 2265 | 	if (ret) | 
 | 2266 | 		goto out_key2; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2267 |  | 
 | 2268 | 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */ | 
| Thomas Gleixner | f1a11e0 | 2009-05-05 19:21:40 +0200 | [diff] [blame] | 2269 | 	futex_wait_queue_me(hb, &q, to); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2270 |  | 
 | 2271 | 	spin_lock(&hb->lock); | 
 | 2272 | 	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); | 
 | 2273 | 	spin_unlock(&hb->lock); | 
 | 2274 | 	if (ret) | 
 | 2275 | 		goto out_put_keys; | 
 | 2276 |  | 
 | 2277 | 	/* | 
 | 2278 | 	 * In order for us to be here, we know our q.key == key2, and since | 
 | 2279 | 	 * we took the hb->lock above, we also know that futex_requeue() has | 
 | 2280 | 	 * completed and we no longer have to concern ourselves with a wakeup | 
| Darren Hart | 7ada876 | 2010-10-17 08:35:04 -0700 | [diff] [blame] | 2281 | 	 * race with the atomic proxy lock acquisition by the requeue code. The | 
 | 2282 | 	 * futex_requeue dropped our key1 reference and incremented our key2 | 
 | 2283 | 	 * reference count. | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2284 | 	 */ | 
 | 2285 |  | 
 | 2286 | 	/* Check if the requeue code acquired the second futex for us. */ | 
 | 2287 | 	if (!q.rt_waiter) { | 
 | 2288 | 		/* | 
 | 2289 | 		 * Got the lock. We might not be the anticipated owner if we | 
 | 2290 | 		 * did a lock-steal - fix up the PI-state in that case. | 
 | 2291 | 		 */ | 
 | 2292 | 		if (q.pi_state && (q.pi_state->owner != current)) { | 
 | 2293 | 			spin_lock(q.lock_ptr); | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 2294 | 			ret = fixup_pi_state_owner(uaddr2, &q, current); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2295 | 			spin_unlock(q.lock_ptr); | 
 | 2296 | 		} | 
 | 2297 | 	} else { | 
 | 2298 | 		/* | 
 | 2299 | 		 * We have been woken up by futex_unlock_pi(), a timeout, or a | 
 | 2300 | 		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor | 
 | 2301 | 		 * the pi_state. | 
 | 2302 | 		 */ | 
 | 2303 | 		WARN_ON(!q.pi_state); | 
 | 2304 | 		pi_mutex = &q.pi_state->pi_mutex; | 
 | 2305 | 		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); | 
 | 2306 | 		debug_rt_mutex_free_waiter(&rt_waiter); | 
 | 2307 |  | 
 | 2308 | 		spin_lock(q.lock_ptr); | 
 | 2309 | 		/* | 
 | 2310 | 		 * Fixup the pi_state owner and possibly acquire the lock if we | 
 | 2311 | 		 * haven't already. | 
 | 2312 | 		 */ | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 2313 | 		res = fixup_owner(uaddr2, &q, !ret); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2314 | 		/* | 
 | 2315 | 		 * If fixup_owner() returned an error, propagate that.  If it | 
| Darren Hart | 56ec160 | 2009-09-21 22:29:59 -0700 | [diff] [blame] | 2316 | 		 * acquired the lock, clear -ETIMEDOUT or -EINTR. | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2317 | 		 */ | 
 | 2318 | 		if (res) | 
 | 2319 | 			ret = (res < 0) ? res : 0; | 
 | 2320 |  | 
 | 2321 | 		/* Unqueue and drop the lock. */ | 
 | 2322 | 		unqueue_me_pi(&q); | 
 | 2323 | 	} | 
 | 2324 |  | 
 | 2325 | 	/* | 
 | 2326 | 	 * If fixup_pi_state_owner() faulted and was unable to handle the | 
 | 2327 | 	 * fault, unlock the rt_mutex and return the fault to userspace. | 
 | 2328 | 	 */ | 
 | 2329 | 	if (ret == -EFAULT) { | 
 | 2330 | 		if (rt_mutex_owner(pi_mutex) == current) | 
 | 2331 | 			rt_mutex_unlock(pi_mutex); | 
 | 2332 | 	} else if (ret == -EINTR) { | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2333 | 		/* | 
| Darren Hart | cc6db4e | 2009-07-31 16:20:10 -0700 | [diff] [blame] | 2334 | 		 * We've already been requeued, but cannot restart by calling | 
 | 2335 | 		 * futex_lock_pi() directly. We could restart this syscall, but | 
 | 2336 | 		 * it would detect that the user space "val" changed and return | 
 | 2337 | 		 * -EWOULDBLOCK.  Save the overhead of the restart and return | 
 | 2338 | 		 * -EWOULDBLOCK directly. | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2339 | 		 */ | 
| Thomas Gleixner | 2070887 | 2009-05-19 23:04:59 +0200 | [diff] [blame] | 2340 | 		ret = -EWOULDBLOCK; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2341 | 	} | 
 | 2342 |  | 
 | 2343 | out_put_keys: | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 2344 | 	put_futex_key(&q.key); | 
| Thomas Gleixner | c8b15a7 | 2009-05-20 09:18:50 +0200 | [diff] [blame] | 2345 | out_key2: | 
| Thomas Gleixner | ae791a2 | 2010-11-10 13:30:36 +0100 | [diff] [blame] | 2346 | 	put_futex_key(&key2); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2347 |  | 
 | 2348 | out: | 
 | 2349 | 	if (to) { | 
 | 2350 | 		hrtimer_cancel(&to->timer); | 
 | 2351 | 		destroy_hrtimer_on_stack(&to->timer); | 
 | 2352 | 	} | 
 | 2353 | 	return ret; | 
 | 2354 | } | 
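 |  |  | 
 |  | /* | 
 |  | * Illustrative sketch (not kernel code): a condition-variable style | 
 |  |  * user pairs futex_wait_requeue_pi() above with FUTEX_CMP_REQUEUE_PI | 
 |  |  * roughly as follows, with 'cond' a non-PI futex, 'mutex' a PI futex | 
 |  |  * and 'seq' the expected value of cond (all names hypothetical): | 
 |  |  * | 
 |  |  *	waiter:	  unlock &mutex, then | 
 |  |  *		  syscall(SYS_futex, &cond, FUTEX_WAIT_REQUEUE_PI, seq, | 
 |  |  *			  timeout, &mutex, 0); | 
 |  |  *	signaler: syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE_PI, 1, | 
 |  |  *			  (void *)nr_requeue, &mutex, seq); | 
 |  |  * | 
 |  |  * For FUTEX_WAIT_REQUEUE_PI, val3 is ignored (do_futex() below forces | 
 |  |  * FUTEX_BITSET_MATCH_ANY); for FUTEX_CMP_REQUEUE_PI, nr_requeue rides | 
 |  |  * in the timeout slot and val3 is the expected value of cond. | 
 |  |  */ | 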
 | 2355 |  | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2356 | /* | 
 | 2357 |  * Support for robust futexes: the kernel cleans up held futexes at | 
 | 2358 |  * thread exit time. | 
 | 2359 |  * | 
 | 2360 |  * Implementation: user-space maintains a per-thread list of locks it | 
 | 2361 |  * is holding. Upon do_exit(), the kernel carefully walks this list, | 
 | 2362 |  * and marks all locks that are owned by this thread with the | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2363 |  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2364 |  * always manipulated with the lock held, so the list is private and | 
 | 2365 |  * per-thread. Userspace also maintains a per-thread 'list_op_pending' | 
 | 2366 |  * field, to allow the kernel to clean up if the thread dies after | 
 | 2367 |  * acquiring the lock, but just before it could have added itself to | 
 | 2368 |  * the list. There can only be one such pending lock. | 
 | 2369 |  */ | 
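 |  |  | 
 |  | /* | 
 |  |  * Illustrative sketch (not kernel code) of the userspace side; glibc | 
 |  |  * does the equivalent once per thread (all names hypothetical, with | 
 |  |  * the robust_list entry placed first in the lock structure): | 
 |  |  * | 
 |  |  *	struct my_lock { struct robust_list list; u32 futex; }; | 
 |  |  *	static __thread struct robust_list_head head; | 
 |  |  * | 
 |  |  *	head.list.next    = &head.list;		(empty, circular) | 
 |  |  *	head.futex_offset = offsetof(struct my_lock, futex); | 
 |  |  *	syscall(SYS_set_robust_list, &head, sizeof(head)); | 
 |  |  * | 
 |  |  * Each acquired lock is then linked into head.list, and the kernel | 
 |  |  * walks that list in exit_robust_list() below. | 
 |  |  */ | 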
 | 2370 |  | 
 | 2371 | /** | 
| Darren Hart | d96ee56 | 2009-09-21 22:30:22 -0700 | [diff] [blame] | 2372 |  * sys_set_robust_list() - Set the robust-futex list head of a task | 
 | 2373 |  * @head:	pointer to the list-head | 
 | 2374 |  * @len:	length of the list-head, as userspace expects | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2375 |  */ | 
| Heiko Carstens | 836f92a | 2009-01-14 14:14:33 +0100 | [diff] [blame] | 2376 | SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, | 
 | 2377 | 		size_t, len) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2378 | { | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2379 | 	if (!futex_cmpxchg_enabled) | 
 | 2380 | 		return -ENOSYS; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2381 | 	/* | 
 | 2382 | 	 * The kernel knows only one size for now: | 
 | 2383 | 	 */ | 
 | 2384 | 	if (unlikely(len != sizeof(*head))) | 
 | 2385 | 		return -EINVAL; | 
 | 2386 |  | 
 | 2387 | 	current->robust_list = head; | 
 | 2388 |  | 
 | 2389 | 	return 0; | 
 | 2390 | } | 
 | 2391 |  | 
 | 2392 | /** | 
| Darren Hart | d96ee56 | 2009-09-21 22:30:22 -0700 | [diff] [blame] | 2393 |  * sys_get_robust_list() - Get the robust-futex list head of a task | 
 | 2394 |  * @pid:	pid of the process [zero for current task] | 
 | 2395 |  * @head_ptr:	pointer to a list-head pointer, the kernel fills it in | 
 | 2396 |  * @len_ptr:	pointer to a length field, the kernel fills in the header size | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2397 |  */ | 
| Heiko Carstens | 836f92a | 2009-01-14 14:14:33 +0100 | [diff] [blame] | 2398 | SYSCALL_DEFINE3(get_robust_list, int, pid, | 
 | 2399 | 		struct robust_list_head __user * __user *, head_ptr, | 
 | 2400 | 		size_t __user *, len_ptr) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2401 | { | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 2402 | 	struct robust_list_head __user *head; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2403 | 	unsigned long ret; | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 2404 | 	const struct cred *cred = current_cred(), *pcred; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2405 |  | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2406 | 	if (!futex_cmpxchg_enabled) | 
 | 2407 | 		return -ENOSYS; | 
 | 2408 |  | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2409 | 	if (!pid) | 
 | 2410 | 		head = current->robust_list; | 
 | 2411 | 	else { | 
 | 2412 | 		struct task_struct *p; | 
 | 2413 |  | 
 | 2414 | 		ret = -ESRCH; | 
| Oleg Nesterov | aaa2a97 | 2006-09-29 02:00:55 -0700 | [diff] [blame] | 2415 | 		rcu_read_lock(); | 
| Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 2416 | 		p = find_task_by_vpid(pid); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2417 | 		if (!p) | 
 | 2418 | 			goto err_unlock; | 
 | 2419 | 		ret = -EPERM; | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 2420 | 		pcred = __task_cred(p); | 
| Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 2421 | 		/* If victim is in a different user_ns, then uids are not | 
 | 2422 | 		 * comparable, so we must have CAP_SYS_PTRACE */ | 
 | 2423 | 		if (cred->user->user_ns != pcred->user->user_ns) { | 
 | 2424 | 			if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE)) | 
 | 2425 | 				goto err_unlock; | 
 | 2426 | 			goto ok; | 
 | 2427 | 		} | 
 | 2428 | 		/* If victim is in same user_ns, then uids are comparable */ | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 2429 | 		if (cred->euid != pcred->euid && | 
 | 2430 | 		    cred->euid != pcred->uid && | 
| Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 2431 | 		    !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2432 | 			goto err_unlock; | 
| Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 2433 | ok: | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2434 | 		head = p->robust_list; | 
| Oleg Nesterov | aaa2a97 | 2006-09-29 02:00:55 -0700 | [diff] [blame] | 2435 | 		rcu_read_unlock(); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2436 | 	} | 
 | 2437 |  | 
 | 2438 | 	if (put_user(sizeof(*head), len_ptr)) | 
 | 2439 | 		return -EFAULT; | 
 | 2440 | 	return put_user(head, head_ptr); | 
 | 2441 |  | 
 | 2442 | err_unlock: | 
| Oleg Nesterov | aaa2a97 | 2006-09-29 02:00:55 -0700 | [diff] [blame] | 2443 | 	rcu_read_unlock(); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2444 |  | 
 | 2445 | 	return ret; | 
 | 2446 | } | 
 | 2447 |  | 
 | 2448 | /* | 
 | 2449 |  * Process a futex-list entry, check whether it's owned by the | 
 | 2450 |  * dying task, and do notification if so: | 
 | 2451 |  */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2452 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2453 | { | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2454 | 	u32 uval, nval, mval; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2455 |  | 
| Ingo Molnar | 8f17d3a | 2006-03-27 01:16:27 -0800 | [diff] [blame] | 2456 | retry: | 
 | 2457 | 	if (get_user(uval, uaddr)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2458 | 		return -1; | 
 | 2459 |  | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 2460 | 	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2461 | 		/* | 
 | 2462 | 		 * Ok, this dying thread is truly holding a futex | 
 | 2463 | 		 * of interest. Set the OWNER_DIED bit atomically | 
 | 2464 | 		 * via cmpxchg, and if the value had FUTEX_WAITERS | 
 | 2465 | 		 * set, wake up a waiter (if any). (We have to do a | 
 | 2466 | 		 * futex_wake() even if OWNER_DIED is already set - | 
 | 2467 | 		 * to handle the rare but possible case of recursive | 
 | 2468 | 		 * thread-death.) The rest of the cleanup is done in | 
 | 2469 | 		 * userspace. | 
 | 2470 | 		 */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2471 | 		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; | 
| Thomas Gleixner | 6e0aa9f | 2011-03-14 10:34:35 +0100 | [diff] [blame] | 2472 | 		/* | 
 | 2473 | 		 * We are not holding a lock here, but we want to have | 
 | 2474 | 		 * the pagefault_disable/enable() protection because | 
 | 2475 | 		 * we want to handle the fault gracefully. If the | 
 | 2476 | 		 * access fails we try to fault in the futex with R/W | 
 | 2477 | 		 * verification via get_user_pages. get_user() above | 
 | 2478 | 		 * does not guarantee R/W access. If that fails we | 
 | 2479 | 		 * give up and leave the futex locked. | 
 | 2480 | 		 */ | 
 | 2481 | 		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) { | 
 | 2482 | 			if (fault_in_user_writeable(uaddr)) | 
 | 2483 | 				return -1; | 
 | 2484 | 			goto retry; | 
 | 2485 | 		} | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2486 | 		if (nval != uval) | 
| Ingo Molnar | 8f17d3a | 2006-03-27 01:16:27 -0800 | [diff] [blame] | 2487 | 			goto retry; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2488 |  | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2489 | 		/* | 
 | 2490 | 		 * Wake robust non-PI futexes here. The wakeup of | 
 | 2491 | 		 * PI futexes happens in exit_pi_state(): | 
 | 2492 | 		 */ | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 2493 | 		if (!pi && (uval & FUTEX_WAITERS)) | 
| Peter Zijlstra | c2f9f20 | 2008-09-26 19:32:23 +0200 | [diff] [blame] | 2494 | 			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2495 | 	} | 
 | 2496 | 	return 0; | 
 | 2497 | } | 
 | 2498 |  | 
 | 2499 | /* | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2500 |  * Fetch a robust-list pointer. Bit 0 signals PI futexes: | 
 | 2501 |  */ | 
 | 2502 | static inline int fetch_robust_entry(struct robust_list __user **entry, | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 2503 | 				     struct robust_list __user * __user *head, | 
| Namhyung Kim | 1dcc41b | 2010-09-14 21:43:46 +0900 | [diff] [blame] | 2504 | 				     unsigned int *pi) | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2505 | { | 
 | 2506 | 	unsigned long uentry; | 
 | 2507 |  | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 2508 | 	if (get_user(uentry, (unsigned long __user *)head)) | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2509 | 		return -EFAULT; | 
 | 2510 |  | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 2511 | 	*entry = (void __user *)(uentry & ~1UL); | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2512 | 	*pi = uentry & 1; | 
 | 2513 |  | 
 | 2514 | 	return 0; | 
 | 2515 | } | 
 | 2516 |  | 
 | 2517 | /* | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2518 |  * Walk curr->robust_list (very carefully, it's a userspace list!) | 
 | 2519 |  * and mark any locks found there dead, and notify any waiters. | 
 | 2520 |  * | 
 | 2521 |  * We silently return on any sign of a list-walking problem. | 
 | 2522 |  */ | 
 | 2523 | void exit_robust_list(struct task_struct *curr) | 
 | 2524 | { | 
 | 2525 | 	struct robust_list_head __user *head = curr->robust_list; | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2526 | 	struct robust_list __user *entry, *next_entry, *pending; | 
| Darren Hart | 4c115e9 | 2010-11-04 15:00:00 -0400 | [diff] [blame] | 2527 | 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; | 
 | 2528 | 	unsigned int uninitialized_var(next_pi); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2529 | 	unsigned long futex_offset; | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2530 | 	int rc; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2531 |  | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2532 | 	if (!futex_cmpxchg_enabled) | 
 | 2533 | 		return; | 
 | 2534 |  | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2535 | 	/* | 
 | 2536 | 	 * Fetch the list head (which was registered earlier, via | 
 | 2537 | 	 * sys_set_robust_list()): | 
 | 2538 | 	 */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2539 | 	if (fetch_robust_entry(&entry, &head->list.next, &pi)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2540 | 		return; | 
 | 2541 | 	/* | 
 | 2542 | 	 * Fetch the relative futex offset: | 
 | 2543 | 	 */ | 
 | 2544 | 	if (get_user(futex_offset, &head->futex_offset)) | 
 | 2545 | 		return; | 
 | 2546 | 	/* | 
 | 2547 | 	 * Fetch any possibly pending lock-add first, and handle it | 
 | 2548 | 	 * if it exists: | 
 | 2549 | 	 */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2550 | 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2551 | 		return; | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2552 |  | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2553 | 	next_entry = NULL;	/* avoid warning with gcc */ | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2554 | 	while (entry != &head->list) { | 
 | 2555 | 		/* | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2556 | 		 * Fetch the next entry in the list before calling | 
 | 2557 | 		 * handle_futex_death: | 
 | 2558 | 		 */ | 
 | 2559 | 		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); | 
 | 2560 | 		/* | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2561 | 		 * A pending lock might already be on the list, so | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2562 | 		 * don't process it twice: | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2563 | 		 */ | 
 | 2564 | 		if (entry != pending) | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 2565 | 			if (handle_futex_death((void __user *)entry + futex_offset, | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2566 | 						curr, pi)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2567 | 				return; | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2568 | 		if (rc) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2569 | 			return; | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2570 | 		entry = next_entry; | 
 | 2571 | 		pi = next_pi; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2572 | 		/* | 
 | 2573 | 		 * Avoid excessively long or circular lists: | 
 | 2574 | 		 */ | 
 | 2575 | 		if (!--limit) | 
 | 2576 | 			break; | 
 | 2577 |  | 
 | 2578 | 		cond_resched(); | 
 | 2579 | 	} | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2580 |  | 
 | 2581 | 	if (pending) | 
 | 2582 | 		handle_futex_death((void __user *)pending + futex_offset, | 
 | 2583 | 				   curr, pip); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2584 | } | 
 | 2585 |  | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 2586 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 2587 | 		u32 __user *uaddr2, u32 val2, u32 val3) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2588 | { | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2589 | 	int ret = -ENOSYS, cmd = op & FUTEX_CMD_MASK; | 
 | 2590 | 	unsigned int flags = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2591 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2592 | 	if (!(op & FUTEX_PRIVATE_FLAG)) | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2593 | 		flags |= FLAGS_SHARED; | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2594 |  | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2595 | 	if (op & FUTEX_CLOCK_REALTIME) { | 
 | 2596 | 		flags |= FLAGS_CLOCKRT; | 
 | 2597 | 		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) | 
 | 2598 | 			return -ENOSYS; | 
 | 2599 | 	} | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2600 |  | 
 | 2601 | 	switch (cmd) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2602 | 	case FUTEX_WAIT: | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 2603 | 		val3 = FUTEX_BITSET_MATCH_ANY; | 
 | 2604 | 	case FUTEX_WAIT_BITSET: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2605 | 		ret = futex_wait(uaddr, flags, val, timeout, val3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2606 | 		break; | 
 | 2607 | 	case FUTEX_WAKE: | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 2608 | 		val3 = FUTEX_BITSET_MATCH_ANY; | 
 | 2609 | 	case FUTEX_WAKE_BITSET: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2610 | 		ret = futex_wake(uaddr, flags, val, val3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2611 | 		break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2612 | 	case FUTEX_REQUEUE: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2613 | 		ret = futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2614 | 		break; | 
 | 2615 | 	case FUTEX_CMP_REQUEUE: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2616 | 		ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2617 | 		break; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 2618 | 	case FUTEX_WAKE_OP: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2619 | 		ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 2620 | 		break; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2621 | 	case FUTEX_LOCK_PI: | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2622 | 		if (futex_cmpxchg_enabled) | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2623 | 			ret = futex_lock_pi(uaddr, flags, val, timeout, 0); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2624 | 		break; | 
 | 2625 | 	case FUTEX_UNLOCK_PI: | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2626 | 		if (futex_cmpxchg_enabled) | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2627 | 			ret = futex_unlock_pi(uaddr, flags); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2628 | 		break; | 
 | 2629 | 	case FUTEX_TRYLOCK_PI: | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2630 | 		if (futex_cmpxchg_enabled) | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2631 | 			ret = futex_lock_pi(uaddr, flags, 0, timeout, 1); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2632 | 		break; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2633 | 	case FUTEX_WAIT_REQUEUE_PI: | 
 | 2634 | 		val3 = FUTEX_BITSET_MATCH_ANY; | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2635 | 		ret = futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, | 
 | 2636 | 					    uaddr2); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2637 | 		break; | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2638 | 	case FUTEX_CMP_REQUEUE_PI: | 
| Darren Hart | b41277d | 2010-11-08 13:10:09 -0800 | [diff] [blame] | 2639 | 		ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1); | 
| Darren Hart | 52400ba | 2009-04-03 13:40:49 -0700 | [diff] [blame] | 2640 | 		break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2641 | 	default: | 
 | 2642 | 		ret = -ENOSYS; | 
 | 2643 | 	} | 
 | 2644 | 	return ret; | 
 | 2645 | } | 

SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
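		/*
		 * FUTEX_WAIT passes a relative timeout, which the
		 * ktime_add_safe() below turns into an absolute
		 * CLOCK_MONOTONIC expiry. The other timed ops already
		 * supply absolute timeouts.
		 */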
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE,
	 * FUTEX_CMP_REQUEUE or FUTEX_CMP_REQUEUE_PI (FUTEX_WAIT_REQUEUE_PI
	 * uses 'utime' as a real timeout and is handled above).
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
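
/*
 * Illustrative sketch (not from the original source): a timed wait from
 * userspace. For FUTEX_WAIT the timespec is relative; the conversion to
 * an absolute CLOCK_MONOTONIC deadline happens in the syscall above.
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	// blocks while *uaddr still equals 'expected', for at most 1s;
 *	// fails with ETIMEDOUT on expiry, EAGAIN if *uaddr != expected
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, &ts, NULL, 0);
 */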

static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This cmpxchg is expected to fail, and that is the point: some
	 * architectures detect at runtime whether
	 * futex_atomic_cmpxchg_inatomic() actually works. We want to know
	 * the result before any of the complex code paths run, and we must
	 * also prevent registration of robust lists when it does not.
	 * NULL is guaranteed to fault, so a functional implementation
	 * returns -EFAULT while the non-functional ones return -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;

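	/* Set up each hash bucket's priority list and its protecting lock. */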
	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);
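
/*
 * Illustrative note (not part of the original file): when the probe in
 * futex_init() fails, futex_cmpxchg_enabled stays 0 and the PI-futex and
 * robust-list operations report -ENOSYS. A hypothetical userspace probe:
 *
 *	// 'word' is an ordinary zeroed uint32_t nobody holds as a PI lock
 *	if (syscall(SYS_futex, &word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0) &&
 *	    errno == ENOSYS)
 *		;	// no PI-futex support on this kernel/arch
 */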