/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

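/*
 * Non-zero when the architecture's futex cmpxchg helper works; the
 * robust/PI futex paths (e.g. exit_pi_state_list() below) bail out
 * early without it.
 */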
int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */
struct futex_q {
	struct plist_node list;
	wait_queue_head_t waiters;

	/* Which hash list lock to use: */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on: */
	union futex_key key;

	/* For fd, sigio sent using these: */
	int fd;
	struct file *filp;

	/* Optional priority inheritance state: */
	struct futex_pi_state *pi_state;
	struct task_struct *task;

	/* Bitset for the optional bitmasked wakeup */
	u32 bitset;
};

/*
 * Split the global futex lock into per-hash-bucket locks:
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

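/*
 * The global hash table: 1 << FUTEX_HASHBITS buckets (16 with
 * CONFIG_BASE_SMALL, 256 otherwise), each protected by its own lock.
 */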
static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;

/*
 * Take mm->mmap_sem when the futex is shared:
 */
static inline void futex_lock_mm(struct rw_semaphore *fshared)
{
	if (fshared)
		down_read(fshared);
}

/*
 * Release mm->mmap_sem when the futex is shared:
 */
static inline void futex_unlock_mm(struct rw_semaphore *fshared)
{
	if (fshared)
		up_read(fshared);
}

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: NULL for a PROCESS_PRIVATE futex,
 *	&current->mm->mmap_sem for a PROCESS_SHARED futex
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * fshared is NULL for PROCESS_PRIVATE futexes.
 * For other futexes, it points to &current->mm->mmap_sem and the
 * caller must have taken the reader lock, but NOT any spinlocks.
 */
static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
			 union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 *       but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}
	/*
	 * The futex is hashed differently depending on whether
	 * it's in a shared or private mapping.  So check vma first.
	 */
	vma = find_extend_vma(mm, address);
	if (unlikely(!vma))
		return -EFAULT;

	/*
	 * Permissions.
	 */
	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.  Therefore we use
	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
	 * mappings of _writable_ handles.
	 */
	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
		key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}

	/*
	 * Linear file mappings are also simple.
	 */
	key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
	key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
		key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
				     + vma->vm_pgoff);
		return 0;
	}

	/*
	 * We could walk the page table to read the non-linear
	 * pte, and get the page index without fetching the page
	 * from swap.  But that's a lot of code to duplicate here
	 * for a rare case, so we simply fetch the page.
	 */
	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
	if (err >= 0) {
		key->shared.pgoff =
			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		put_page(page);
		return 0;
	}
	return err;
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (key->both.ptr == 0)
		return;
	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;
	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

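/*
 * Atomically cmpxchg the user-space futex word with pagefaults
 * disabled, for use while a hash bucket lock is held. On a fault the
 * architecture helper returns -EFAULT in the value, which callers
 * check for.
 */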
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

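/*
 * Non-faulting read of the user-space futex word, for use while a
 * hash bucket lock is held. Returns -EFAULT when the page is not
 * resident; the caller then has to drop its locks and fault the page
 * in before retrying.
 */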
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/*
 * Fault handling.
 * If fshared is non-NULL, current->mm->mmap_sem is already held.
 */
static int futex_handle_fault(unsigned long address,
			      struct rw_semaphore *fshared, int attempt)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int ret = -EFAULT;

	if (attempt > 2)
		return ret;

	if (!fshared)
		down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (vma && address >= vma->vm_start &&
	    (vma->vm_flags & VM_WRITE)) {
		int fault;
		fault = handle_mm_fault(mm, vma, address, 1);
		if (unlikely((fault & VM_FAULT_ERROR))) {
#if 0
			/* XXX: let's do this when we verify it is OK */
			if (ret & VM_FAULT_OOM)
				ret = -ENOMEM;
#endif
		} else {
			ret = 0;
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
		}
	}
	if (!fshared)
		up_read(&mm->mmap_sem);
	return ret;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);

	current->pi_state_cache = pi_state;

	return 0;
}

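/*
 * Hand out the pi_state cached by refill_pi_state_cache(); the cache
 * must have been refilled before getting here.
 */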
static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

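/*
 * Drop a reference to a pi_state. On the last reference, unlink it
 * from its owner and either recycle it into the per-task cache or
 * free it.
 */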
static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
		p = ERR_PTR(-ESRCH);
	else
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}

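/*
 * Look up the pi_state for a PI futex whose current user-space value
 * is uval: either share the pi_state of another waiter already queued
 * on the hash bucket, or create a new one attached to the task whose
 * TID is encoded in uval.
 */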
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));
			WARN_ON(pid && pi_state->owner &&
				pi_state->owner->pid != pid);

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	plist_del(&q->list, &q->list.plist);
	if (q->filp)
		send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
	/*
	 * The lock in wake_up_all() is a crucial memory barrier after the
	 * plist_del() and also before assigning to q->lock_ptr.
	 */
	wake_up_all(&q->waiters);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks.  This must come last.
	 *
	 * A memory barrier is required here to prevent the following store
	 * to lock_ptr from getting ahead of the wakeup. Clearing the lock
	 * at the end of wake_up_all() does not prevent this store from
	 * moving.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

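/*
 * Wake up the top waiter of a PI futex: hand the user-space futex
 * value over to the next owner and transfer the rt_mutex ownership
 * along with it.
 */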
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy. That way we know that a lock waiter
	 * is on the fly. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

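/*
 * Unlock a PI futex that has no kernel-side waiters by clearing the
 * user-space value, provided it still matches uval.
 */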
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner-died
	 * bit need not be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

/*
 * Wake up to nr_wake waiters hashed on the physical page that is
 * mapped to this virtual address, restricted to those whose bitset
 * intersects the given one:
 */
static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
		      int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key;
	int ret;

	if (!bitset)
		return -EINVAL;

	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
out:
	futex_unlock_mm(fshared);
	return ret;
}

/*
 * Wake up to nr_wake waiters on uaddr1, atomically perform the
 * operation encoded in op on the futex word at uaddr2 and, if the
 * encoded comparison on uaddr2's old value succeeds, also wake up to
 * nr_wake2 waiters on uaddr2:
 */
static int
futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
	      u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret, attempt = 0;

retryfull:
	futex_lock_mm(fshared);

	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry:
	double_lock_hb(hb1, hb2);

	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		u32 dummy;

		spin_unlock(&hb1->lock);
		if (hb1 != hb2)
			spin_unlock(&hb2->lock);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out;
		}

		/*
		 * futex_atomic_op_inuser needs to both read and write
		 * *(int __user *)uaddr2, but we can't modify it
		 * non-atomically.  Therefore, if get_user below is not
		 * enough, we need to handle the fault ourselves, while
		 * still holding the mmap_sem.
		 */
		if (attempt++) {
			ret = futex_handle_fault((unsigned long)uaddr2,
						 fshared, attempt);
			if (ret)
				goto out;
			goto retry;
		}

		/*
		 * If we would have faulted, release mmap_sem,
		 * fault it in and start all over again.
		 */
		futex_unlock_mm(fshared);

		ret = get_user(dummy, uaddr2);
		if (ret)
			return ret;

		goto retryfull;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex (&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
out:
	futex_unlock_mm(fshared);

	return ret;
}

|  | 890 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | * Requeue all waiters hashed on one physical page to another | 
|  | 892 | * physical page. | 
|  | 893 | */ | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 894 | static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared, | 
|  | 895 | u32 __user *uaddr2, | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 896 | int nr_wake, int nr_requeue, u32 *cmpval) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 897 | { | 
|  | 898 | union futex_key key1, key2; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 899 | struct futex_hash_bucket *hb1, *hb2; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 900 | struct plist_head *head1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 | struct futex_q *this, *next; | 
|  | 902 | int ret, drop_count = 0; | 
|  | 903 |  | 
|  | 904 | retry: | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 905 | futex_lock_mm(fshared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 906 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 907 | ret = get_futex_key(uaddr1, fshared, &key1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 908 | if (unlikely(ret != 0)) | 
|  | 909 | goto out; | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 910 | ret = get_futex_key(uaddr2, fshared, &key2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 911 | if (unlikely(ret != 0)) | 
|  | 912 | goto out; | 
|  | 913 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 914 | hb1 = hash_futex(&key1); | 
|  | 915 | hb2 = hash_futex(&key2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 916 |  | 
| Ingo Molnar | 8b8f319 | 2006-07-03 00:25:05 -0700 | [diff] [blame] | 917 | double_lock_hb(hb1, hb2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 919 | if (likely(cmpval != NULL)) { | 
|  | 920 | u32 curval; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 921 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 922 | ret = get_futex_value_locked(&curval, uaddr1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 |  | 
|  | 924 | if (unlikely(ret)) { | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 925 | spin_unlock(&hb1->lock); | 
|  | 926 | if (hb1 != hb2) | 
|  | 927 | spin_unlock(&hb2->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 928 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 929 | /* | 
|  | 930 | * If we would have faulted, release mmap_sem, fault | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 931 | * it in and start all over again. | 
|  | 932 | */ | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 933 | futex_unlock_mm(fshared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 934 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 935 | ret = get_user(curval, uaddr1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 936 |  | 
|  | 937 | if (!ret) | 
|  | 938 | goto retry; | 
|  | 939 |  | 
|  | 940 | return ret; | 
|  | 941 | } | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 942 | if (curval != *cmpval) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 | ret = -EAGAIN; | 
|  | 944 | goto out_unlock; | 
|  | 945 | } | 
|  | 946 | } | 
|  | 947 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 948 | head1 = &hb1->chain; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 949 | plist_for_each_entry_safe(this, next, head1, list) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 950 | if (!match_futex (&this->key, &key1)) | 
|  | 951 | continue; | 
|  | 952 | if (++ret <= nr_wake) { | 
|  | 953 | wake_futex(this); | 
|  | 954 | } else { | 
| Sebastien Dugue | 59e0e0a | 2006-06-27 02:55:03 -0700 | [diff] [blame] | 955 | /* | 
|  | 956 | * If key1 and key2 hash to the same bucket, no need to | 
|  | 957 | * requeue. | 
|  | 958 | */ | 
|  | 959 | if (likely(head1 != &hb2->chain)) { | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 960 | plist_del(&this->list, &hb1->chain); | 
|  | 961 | plist_add(&this->list, &hb2->chain); | 
| Sebastien Dugue | 59e0e0a | 2006-06-27 02:55:03 -0700 | [diff] [blame] | 962 | this->lock_ptr = &hb2->lock; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 963 | #ifdef CONFIG_DEBUG_PI_LIST | 
|  | 964 | this->list.plist.lock = &hb2->lock; | 
|  | 965 | #endif | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 966 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 967 | this->key = key2; | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 968 | get_futex_key_refs(&key2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | drop_count++; | 
|  | 970 |  | 
|  | 971 | if (ret - nr_wake >= nr_requeue) | 
|  | 972 | break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | } | 
|  | 974 | } | 
|  | 975 |  | 
|  | 976 | out_unlock: | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 977 | spin_unlock(&hb1->lock); | 
|  | 978 | if (hb1 != hb2) | 
|  | 979 | spin_unlock(&hb2->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 |  | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 981 | /* drop_futex_key_refs() must be called outside the spinlocks. */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 | while (--drop_count >= 0) | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 983 | drop_futex_key_refs(&key1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 984 |  | 
|  | 985 | out: | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 986 | futex_unlock_mm(fshared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | return ret; | 
|  | 988 | } | 
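|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not part of this file): the wake/requeue loop above | 
|  |  | * is what FUTEX_CMP_REQUEUE exposes to userspace. A condvar-style broadcast | 
|  |  | * can wake one waiter and requeue the rest onto the mutex word instead of | 
|  |  | * waking them all (assumed raw syscall usage; cond, mutex, val are placeholders): | 
|  |  | * | 
|  |  | *   syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE, 1, INT_MAX, &mutex, val); | 
|  |  | */ | 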
|  | 989 |  | 
|  | 990 | /* The key must already be stored in q->key. */ | 
|  | 991 | static inline struct futex_hash_bucket * | 
|  | 992 | queue_lock(struct futex_q *q, int fd, struct file *filp) | 
|  | 993 | { | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 994 | struct futex_hash_bucket *hb; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 |  | 
|  | 996 | q->fd = fd; | 
|  | 997 | q->filp = filp; | 
|  | 998 |  | 
|  | 999 | init_waitqueue_head(&q->waiters); | 
|  | 1000 |  | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 1001 | get_futex_key_refs(&q->key); | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1002 | hb = hash_futex(&q->key); | 
|  | 1003 | q->lock_ptr = &hb->lock; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1004 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1005 | spin_lock(&hb->lock); | 
|  | 1006 | return hb; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1007 | } | 
|  | 1008 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1009 | static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1010 | { | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1011 | int prio; | 
|  | 1012 |  | 
|  | 1013 | /* | 
|  | 1014 | * The priority used to register this element is | 
|  | 1015 | * - either the real thread-priority for the real-time threads | 
|  | 1016 | * (i.e. threads with a priority lower than MAX_RT_PRIO) | 
|  | 1017 | * - or MAX_RT_PRIO for non-RT threads. | 
|  | 1018 | * Thus, all RT-threads are woken first in priority order, and | 
|  | 1019 | * the others are woken last, in FIFO order. | 
|  | 1020 | */ | 
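|  |  | /* | 
|  |  | * Example (assumed scale where MAX_RT_PRIO == 100): an RT waiter with | 
|  |  | * normal_prio 5 is queued at plist priority 5, while any SCHED_OTHER | 
|  |  | * waiter (normal_prio >= 100) is clamped to 100, so all non-RT waiters | 
|  |  | * share a single FIFO tail behind every RT waiter. | 
|  |  | */ | 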
|  | 1021 | prio = min(current->normal_prio, MAX_RT_PRIO); | 
|  | 1022 |  | 
|  | 1023 | plist_node_init(&q->list, prio); | 
|  | 1024 | #ifdef CONFIG_DEBUG_PI_LIST | 
|  | 1025 | q->list.plist.lock = &hb->lock; | 
|  | 1026 | #endif | 
|  | 1027 | plist_add(&q->list, &hb->chain); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1028 | q->task = current; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1029 | spin_unlock(&hb->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | } | 
|  | 1031 |  | 
|  | 1032 | static inline void | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1033 | queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1034 | { | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1035 | spin_unlock(&hb->lock); | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 1036 | drop_futex_key_refs(&q->key); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1037 | } | 
|  | 1038 |  | 
|  | 1039 | /* | 
|  | 1040 | * queue_me and unqueue_me must be called as a pair, each | 
|  | 1041 | * exactly once.  They are called with the hashed spinlock held. | 
|  | 1042 | */ | 
|  | 1043 |  | 
|  | 1044 | /* The key must already be stored in q->key. */ | 
|  | 1045 | static void queue_me(struct futex_q *q, int fd, struct file *filp) | 
|  | 1046 | { | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1047 | struct futex_hash_bucket *hb; | 
|  | 1048 |  | 
|  | 1049 | hb = queue_lock(q, fd, filp); | 
|  | 1050 | __queue_me(q, hb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1051 | } | 
|  | 1052 |  | 
|  | 1053 | /* Return 1 if we were still queued (i.e. 0 means we were woken) */ | 
|  | 1054 | static int unqueue_me(struct futex_q *q) | 
|  | 1055 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1056 | spinlock_t *lock_ptr; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1057 | int ret = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 |  | 
|  | 1059 | /* In the common case we don't take the spinlock, which is nice. */ | 
|  | 1060 | retry: | 
|  | 1061 | lock_ptr = q->lock_ptr; | 
| Christian Borntraeger | e91467e | 2006-08-05 12:13:52 -0700 | [diff] [blame] | 1062 | barrier(); | 
| Stephen Hemminger | c80544d | 2007-10-18 03:07:05 -0700 | [diff] [blame] | 1063 | if (lock_ptr != NULL) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | spin_lock(lock_ptr); | 
|  | 1065 | /* | 
|  | 1066 | * q->lock_ptr can change between reading it and | 
|  | 1067 | * spin_lock(), causing us to take the wrong lock.  This | 
|  | 1068 | * corrects the race condition. | 
|  | 1069 | * | 
|  | 1070 | * Reasoning goes like this: if we have the wrong lock, | 
|  | 1071 | * q->lock_ptr must have changed (maybe several times) | 
|  | 1072 | * between reading it and the spin_lock().  It can | 
|  | 1073 | * change again after the spin_lock() but only if it was | 
|  | 1074 | * already changed before the spin_lock().  It cannot, | 
|  | 1075 | * however, change back to the original value.  Therefore | 
|  | 1076 | * we can detect whether we acquired the correct lock. | 
|  | 1077 | */ | 
|  | 1078 | if (unlikely(lock_ptr != q->lock_ptr)) { | 
|  | 1079 | spin_unlock(lock_ptr); | 
|  | 1080 | goto retry; | 
|  | 1081 | } | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1082 | WARN_ON(plist_node_empty(&q->list)); | 
|  | 1083 | plist_del(&q->list, &q->list.plist); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1084 |  | 
|  | 1085 | BUG_ON(q->pi_state); | 
|  | 1086 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | spin_unlock(lock_ptr); | 
|  | 1088 | ret = 1; | 
|  | 1089 | } | 
|  | 1090 |  | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 1091 | drop_futex_key_refs(&q->key); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 | return ret; | 
|  | 1093 | } | 
|  | 1094 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1095 | /* | 
|  | 1096 | * PI futexes cannot be requeued and must remove themselves from the | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1097 | * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry | 
|  | 1098 | * and dropped here. | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1099 | */ | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1100 | static void unqueue_me_pi(struct futex_q *q) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1101 | { | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1102 | WARN_ON(plist_node_empty(&q->list)); | 
|  | 1103 | plist_del(&q->list, &q->list.plist); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1104 |  | 
|  | 1105 | BUG_ON(!q->pi_state); | 
|  | 1106 | free_pi_state(q->pi_state); | 
|  | 1107 | q->pi_state = NULL; | 
|  | 1108 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1109 | spin_unlock(q->lock_ptr); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1110 |  | 
| Rusty Russell | 9adef58 | 2007-05-08 00:26:42 -0700 | [diff] [blame] | 1111 | drop_futex_key_refs(&q->key); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1112 | } | 
|  | 1113 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1114 | /* | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1115 | * Fixup the pi_state owner with the new owner. | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1116 | * | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1117 | * Must be called with the hash bucket lock held and mm->sem held for | 
|  | 1118 | * non-private futexes. | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1119 | */ | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1120 | static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1121 | struct task_struct *newowner) | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1122 | { | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1123 | u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1124 | struct futex_pi_state *pi_state = q->pi_state; | 
|  | 1125 | u32 uval, curval, newval; | 
|  | 1126 | int ret; | 
|  | 1127 |  | 
|  | 1128 | /* Owner died? */ | 
|  | 1129 | if (pi_state->owner != NULL) { | 
|  | 1130 | spin_lock_irq(&pi_state->owner->pi_lock); | 
|  | 1131 | WARN_ON(list_empty(&pi_state->list)); | 
|  | 1132 | list_del_init(&pi_state->list); | 
|  | 1133 | spin_unlock_irq(&pi_state->owner->pi_lock); | 
|  | 1134 | } else | 
|  | 1135 | newtid |= FUTEX_OWNER_DIED; | 
|  | 1136 |  | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1137 | pi_state->owner = newowner; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1138 |  | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1139 | spin_lock_irq(&newowner->pi_lock); | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1140 | WARN_ON(!list_empty(&pi_state->list)); | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1141 | list_add(&pi_state->list, &newowner->pi_state_list); | 
|  | 1142 | spin_unlock_irq(&newowner->pi_lock); | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1143 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1144 | /* | 
|  | 1145 | * We own it, so we have to replace the pending owner | 
|  | 1146 | * TID. This must be atomic as we have to preserve the | 
|  | 1147 | * owner died bit here. | 
|  | 1148 | */ | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1149 | ret = get_futex_value_locked(&uval, uaddr); | 
|  | 1150 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1151 | while (!ret) { | 
|  | 1152 | newval = (uval & FUTEX_OWNER_DIED) | newtid; | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1153 |  | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1154 | curval = cmpxchg_futex_value_locked(uaddr, uval, newval); | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1155 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1156 | if (curval == -EFAULT) | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1157 | ret = -EFAULT; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1158 | if (curval == uval) | 
|  | 1159 | break; | 
|  | 1160 | uval = curval; | 
|  | 1161 | } | 
|  | 1162 | return ret; | 
|  | 1163 | } | 
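|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative layout of the futex word manipulated above (bit names from | 
|  |  | * the futex UAPI; exact values assumed): | 
|  |  | * | 
|  |  | *   u32 word = task_pid_vnr(owner);   - owner TID, within FUTEX_TID_MASK | 
|  |  | *   word |= FUTEX_WAITERS;            - kernel has at least one waiter | 
|  |  | *   word |= FUTEX_OWNER_DIED;         - the previous owner exited | 
|  |  | */ | 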
|  | 1164 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1165 | /* | 
|  | 1166 | * In case we must use restart_block to restart a futex_wait, | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1167 | * we encode the futex's shared capability in the 'flags' field: | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1168 | */ | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1169 | #define FLAGS_SHARED  1 | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1170 |  | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1171 | static long futex_wait_restart(struct restart_block *restart); | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1172 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1173 | static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 1174 | u32 val, ktime_t *abs_time, u32 bitset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | { | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1176 | struct task_struct *curr = current; | 
|  | 1177 | DECLARE_WAITQUEUE(wait, curr); | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1178 | struct futex_hash_bucket *hb; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | struct futex_q q; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1180 | u32 uval; | 
|  | 1181 | int ret; | 
| Thomas Gleixner | bd19723 | 2007-06-17 21:11:10 +0200 | [diff] [blame] | 1182 | struct hrtimer_sleeper t; | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1183 | int rem = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 |  | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 1185 | if (!bitset) | 
|  | 1186 | return -EINVAL; | 
|  | 1187 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1188 | q.pi_state = NULL; | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 1189 | q.bitset = bitset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | retry: | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1191 | futex_lock_mm(fshared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1193 | ret = get_futex_key(uaddr, fshared, &q.key); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | if (unlikely(ret != 0)) | 
|  | 1195 | goto out_release_sem; | 
|  | 1196 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1197 | hb = queue_lock(&q, -1, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 |  | 
|  | 1199 | /* | 
|  | 1200 | * Access the page AFTER the futex is queued. | 
|  | 1201 | * Order is important: | 
|  | 1202 | * | 
|  | 1203 | *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); | 
|  | 1204 | *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); } | 
|  | 1205 | * | 
|  | 1206 | * The basic logical guarantee of a futex is that it blocks ONLY | 
|  | 1207 | * if cond(var) is known to be true at the time of blocking, for | 
|  | 1208 | * any cond.  If we queued after testing *uaddr, that would open | 
|  | 1209 | * a race condition where we could block indefinitely with | 
|  | 1210 | * cond(var) false, which would violate the guarantee. | 
|  | 1211 | * | 
|  | 1212 | * A consequence is that futex_wait() can return zero and absorb | 
|  | 1213 | * a wakeup when *uaddr != val on entry to the syscall.  This is | 
|  | 1214 | * rare, but normal. | 
|  | 1215 | * | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1216 | * For shared futexes, we hold the mmap semaphore, so the mapping | 
|  | 1217 | * cannot have changed since we looked it up in get_futex_key. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | */ | 
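|  |  | /* | 
|  |  | * Illustrative userspace side of the pattern above (sketch, not part of | 
|  |  | * this file; var and cond() are placeholders): the waiter passes the value | 
|  |  | * it observed, so a concurrent update is caught below and -EWOULDBLOCK is | 
|  |  | * returned instead of sleeping: | 
|  |  | * | 
|  |  | *   val = *(volatile u32 *)&var; | 
|  |  | *   if (cond(val)) | 
|  |  | *           syscall(SYS_futex, &var, FUTEX_WAIT, val, NULL); | 
|  |  | */ | 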
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1219 | ret = get_futex_value_locked(&uval, uaddr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 |  | 
|  | 1221 | if (unlikely(ret)) { | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1222 | queue_unlock(&q, hb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1224 | /* | 
|  | 1225 | * If we would have faulted, release mmap_sem, fault it in and | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1226 | * start all over again. | 
|  | 1227 | */ | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1228 | futex_unlock_mm(fshared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1230 | ret = get_user(uval, uaddr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 |  | 
|  | 1232 | if (!ret) | 
|  | 1233 | goto retry; | 
|  | 1234 | return ret; | 
|  | 1235 | } | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1236 | ret = -EWOULDBLOCK; | 
|  | 1237 | if (uval != val) | 
|  | 1238 | goto out_unlock_release_sem; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1239 |  | 
|  | 1240 | /* Only actually queue if *uaddr contained val.  */ | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1241 | __queue_me(&q, hb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1242 |  | 
|  | 1243 | /* | 
|  | 1244 | * Now the futex is queued and we have checked the data, we | 
|  | 1245 | * don't want to hold mmap_sem while we sleep. | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1246 | */ | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1247 | futex_unlock_mm(fshared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1248 |  | 
|  | 1249 | /* | 
|  | 1250 | * There might have been scheduling since the queue_me(), as we | 
|  | 1251 | * cannot hold a spinlock across the get_user() in case it | 
|  | 1252 | * faults, and we cannot just set TASK_INTERRUPTIBLE state when | 
|  | 1253 | * queueing ourselves into the futex hash.  This code thus has to | 
|  | 1254 | * rely on the futex_wake() code removing us from the hash when it | 
|  | 1255 | * wakes us up. | 
|  | 1256 | */ | 
|  | 1257 |  | 
|  | 1258 | /* add_wait_queue is the barrier after __set_current_state. */ | 
|  | 1259 | __set_current_state(TASK_INTERRUPTIBLE); | 
|  | 1260 | add_wait_queue(&q.waiters, &wait); | 
|  | 1261 | /* | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1262 | * !plist_node_empty() is safe here without any lock. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | * q.lock_ptr != 0 is not safe, because of ordering against wakeup. | 
|  | 1264 | */ | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1265 | if (likely(!plist_node_empty(&q.list))) { | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1266 | if (!abs_time) | 
|  | 1267 | schedule(); | 
|  | 1268 | else { | 
|  | 1269 | hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 
|  | 1270 | hrtimer_init_sleeper(&t, current); | 
|  | 1271 | t.timer.expires = *abs_time; | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1272 |  | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1273 | hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS); | 
| Peter Zijlstra | 3588a08 | 2008-02-01 17:45:13 +0100 | [diff] [blame] | 1274 | if (!hrtimer_active(&t.timer)) | 
|  | 1275 | t.task = NULL; | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1276 |  | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1277 | /* | 
|  | 1278 | * the timer could have already expired, in which | 
|  | 1279 | * case current would be flagged for rescheduling. | 
|  | 1280 | * Don't bother calling schedule. | 
|  | 1281 | */ | 
|  | 1282 | if (likely(t.task)) | 
|  | 1283 | schedule(); | 
|  | 1284 |  | 
|  | 1285 | hrtimer_cancel(&t.timer); | 
|  | 1286 |  | 
|  | 1287 | /* Flag if a timeout occurred */ | 
|  | 1288 | rem = (t.task == NULL); | 
|  | 1289 | } | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1290 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 | __set_current_state(TASK_RUNNING); | 
|  | 1292 |  | 
|  | 1293 | /* | 
|  | 1294 | * NOTE: we don't remove ourselves from the waitqueue because | 
|  | 1295 | * we are the only user of it. | 
|  | 1296 | */ | 
|  | 1297 |  | 
|  | 1298 | /* If we were woken (and unqueued), we succeeded, whatever. */ | 
|  | 1299 | if (!unqueue_me(&q)) | 
|  | 1300 | return 0; | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1301 | if (rem) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | return -ETIMEDOUT; | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1303 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1304 | /* | 
|  | 1305 | * We expect signal_pending(current), but another thread may | 
|  | 1306 | * have handled it for us already. | 
|  | 1307 | */ | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1308 | if (!abs_time) | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1309 | return -ERESTARTSYS; | 
|  | 1310 | else { | 
|  | 1311 | struct restart_block *restart; | 
|  | 1312 | restart = ¤t_thread_info()->restart_block; | 
|  | 1313 | restart->fn = futex_wait_restart; | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1314 | restart->futex.uaddr = (u32 *)uaddr; | 
|  | 1315 | restart->futex.val = val; | 
|  | 1316 | restart->futex.time = abs_time->tv64; | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 1317 | restart->futex.bitset = bitset; | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1318 | restart->futex.flags = 0; | 
|  | 1319 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1320 | if (fshared) | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1321 | restart->futex.flags |= FLAGS_SHARED; | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1322 | return -ERESTART_RESTARTBLOCK; | 
|  | 1323 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1324 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1325 | out_unlock_release_sem: | 
|  | 1326 | queue_unlock(&q, hb); | 
|  | 1327 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | out_release_sem: | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1329 | futex_unlock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1330 | return ret; | 
|  | 1331 | } | 
|  | 1332 |  | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1333 |  | 
|  | 1334 | static long futex_wait_restart(struct restart_block *restart) | 
|  | 1335 | { | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1336 | u32 __user *uaddr = (u32 __user *)restart->futex.uaddr; | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1337 | struct rw_semaphore *fshared = NULL; | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1338 | ktime_t t; | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1339 |  | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1340 | t.tv64 = restart->futex.time; | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1341 | restart->fn = do_no_restart_syscall; | 
| Steven Rostedt | ce6bd42 | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1342 | if (restart->futex.flags & FLAGS_SHARED) | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1343 | fshared = ¤t->mm->mmap_sem; | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 1344 | return (long)futex_wait(uaddr, fshared, restart->futex.val, &t, | 
|  | 1345 | restart->futex.bitset); | 
| Nick Piggin | 72c1bbf | 2007-05-08 00:26:43 -0700 | [diff] [blame] | 1346 | } | 
|  | 1347 |  | 
|  | 1348 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1349 | /* | 
|  | 1350 | * Userspace tried a 0 -> TID atomic transition of the futex value | 
|  | 1351 | * and failed. The kernel side here does the whole locking operation: | 
|  | 1352 | * if there are waiters then it will block, it does PI, etc. (Due to | 
|  | 1353 | * races the kernel might see a 0 value of the futex too.) | 
|  | 1354 | */ | 
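|  |  | /* | 
|  |  | * Illustrative userspace fast path feeding into this slow path (sketch, | 
|  |  | * assumed glibc-like usage; lock is a placeholder structure holding the | 
|  |  | * futex word): the uncontended acquire is a single cmpxchg in userspace, | 
|  |  | * and only on failure is FUTEX_LOCK_PI invoked: | 
|  |  | * | 
|  |  | *   if (cmpxchg(&lock->futex, 0, gettid()) != 0) | 
|  |  | *           syscall(SYS_futex, &lock->futex, FUTEX_LOCK_PI, 0, NULL); | 
|  |  | */ | 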
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1355 | static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, | 
|  | 1356 | int detect, ktime_t *time, int trylock) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1357 | { | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1358 | struct hrtimer_sleeper timeout, *to = NULL; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1359 | struct task_struct *curr = current; | 
|  | 1360 | struct futex_hash_bucket *hb; | 
|  | 1361 | u32 uval, newval, curval; | 
|  | 1362 | struct futex_q q; | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1363 | int ret, lock_taken, ownerdied = 0, attempt = 0; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1364 |  | 
|  | 1365 | if (refill_pi_state_cache()) | 
|  | 1366 | return -ENOMEM; | 
|  | 1367 |  | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1368 | if (time) { | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1369 | to = &timeout; | 
| Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1370 | hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1371 | hrtimer_init_sleeper(to, current); | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1372 | to->timer.expires = *time; | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1373 | } | 
|  | 1374 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1375 | q.pi_state = NULL; | 
|  | 1376 | retry: | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1377 | futex_lock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1378 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1379 | ret = get_futex_key(uaddr, fshared, &q.key); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1380 | if (unlikely(ret != 0)) | 
|  | 1381 | goto out_release_sem; | 
|  | 1382 |  | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1383 | retry_unlocked: | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1384 | hb = queue_lock(&q, -1, NULL); | 
|  | 1385 |  | 
|  | 1386 | retry_locked: | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1387 | ret = lock_taken = 0; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1388 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1389 | /* | 
|  | 1390 | * To avoid races, we attempt to take the lock here again | 
|  | 1391 | * (by doing a 0 -> TID atomic cmpxchg), while holding all | 
|  | 1392 | * the locks. It will most likely not succeed. | 
|  | 1393 | */ | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1394 | newval = task_pid_vnr(current); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1395 |  | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1396 | curval = cmpxchg_futex_value_locked(uaddr, 0, newval); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1397 |  | 
|  | 1398 | if (unlikely(curval == -EFAULT)) | 
|  | 1399 | goto uaddr_faulted; | 
|  | 1400 |  | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1401 | /* | 
|  | 1402 | * Detect deadlocks. In case of REQUEUE_PI this is a valid | 
|  | 1403 | * situation and we return success to user space. | 
|  | 1404 | */ | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1405 | if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) { | 
| Thomas Gleixner | bd19723 | 2007-06-17 21:11:10 +0200 | [diff] [blame] | 1406 | ret = -EDEADLK; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1407 | goto out_unlock_release_sem; | 
|  | 1408 | } | 
|  | 1409 |  | 
|  | 1410 | /* | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1411 | * Surprise - we got the lock. Just return to userspace: | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1412 | */ | 
|  | 1413 | if (unlikely(!curval)) | 
|  | 1414 | goto out_unlock_release_sem; | 
|  | 1415 |  | 
|  | 1416 | uval = curval; | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1417 |  | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1418 | /* | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1419 | * Set the WAITERS flag, so the owner will know it has someone | 
|  | 1420 | * to wake at next unlock | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1421 | */ | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1422 | newval = curval | FUTEX_WAITERS; | 
|  | 1423 |  | 
|  | 1424 | /* | 
|  | 1425 | * We take over the futex unconditionally in two cases: the futex | 
| Thomas Gleixner | bd19723 | 2007-06-17 21:11:10 +0200 | [diff] [blame] | 1426 | * has no owner (the owner TID is 0), or the previous owner is | 
|  | 1427 | * known to have died (ownerdied, set via the -ESRCH handling | 
|  | 1428 | * below). | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1429 | * | 
|  | 1430 | * This is safe as we are protected by the hash bucket lock ! | 
|  | 1431 | */ | 
|  | 1432 | if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { | 
| Thomas Gleixner | bd19723 | 2007-06-17 21:11:10 +0200 | [diff] [blame] | 1433 | /* Keep the OWNER_DIED bit */ | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1434 | newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current); | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1435 | ownerdied = 0; | 
|  | 1436 | lock_taken = 1; | 
|  | 1437 | } | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1438 |  | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1439 | curval = cmpxchg_futex_value_locked(uaddr, uval, newval); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1440 |  | 
|  | 1441 | if (unlikely(curval == -EFAULT)) | 
|  | 1442 | goto uaddr_faulted; | 
|  | 1443 | if (unlikely(curval != uval)) | 
|  | 1444 | goto retry_locked; | 
|  | 1445 |  | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1446 | /* | 
| Thomas Gleixner | bd19723 | 2007-06-17 21:11:10 +0200 | [diff] [blame] | 1447 | * We took the lock due to an owner-died take over. | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1448 | */ | 
| Thomas Gleixner | bd19723 | 2007-06-17 21:11:10 +0200 | [diff] [blame] | 1449 | if (unlikely(lock_taken)) | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1450 | goto out_unlock_release_sem; | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1451 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1452 | /* | 
|  | 1453 | * We don't have the lock. Look up the PI state (or create it if | 
|  | 1454 | * we are the first waiter): | 
|  | 1455 | */ | 
| Pierre Peiffer | d0aa7a7 | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 1456 | ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1457 |  | 
|  | 1458 | if (unlikely(ret)) { | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1459 | switch (ret) { | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1460 |  | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1461 | case -EAGAIN: | 
|  | 1462 | /* | 
|  | 1463 | * Task is exiting and we just wait for the | 
|  | 1464 | * exit to complete. | 
|  | 1465 | */ | 
|  | 1466 | queue_unlock(&q, hb); | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1467 | futex_unlock_mm(fshared); | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1468 | cond_resched(); | 
|  | 1469 | goto retry; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1470 |  | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1471 | case -ESRCH: | 
|  | 1472 | /* | 
|  | 1473 | * No owner found for this futex. Check if the | 
|  | 1474 | * OWNER_DIED bit is set to figure out whether | 
|  | 1475 | * this is a robust futex or not. | 
|  | 1476 | */ | 
|  | 1477 | if (get_futex_value_locked(&curval, uaddr)) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1478 | goto uaddr_faulted; | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1479 |  | 
|  | 1480 | /* | 
|  | 1481 | * We simply start over in case of a robust | 
|  | 1482 | * futex. The code above will take the futex | 
|  | 1483 | * and return happy. | 
|  | 1484 | */ | 
|  | 1485 | if (curval & FUTEX_OWNER_DIED) { | 
|  | 1486 | ownerdied = 1; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1487 | goto retry_locked; | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1488 | } | 
|  | 1489 | default: | 
|  | 1490 | goto out_unlock_release_sem; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1491 | } | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1492 | } | 
|  | 1493 |  | 
|  | 1494 | /* | 
|  | 1495 | * Only actually queue now that the atomic ops are done: | 
|  | 1496 | */ | 
|  | 1497 | __queue_me(&q, hb); | 
|  | 1498 |  | 
|  | 1499 | /* | 
|  | 1500 | * Now the futex is queued and we have checked the data, we | 
|  | 1501 | * don't want to hold mmap_sem while we sleep. | 
|  | 1502 | */ | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1503 | futex_unlock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1504 |  | 
|  | 1505 | WARN_ON(!q.pi_state); | 
|  | 1506 | /* | 
|  | 1507 | * Block on the PI mutex: | 
|  | 1508 | */ | 
|  | 1509 | if (!trylock) | 
|  | 1510 | ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1); | 
|  | 1511 | else { | 
|  | 1512 | ret = rt_mutex_trylock(&q.pi_state->pi_mutex); | 
|  | 1513 | /* Fixup the trylock return value: */ | 
|  | 1514 | ret = ret ? 0 : -EWOULDBLOCK; | 
|  | 1515 | } | 
|  | 1516 |  | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1517 | futex_lock_mm(fshared); | 
| Vernon Mauery | a99e4e4 | 2006-07-01 04:35:42 -0700 | [diff] [blame] | 1518 | spin_lock(q.lock_ptr); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1519 |  | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1520 | if (!ret) { | 
|  | 1521 | /* | 
|  | 1522 | * Got the lock. We might not be the anticipated owner | 
|  | 1523 | * if we did a lock-steal - fix up the PI-state in | 
|  | 1524 | * that case: | 
|  | 1525 | */ | 
|  | 1526 | if (q.pi_state->owner != curr) | 
|  | 1527 | ret = fixup_pi_state_owner(uaddr, &q, curr); | 
|  | 1528 | } else { | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1529 | /* | 
|  | 1530 | * Catch the rare case where the lock was released | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1531 | * when we were on the way back before we locked the | 
|  | 1532 | * hash bucket. | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1533 | */ | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1534 | if (q.pi_state->owner == curr) { | 
|  | 1535 | /* | 
|  | 1536 | * Try to get the rt_mutex now. This might | 
|  | 1537 | * fail as some other task acquired the | 
|  | 1538 | * rt_mutex after we removed ourself from the | 
|  | 1539 | * rt_mutex waiters list. | 
|  | 1540 | */ | 
|  | 1541 | if (rt_mutex_trylock(&q.pi_state->pi_mutex)) | 
|  | 1542 | ret = 0; | 
|  | 1543 | else { | 
|  | 1544 | /* | 
|  | 1545 | * pi_state is incorrect, some other | 
|  | 1546 | * task did a lock steal and we | 
|  | 1547 | * returned due to timeout or signal | 
|  | 1548 | * without taking the rt_mutex. Too | 
|  | 1549 | * late. We can access the | 
|  | 1550 | * rt_mutex_owner without locking, as | 
|  | 1551 | * the other task is now blocked on | 
|  | 1552 | * the hash bucket lock. Fix the state | 
|  | 1553 | * up. | 
|  | 1554 | */ | 
|  | 1555 | struct task_struct *owner; | 
|  | 1556 | int res; | 
|  | 1557 |  | 
|  | 1558 | owner = rt_mutex_owner(&q.pi_state->pi_mutex); | 
|  | 1559 | res = fixup_pi_state_owner(uaddr, &q, owner); | 
|  | 1560 |  | 
| Thomas Gleixner | cdf71a1 | 2008-01-08 19:47:38 +0100 | [diff] [blame] | 1561 | /* propagate -EFAULT if the fixup failed */ | 
|  | 1562 | if (res) | 
|  | 1563 | ret = res; | 
|  | 1564 | } | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1565 | } else { | 
|  | 1566 | /* | 
|  | 1567 | * Paranoia check. If we did not take the lock | 
|  | 1568 | * in the trylock above, then we should not be | 
|  | 1569 | * the owner of the rtmutex, neither the real | 
|  | 1570 | * nor the pending one: | 
|  | 1571 | */ | 
|  | 1572 | if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr) | 
|  | 1573 | printk(KERN_ERR "futex_lock_pi: ret = %d " | 
|  | 1574 | "pi-mutex: %p pi-state %p\n", ret, | 
|  | 1575 | q.pi_state->pi_mutex.owner, | 
|  | 1576 | q.pi_state->owner); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1577 | } | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1578 | } | 
|  | 1579 |  | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1580 | /* Unqueue and drop the lock */ | 
|  | 1581 | unqueue_me_pi(&q); | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1582 | futex_unlock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1583 |  | 
| Thomas Gleixner | c5780e9 | 2006-09-08 09:47:15 -0700 | [diff] [blame] | 1584 | return ret != -EINTR ? ret : -ERESTARTNOINTR; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1585 |  | 
|  | 1586 | out_unlock_release_sem: | 
|  | 1587 | queue_unlock(&q, hb); | 
|  | 1588 |  | 
|  | 1589 | out_release_sem: | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1590 | futex_unlock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1591 | return ret; | 
|  | 1592 |  | 
|  | 1593 | uaddr_faulted: | 
|  | 1594 | /* | 
|  | 1595 | * We have to r/w  *(int __user *)uaddr, but we can't modify it | 
|  | 1596 | * non-atomically.  Therefore, if get_user below is not | 
|  | 1597 | * enough, we need to handle the fault ourselves, while | 
|  | 1598 | * still holding the mmap_sem. | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1599 | * | 
|  | 1600 | * ... and hb->lock. :-) --ANK | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1601 | */ | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1602 | queue_unlock(&q, hb); | 
|  | 1603 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1604 | if (attempt++) { | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1605 | ret = futex_handle_fault((unsigned long)uaddr, fshared, | 
|  | 1606 | attempt); | 
|  | 1607 | if (ret) | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1608 | goto out_release_sem; | 
|  | 1609 | goto retry_unlocked; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1610 | } | 
|  | 1611 |  | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1612 | futex_unlock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1613 |  | 
|  | 1614 | ret = get_user(uval, uaddr); | 
|  | 1615 | if (!ret && (uval != -EFAULT)) | 
|  | 1616 | goto retry; | 
|  | 1617 |  | 
|  | 1618 | return ret; | 
|  | 1619 | } | 
|  | 1620 |  | 
|  | 1621 | /* | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1622 | * Userspace attempted a TID -> 0 atomic transition, and failed. | 
|  | 1623 | * This is the in-kernel slowpath: we look up the PI state (if any), | 
|  | 1624 | * and do the rt-mutex unlock. | 
|  | 1625 | */ | 
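|  |  | /* | 
|  |  | * Illustrative userspace fast path (sketch; lock is the same placeholder | 
|  |  | * structure as above): the uncontended release is a single TID -> 0 | 
|  |  | * cmpxchg, and FUTEX_UNLOCK_PI is only needed when the WAITERS or | 
|  |  | * OWNER_DIED bits are set in the futex word: | 
|  |  | * | 
|  |  | *   if (cmpxchg(&lock->futex, gettid(), 0) != gettid()) | 
|  |  | *           syscall(SYS_futex, &lock->futex, FUTEX_UNLOCK_PI); | 
|  |  | */ | 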
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1626 | static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1627 | { | 
|  | 1628 | struct futex_hash_bucket *hb; | 
|  | 1629 | struct futex_q *this, *next; | 
|  | 1630 | u32 uval; | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1631 | struct plist_head *head; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1632 | union futex_key key; | 
|  | 1633 | int ret, attempt = 0; | 
|  | 1634 |  | 
|  | 1635 | retry: | 
|  | 1636 | if (get_user(uval, uaddr)) | 
|  | 1637 | return -EFAULT; | 
|  | 1638 | /* | 
|  | 1639 | * We release only a lock we actually own: | 
|  | 1640 | */ | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1641 | if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1642 | return -EPERM; | 
|  | 1643 | /* | 
|  | 1644 | * First take all the futex related locks: | 
|  | 1645 | */ | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1646 | futex_lock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1647 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1648 | ret = get_futex_key(uaddr, fshared, &key); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1649 | if (unlikely(ret != 0)) | 
|  | 1650 | goto out; | 
|  | 1651 |  | 
|  | 1652 | hb = hash_futex(&key); | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1653 | retry_unlocked: | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1654 | spin_lock(&hb->lock); | 
|  | 1655 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1656 | /* | 
|  | 1657 | * To avoid races, try to do the TID -> 0 atomic transition | 
|  | 1658 | * again. If it succeeds then we can return without waking | 
|  | 1659 | * anyone else up: | 
|  | 1660 | */ | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1661 | if (!(uval & FUTEX_OWNER_DIED)) | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1662 | uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0); | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1663 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1664 |  | 
|  | 1665 | if (unlikely(uval == -EFAULT)) | 
|  | 1666 | goto pi_faulted; | 
|  | 1667 | /* | 
|  | 1668 | * Rare case: we managed to release the lock atomically, | 
|  | 1669 | * no need to wake anyone else up: | 
|  | 1670 | */ | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1671 | if (unlikely(uval == task_pid_vnr(current))) | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1672 | goto out_unlock; | 
|  | 1673 |  | 
|  | 1674 | /* | 
|  | 1675 | * Ok, other tasks may need to be woken up - check waiters | 
|  | 1676 | * and do the wakeup if necessary: | 
|  | 1677 | */ | 
|  | 1678 | head = &hb->chain; | 
|  | 1679 |  | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1680 | plist_for_each_entry_safe(this, next, head, list) { | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1681 | if (!match_futex (&this->key, &key)) | 
|  | 1682 | continue; | 
|  | 1683 | ret = wake_futex_pi(uaddr, uval, this); | 
|  | 1684 | /* | 
|  | 1685 | * The atomic access to the futex value | 
|  | 1686 | * generated a pagefault, so retry the | 
|  | 1687 | * user-access and the wakeup: | 
|  | 1688 | */ | 
|  | 1689 | if (ret == -EFAULT) | 
|  | 1690 | goto pi_faulted; | 
|  | 1691 | goto out_unlock; | 
|  | 1692 | } | 
|  | 1693 | /* | 
|  | 1694 | * No waiters - kernel unlocks the futex: | 
|  | 1695 | */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1696 | if (!(uval & FUTEX_OWNER_DIED)) { | 
|  | 1697 | ret = unlock_futex_pi(uaddr, uval); | 
|  | 1698 | if (ret == -EFAULT) | 
|  | 1699 | goto pi_faulted; | 
|  | 1700 | } | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1701 |  | 
|  | 1702 | out_unlock: | 
|  | 1703 | spin_unlock(&hb->lock); | 
|  | 1704 | out: | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1705 | futex_unlock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1706 |  | 
|  | 1707 | return ret; | 
|  | 1708 |  | 
|  | 1709 | pi_faulted: | 
|  | 1710 | /* | 
|  | 1711 | * We have to r/w  *(int __user *)uaddr, but we can't modify it | 
|  | 1712 | * non-atomically.  Therefore, if get_user below is not | 
|  | 1713 | * enough, we need to handle the fault ourselves, while | 
|  | 1714 | * still holding the mmap_sem. | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1715 | * | 
|  | 1716 | * ... and hb->lock. --ANK | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1717 | */ | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1718 | spin_unlock(&hb->lock); | 
|  | 1719 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1720 | if (attempt++) { | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1721 | ret = futex_handle_fault((unsigned long)uaddr, fshared, | 
|  | 1722 | attempt); | 
|  | 1723 | if (ret) | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1724 | goto out; | 
| john stultz | 187226f | 2007-08-22 14:01:10 -0700 | [diff] [blame] | 1725 | uval = 0; | 
| Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1726 | goto retry_unlocked; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1727 | } | 
|  | 1728 |  | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1729 | futex_unlock_mm(fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1730 |  | 
|  | 1731 | ret = get_user(uval, uaddr); | 
|  | 1732 | if (!ret && (uval != -EFAULT)) | 
|  | 1733 | goto retry; | 
|  | 1734 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | return ret; | 
|  | 1736 | } | 
|  | 1737 |  | 
|  | 1738 | static int futex_close(struct inode *inode, struct file *filp) | 
|  | 1739 | { | 
|  | 1740 | struct futex_q *q = filp->private_data; | 
|  | 1741 |  | 
|  | 1742 | unqueue_me(q); | 
|  | 1743 | kfree(q); | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1744 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1745 | return 0; | 
|  | 1746 | } | 
|  | 1747 |  | 
|  | 1748 | /* This is one-shot: once it's gone off you need a new fd */ | 
|  | 1749 | static unsigned int futex_poll(struct file *filp, | 
|  | 1750 | struct poll_table_struct *wait) | 
|  | 1751 | { | 
|  | 1752 | struct futex_q *q = filp->private_data; | 
|  | 1753 | int ret = 0; | 
|  | 1754 |  | 
|  | 1755 | poll_wait(filp, &q->waiters, wait); | 
|  | 1756 |  | 
|  | 1757 | /* | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1758 | * plist_node_empty() is safe here without any lock. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | * q->lock_ptr != 0 is not safe, because of ordering against wakeup. | 
|  | 1760 | */ | 
| Pierre Peiffer | ec92d08 | 2007-05-09 02:35:00 -0700 | [diff] [blame] | 1761 | if (plist_node_empty(&q->list)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | ret = POLLIN | POLLRDNORM; | 
|  | 1763 |  | 
|  | 1764 | return ret; | 
|  | 1765 | } | 
|  | 1766 |  | 
| Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 1767 | static const struct file_operations futex_fops = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | .release	= futex_close, | 
|  | 1769 | .poll		= futex_poll, | 
|  | 1770 | }; | 
|  | 1771 |  | 
|  | 1772 | /* | 
|  | 1773 | * Signal allows the caller to avoid the race which would occur if they | 
|  | 1774 | * set the sigio stuff up afterwards. | 
|  | 1775 | */ | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 1776 | static int futex_fd(u32 __user *uaddr, int signal) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1777 | { | 
|  | 1778 | struct futex_q *q; | 
|  | 1779 | struct file *filp; | 
|  | 1780 | int ret, err; | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1781 | struct rw_semaphore *fshared; | 
| Andrew Morton | 19c6b6e | 2006-11-02 22:07:17 -0800 | [diff] [blame] | 1782 | static unsigned long printk_interval; | 
|  | 1783 |  | 
|  | 1784 | if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) { | 
|  | 1785 | printk(KERN_WARNING "Process `%s' used FUTEX_FD, which " | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1786 | "will be removed from the kernel in June 2007\n", | 
|  | 1787 | current->comm); | 
| Andrew Morton | 19c6b6e | 2006-11-02 22:07:17 -0800 | [diff] [blame] | 1788 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1789 |  | 
|  | 1790 | ret = -EINVAL; | 
| Jesper Juhl | 7ed20e1 | 2005-05-01 08:59:14 -0700 | [diff] [blame] | 1791 | if (!valid_signal(signal)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1792 | goto out; | 
|  | 1793 |  | 
|  | 1794 | ret = get_unused_fd(); | 
|  | 1795 | if (ret < 0) | 
|  | 1796 | goto out; | 
|  | 1797 | filp = get_empty_filp(); | 
|  | 1798 | if (!filp) { | 
|  | 1799 | put_unused_fd(ret); | 
|  | 1800 | ret = -ENFILE; | 
|  | 1801 | goto out; | 
|  | 1802 | } | 
|  | 1803 | filp->f_op = &futex_fops; | 
| Josef "Jeff" Sipek | f3a43f3 | 2006-12-08 02:36:43 -0800 | [diff] [blame] | 1804 | filp->f_path.mnt = mntget(futex_mnt); | 
|  | 1805 | filp->f_path.dentry = dget(futex_mnt->mnt_root); | 
|  | 1806 | filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1807 |  | 
|  | 1808 | if (signal) { | 
| Eric W. Biederman | 609d7fa | 2006-10-02 02:17:15 -0700 | [diff] [blame] | 1809 | err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1810 | if (err < 0) { | 
| Pekka Enberg | 39ed3fd | 2005-09-06 15:17:44 -0700 | [diff] [blame] | 1811 | goto error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1812 | } | 
|  | 1813 | filp->f_owner.signum = signal; | 
|  | 1814 | } | 
|  | 1815 |  | 
|  | 1816 | q = kmalloc(sizeof(*q), GFP_KERNEL); | 
|  | 1817 | if (!q) { | 
| Pekka Enberg | 39ed3fd | 2005-09-06 15:17:44 -0700 | [diff] [blame] | 1818 | err = -ENOMEM; | 
|  | 1819 | goto error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1820 | } | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1821 | q->pi_state = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1822 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1823 | fshared = ¤t->mm->mmap_sem; | 
|  | 1824 | down_read(fshared); | 
|  | 1825 | err = get_futex_key(uaddr, fshared, &q->key); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1826 |  | 
|  | 1827 | if (unlikely(err != 0)) { | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1828 | up_read(fshared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1829 | kfree(q); | 
| Pekka Enberg | 39ed3fd | 2005-09-06 15:17:44 -0700 | [diff] [blame] | 1830 | goto error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1831 | } | 
|  | 1832 |  | 
|  | 1833 | /* | 
|  | 1834 | * queue_me() must be called before releasing mmap_sem, because | 
|  | 1835 | * key->shared.inode needs to be referenced while holding it. | 
|  | 1836 | */ | 
|  | 1837 | filp->private_data = q; | 
|  | 1838 |  | 
|  | 1839 | queue_me(q, ret, filp); | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 1840 | up_read(fshared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1841 |  | 
|  | 1842 | /* Now we map fd to filp, so userspace can access it */ | 
|  | 1843 | fd_install(ret, filp); | 
|  | 1844 | out: | 
|  | 1845 | return ret; | 
| Pekka Enberg | 39ed3fd | 2005-09-06 15:17:44 -0700 | [diff] [blame] | 1846 | error: | 
|  | 1847 | put_unused_fd(ret); | 
|  | 1848 | put_filp(filp); | 
|  | 1849 | ret = err; | 
|  | 1850 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1851 | } | 
|  | 1852 |  | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1853 | /* | 
|  | 1854 | * Support for robust futexes: the kernel cleans up held futexes at | 
|  | 1855 | * thread exit time. | 
|  | 1856 | * | 
|  | 1857 | * Implementation: user-space maintains a per-thread list of locks it | 
|  | 1858 | * is holding. Upon do_exit(), the kernel carefully walks this list, | 
|  | 1859 | * and marks all locks that are owned by this thread with the | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1860 | * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1861 | * always manipulated with the lock held, so the list is private and | 
|  | 1862 | * per-thread. Userspace also maintains a per-thread 'list_op_pending' | 
|  | 1863 | * field, to allow the kernel to clean up if the thread dies after | 
|  | 1864 | * acquiring the lock, but just before it could have added itself to | 
|  | 1865 | * the list. There can only be one such pending lock. | 
|  | 1866 | */ | 
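|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative userspace registration (sketch; struct layout assumed from | 
|  |  | * the futex UAPI headers, struct my_lock is a placeholder): | 
|  |  | * | 
|  |  | *   static struct robust_list_head head = { | 
|  |  | *           .list            = { &head.list },   - empty circular list | 
|  |  | *           .futex_offset    = offsetof(struct my_lock, futex), | 
|  |  | *           .list_op_pending = NULL, | 
|  |  | *   }; | 
|  |  | *   syscall(SYS_set_robust_list, &head, sizeof(head)); | 
|  |  | */ | 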
|  | 1867 |  | 
|  | 1868 | /** | 
|  | 1869 | * sys_set_robust_list - set the robust-futex list head of a task | 
|  | 1870 | * @head: pointer to the list-head | 
|  | 1871 | * @len: length of the list-head, as userspace expects | 
|  | 1872 | */ | 
|  | 1873 | asmlinkage long | 
|  | 1874 | sys_set_robust_list(struct robust_list_head __user *head, | 
|  | 1875 | size_t len) | 
|  | 1876 | { | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 1877 | if (!futex_cmpxchg_enabled) | 
|  | 1878 | return -ENOSYS; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1879 | /* | 
|  | 1880 | * The kernel knows only one size for now: | 
|  | 1881 | */ | 
|  | 1882 | if (unlikely(len != sizeof(*head))) | 
|  | 1883 | return -EINVAL; | 
|  | 1884 |  | 
|  | 1885 | current->robust_list = head; | 
|  | 1886 |  | 
|  | 1887 | return 0; | 
|  | 1888 | } | 
|  | 1889 |  | 
|  | 1890 | /** | 
|  | 1891 | * sys_get_robust_list - get the robust-futex list head of a task | 
|  | 1892 | * @pid: pid of the process [zero for current task] | 
|  | 1893 | * @head_ptr: pointer to a list-head pointer, the kernel fills it in | 
|  | 1894 | * @len_ptr: pointer to a length field, the kernel fills in the header size | 
|  | 1895 | */ | 
|  | 1896 | asmlinkage long | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 1897 | sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1898 | size_t __user *len_ptr) | 
|  | 1899 | { | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 1900 | struct robust_list_head __user *head; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1901 | unsigned long ret; | 
|  | 1902 |  | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 1903 | if (!futex_cmpxchg_enabled) | 
|  | 1904 | return -ENOSYS; | 
|  | 1905 |  | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1906 | if (!pid) | 
|  | 1907 | head = current->robust_list; | 
|  | 1908 | else { | 
|  | 1909 | struct task_struct *p; | 
|  | 1910 |  | 
|  | 1911 | ret = -ESRCH; | 
| Oleg Nesterov | aaa2a97 | 2006-09-29 02:00:55 -0700 | [diff] [blame] | 1912 | rcu_read_lock(); | 
| Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1913 | p = find_task_by_vpid(pid); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1914 | if (!p) | 
|  | 1915 | goto err_unlock; | 
|  | 1916 | ret = -EPERM; | 
|  | 1917 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 
|  | 1918 | !capable(CAP_SYS_PTRACE)) | 
|  | 1919 | goto err_unlock; | 
|  | 1920 | head = p->robust_list; | 
| Oleg Nesterov | aaa2a97 | 2006-09-29 02:00:55 -0700 | [diff] [blame] | 1921 | rcu_read_unlock(); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1922 | } | 
|  | 1923 |  | 
|  | 1924 | if (put_user(sizeof(*head), len_ptr)) | 
|  | 1925 | return -EFAULT; | 
|  | 1926 | return put_user(head, head_ptr); | 
|  | 1927 |  | 
|  | 1928 | err_unlock: | 
| Oleg Nesterov | aaa2a97 | 2006-09-29 02:00:55 -0700 | [diff] [blame] | 1929 | rcu_read_unlock(); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1930 |  | 
|  | 1931 | return ret; | 
|  | 1932 | } | 
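For completeness, the matching query from userspace (e.g. from a debugger or a checkpointing tool) is the minimal sketch below. pid 0 means the calling task; for any other pid the euid/CAP_SYS_PTRACE check above applies. SYS_get_robust_list is assumed to be exposed by <sys/syscall.h>; the helper name is hypothetical.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static int dump_robust_head(int pid)
{
	void *head = NULL;
	size_t len = 0;

	/* The kernel fills in the registered head pointer and the size of
	 * struct robust_list_head it expects. */
	if (syscall(SYS_get_robust_list, pid, &head, &len) != 0)
		return -1;

	printf("pid %d: robust list head %p, header size %zu\n", pid, head, len);
	return 0;
}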
|  | 1933 |  | 
|  | 1934 | /* | 
|  | 1935 | * Process a futex-list entry, check whether it's owned by the | 
|  | 1936 | * dying task, and do notification if so: | 
|  | 1937 | */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1938 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1939 | { | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1940 | u32 uval, nval, mval; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1941 |  | 
| Ingo Molnar | 8f17d3a | 2006-03-27 01:16:27 -0800 | [diff] [blame] | 1942 | retry: | 
|  | 1943 | if (get_user(uval, uaddr)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1944 | return -1; | 
|  | 1945 |  | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1946 | if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1947 | /* | 
|  | 1948 | * Ok, this dying thread is truly holding a futex | 
|  | 1949 | * of interest. Set the OWNER_DIED bit atomically | 
|  | 1950 | * via cmpxchg, and if the value had FUTEX_WAITERS | 
|  | 1951 | * set, wake up a waiter (if any). (We have to do a | 
|  | 1952 | * futex_wake() even if OWNER_DIED is already set - | 
|  | 1953 | * to handle the rare but possible case of recursive | 
|  | 1954 | * thread-death.) The rest of the cleanup is done in | 
|  | 1955 | * userspace. | 
|  | 1956 | */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1957 | mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; | 
|  | 1958 | nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); | 
|  | 1959 |  | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1960 | if (nval == -EFAULT) | 
|  | 1961 | return -1; | 
|  | 1962 |  | 
|  | 1963 | if (nval != uval) | 
| Ingo Molnar | 8f17d3a | 2006-03-27 01:16:27 -0800 | [diff] [blame] | 1964 | goto retry; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1965 |  | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1966 | /* | 
|  | 1967 | * Wake robust non-PI futexes here. The wakeup of | 
|  | 1968 | * PI futexes happens in exit_pi_state(): | 
|  | 1969 | */ | 
| Thomas Gleixner | 36cf3b5 | 2007-07-15 23:41:20 -0700 | [diff] [blame] | 1970 | if (!pi && (uval & FUTEX_WAITERS)) | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 1971 | futex_wake(uaddr, &curr->mm->mmap_sem, 1, | 
|  | 1972 | FUTEX_BITSET_MATCH_ANY); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1973 | } | 
|  | 1974 | return 0; | 
|  | 1975 | } | 
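The cmpxchg above rewrites the futex word in one step: the dead owner's TID is cleared, FUTEX_WAITERS is preserved so a pending wakeup is not lost, and FUTEX_OWNER_DIED tells the next locker that the protected state may need recovery. A standalone sketch of that value transformation, assuming the FUTEX_* constants exported in <linux/futex.h>:

#include <assert.h>
#include <linux/futex.h>   /* FUTEX_TID_MASK, FUTEX_WAITERS, FUTEX_OWNER_DIED */

/* Same transformation as the mval computation above. */
static unsigned int mark_owner_died(unsigned int uval)
{
	return (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
}

int main(void)
{
	unsigned int held_with_waiters = 1234u | FUTEX_WAITERS;

	/* The TID is cleared, the waiters bit survives, OWNER_DIED is set. */
	assert(mark_owner_died(held_with_waiters) ==
	       (FUTEX_WAITERS | FUTEX_OWNER_DIED));
	assert((mark_owner_died(1234u) & FUTEX_TID_MASK) == 0);
	return 0;
}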
|  | 1976 |  | 
|  | 1977 | /* | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1978 | * Fetch a robust-list pointer. Bit 0 signals PI futexes: | 
|  | 1979 | */ | 
|  | 1980 | static inline int fetch_robust_entry(struct robust_list __user **entry, | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 1981 | struct robust_list __user * __user *head, | 
|  | 1982 | int *pi) | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1983 | { | 
|  | 1984 | unsigned long uentry; | 
|  | 1985 |  | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 1986 | if (get_user(uentry, (unsigned long __user *)head)) | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1987 | return -EFAULT; | 
|  | 1988 |  | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 1989 | *entry = (void __user *)(uentry & ~1UL); | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 1990 | *pi = uentry & 1; | 
|  | 1991 |  | 
|  | 1992 | return 0; | 
|  | 1993 | } | 
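Entries on the robust list are at least word-aligned, so bit 0 of the pointer is free to carry the PI flag; fetch_robust_entry() strips it off again here. A minimal sketch of the encoding userspace would use (helper names are hypothetical):

#include <stdint.h>

static void *robust_entry_encode(void *entry, int is_pi)
{
	return (void *)((uintptr_t)entry | (is_pi ? 1u : 0u));
}

static void *robust_entry_decode(void *tagged, int *is_pi)
{
	*is_pi = (int)((uintptr_t)tagged & 1u);
	return (void *)((uintptr_t)tagged & ~(uintptr_t)1);
}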
|  | 1994 |  | 
|  | 1995 | /* | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 1996 | * Walk curr->robust_list (very carefully, it's a userspace list!) | 
|  | 1997 | * and mark any locks found there dead, and notify any waiters. | 
|  | 1998 | * | 
|  | 1999 | * We silently return on any sign of list-walking problem. | 
|  | 2000 | */ | 
|  | 2001 | void exit_robust_list(struct task_struct *curr) | 
|  | 2002 | { | 
|  | 2003 | struct robust_list_head __user *head = curr->robust_list; | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2004 | struct robust_list __user *entry, *next_entry, *pending; | 
|  | 2005 | unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2006 | unsigned long futex_offset; | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2007 | int rc; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2008 |  | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2009 | if (!futex_cmpxchg_enabled) | 
|  | 2010 | return; | 
|  | 2011 |  | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2012 | /* | 
|  | 2013 | * Fetch the list head (which was registered earlier, via | 
|  | 2014 | * sys_set_robust_list()): | 
|  | 2015 | */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2016 | if (fetch_robust_entry(&entry, &head->list.next, &pi)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2017 | return; | 
|  | 2018 | /* | 
|  | 2019 | * Fetch the relative futex offset: | 
|  | 2020 | */ | 
|  | 2021 | if (get_user(futex_offset, &head->futex_offset)) | 
|  | 2022 | return; | 
|  | 2023 | /* | 
|  | 2024 | * Fetch any possibly pending lock-add first, and handle it | 
|  | 2025 | * if it exists: | 
|  | 2026 | */ | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2027 | if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2028 | return; | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2029 |  | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2030 | next_entry = NULL;	/* avoid warning with gcc */ | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2031 | while (entry != &head->list) { | 
|  | 2032 | /* | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2033 | * Fetch the next entry in the list before calling | 
|  | 2034 | * handle_futex_death: | 
|  | 2035 | */ | 
|  | 2036 | rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); | 
|  | 2037 | /* | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2038 | * A pending lock might already be on the list, so | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2039 | * don't process it twice: | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2040 | */ | 
|  | 2041 | if (entry != pending) | 
| Al Viro | ba46df9 | 2006-10-10 22:46:07 +0100 | [diff] [blame] | 2042 | if (handle_futex_death((void __user *)entry + futex_offset, | 
| Ingo Molnar | e3f2dde | 2006-07-29 05:17:57 +0200 | [diff] [blame] | 2043 | curr, pi)) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2044 | return; | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2045 | if (rc) | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2046 | return; | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2047 | entry = next_entry; | 
|  | 2048 | pi = next_pi; | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2049 | /* | 
|  | 2050 | * Avoid excessively long or circular lists: | 
|  | 2051 | */ | 
|  | 2052 | if (!--limit) | 
|  | 2053 | break; | 
|  | 2054 |  | 
|  | 2055 | cond_resched(); | 
|  | 2056 | } | 
| Martin Schwidefsky | 9f96cb1 | 2007-10-01 01:20:13 -0700 | [diff] [blame] | 2057 |  | 
|  | 2058 | if (pending) | 
|  | 2059 | handle_futex_death((void __user *)pending + futex_offset, | 
|  | 2060 | curr, pip); | 
| Ingo Molnar | 0771dfe | 2006-03-27 01:16:22 -0800 | [diff] [blame] | 2061 | } | 
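The walk above is only safe because userspace publishes every lock before relying on it. Continuing the hypothetical struct my_robust_mutex and per-thread robust_head from the registration sketch earlier (so this fragment is deliberately not self-contained), a cooperating trylock path might order its stores as follows; FUTEX_WAITERS handling and the blocking slow path are omitted, so treat it purely as an ordering illustration.

#include <errno.h>

static int robust_mutex_trylock(struct my_robust_mutex *m, unsigned int tid)
{
	/* 1. Publish the intent: if this thread dies after the cmpxchg but
	 *    before the list insert, exit_robust_list() still finds the
	 *    lock via list_op_pending. */
	robust_head.list_op_pending = &m->list;
	__sync_synchronize();

	/* 2. Acquire: 0 -> TID. */
	if (__sync_val_compare_and_swap(&m->futex, 0, tid) != 0) {
		robust_head.list_op_pending = NULL;
		return -EBUSY;
	}

	/* 3. Link the held lock into the per-thread list, then retire the
	 *    pending entry. */
	m->list.next = robust_head.list.next;
	robust_head.list.next = &m->list;
	__sync_synchronize();
	robust_head.list_op_pending = NULL;
	return 0;
}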
|  | 2062 |  | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 2063 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 2064 | u32 __user *uaddr2, u32 val2, u32 val3) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2065 | { | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2066 | int ret = -ENOSYS; | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2067 | int cmd = op & FUTEX_CMD_MASK; | 
|  | 2068 | struct rw_semaphore *fshared = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2069 |  | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2070 | if (!(op & FUTEX_PRIVATE_FLAG)) | 
|  | 2071 | fshared = &current->mm->mmap_sem; | 
|  | 2072 |  | 
|  | 2073 | switch (cmd) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2074 | case FUTEX_WAIT: | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 2075 | val3 = FUTEX_BITSET_MATCH_ANY; | 
|  | 2076 | case FUTEX_WAIT_BITSET: | 
|  | 2077 | ret = futex_wait(uaddr, fshared, val, timeout, val3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 | break; | 
|  | 2079 | case FUTEX_WAKE: | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 2080 | val3 = FUTEX_BITSET_MATCH_ANY; | 
|  | 2081 | case FUTEX_WAKE_BITSET: | 
|  | 2082 | ret = futex_wake(uaddr, fshared, val, val3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2083 | break; | 
|  | 2084 | case FUTEX_FD: | 
|  | 2085 | /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */ | 
|  | 2086 | ret = futex_fd(uaddr, val); | 
|  | 2087 | break; | 
|  | 2088 | case FUTEX_REQUEUE: | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2089 | ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2090 | break; | 
|  | 2091 | case FUTEX_CMP_REQUEUE: | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2092 | ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2093 | break; | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 2094 | case FUTEX_WAKE_OP: | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2095 | ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3); | 
| Jakub Jelinek | 4732efb | 2005-09-06 15:16:25 -0700 | [diff] [blame] | 2096 | break; | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2097 | case FUTEX_LOCK_PI: | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2098 | if (futex_cmpxchg_enabled) | 
|  | 2099 | ret = futex_lock_pi(uaddr, fshared, val, timeout, 0); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2100 | break; | 
|  | 2101 | case FUTEX_UNLOCK_PI: | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2102 | if (futex_cmpxchg_enabled) | 
|  | 2103 | ret = futex_unlock_pi(uaddr, fshared); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2104 | break; | 
|  | 2105 | case FUTEX_TRYLOCK_PI: | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2106 | if (futex_cmpxchg_enabled) | 
|  | 2107 | ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1); | 
| Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 2108 | break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2109 | default: | 
|  | 2110 | ret = -ENOSYS; | 
|  | 2111 | } | 
|  | 2112 | return ret; | 
|  | 2113 | } | 
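do_futex() is the single multiplexer behind futex(2): op carries both a command and modifier bits, and FUTEX_PRIVATE_FLAG only decides whether the key lookup must take mmap_sem (fshared) or may assume a process-private mapping. Since glibc provides no futex() wrapper, userspace issues the raw syscall; a minimal private wait/wake pair might look like the sketch below (constants from <linux/futex.h>, helper names hypothetical):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sleep while *addr still holds 'val'; returns 0 on wakeup, or -1 with
 * errno EAGAIN if the value no longer matches at call time. */
static long futex_wait_private(unsigned int *addr, unsigned int val)
{
	return syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
		       val, NULL, NULL, 0);
}

/* Wake up to 'nr' waiters blocked on addr; returns the number woken. */
static long futex_wake_private(unsigned int *addr, int nr)
{
	return syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
		       nr, NULL, NULL, 0);
}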
|  | 2114 |  | 
|  | 2115 |  | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 2116 | asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2117 | struct timespec __user *utime, u32 __user *uaddr2, | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 2118 | u32 val3) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2119 | { | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 2120 | struct timespec ts; | 
|  | 2121 | ktime_t t, *tp = NULL; | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 2122 | u32 val2 = 0; | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2123 | int cmd = op & FUTEX_CMD_MASK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2124 |  | 
| Thomas Gleixner | cd68998 | 2008-02-01 17:45:14 +0100 | [diff] [blame] | 2125 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || | 
|  | 2126 | cmd == FUTEX_WAIT_BITSET)) { | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 2127 | if (copy_from_user(&ts, utime, sizeof(ts)) != 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2128 | return -EFAULT; | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 2129 | if (!timespec_valid(&ts)) | 
| Thomas Gleixner | 9741ef9 | 2006-03-31 02:31:32 -0800 | [diff] [blame] | 2130 | return -EINVAL; | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 2131 |  | 
|  | 2132 | t = timespec_to_ktime(ts); | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2133 | if (cmd == FUTEX_WAIT) | 
| Thomas Gleixner | 5a7780e | 2008-02-13 09:20:43 +0100 | [diff] [blame] | 2134 | t = ktime_add_safe(ktime_get(), t); | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 2135 | tp = &t; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2136 | } | 
|  | 2137 | /* | 
| Eric Dumazet | 34f01cc | 2007-05-09 02:35:04 -0700 | [diff] [blame] | 2138 | * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE. | 
| Andreas Schwab | f54f098 | 2007-07-31 00:38:51 -0700 | [diff] [blame] | 2139 | * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2140 | */ | 
| Andreas Schwab | f54f098 | 2007-07-31 00:38:51 -0700 | [diff] [blame] | 2141 | if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || | 
|  | 2142 | cmd == FUTEX_WAKE_OP) | 
| Ingo Molnar | e2970f2 | 2006-06-27 02:54:47 -0700 | [diff] [blame] | 2143 | val2 = (u32) (unsigned long) utime; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2144 |  | 
| Pierre Peiffer | c19384b | 2007-05-09 02:35:02 -0700 | [diff] [blame] | 2145 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2146 | } | 
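Note the asymmetry handled here: only FUTEX_WAIT treats utime as a relative interval, converted to an absolute monotonic expiry via ktime_add_safe(), while FUTEX_LOCK_PI and FUTEX_WAIT_BITSET pass it through as an already-absolute timestamp. A sketch of what that means for a caller, assuming the installed <linux/futex.h> defines FUTEX_WAIT_BITSET and FUTEX_BITSET_MATCH_ANY (helper names hypothetical):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* FUTEX_WAIT: 'timeout' is how long to wait, relative to now. */
static long futex_wait_rel(unsigned int *addr, unsigned int val,
			   const struct timespec *timeout)
{
	return syscall(SYS_futex, addr, FUTEX_WAIT, val, timeout, NULL, 0);
}

/* FUTEX_WAIT_BITSET: 'abstime' is an absolute CLOCK_MONOTONIC expiry;
 * sys_futex() above does not add ktime_get() for this command. */
static long futex_wait_abs(unsigned int *addr, unsigned int val,
			   const struct timespec *abstime)
{
	return syscall(SYS_futex, addr, FUTEX_WAIT_BITSET, val, abstime,
		       NULL, FUTEX_BITSET_MATCH_ANY);
}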
|  | 2147 |  | 
| David Howells | 454e239 | 2006-06-23 02:02:57 -0700 | [diff] [blame] | 2148 | static int futexfs_get_sb(struct file_system_type *fs_type, | 
|  | 2149 | int flags, const char *dev_name, void *data, | 
|  | 2150 | struct vfsmount *mnt) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2151 | { | 
| Andrey Mirkin | fd5eea4 | 2007-10-16 23:30:13 -0700 | [diff] [blame] | 2152 | return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2153 | } | 
|  | 2154 |  | 
|  | 2155 | static struct file_system_type futex_fs_type = { | 
|  | 2156 | .name		= "futexfs", | 
|  | 2157 | .get_sb		= futexfs_get_sb, | 
|  | 2158 | .kill_sb	= kill_anon_super, | 
|  | 2159 | }; | 
|  | 2160 |  | 
|  | 2161 | static int __init init(void) | 
|  | 2162 | { | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2163 | u32 curval; | 
| Thomas Gleixner | 3e4ab74 | 2008-02-23 15:23:55 -0800 | [diff] [blame] | 2164 | int i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2165 |  | 
| Thomas Gleixner | a0c1e90 | 2008-02-23 15:23:57 -0800 | [diff] [blame] | 2166 | /* | 
|  | 2167 | * This will fail and we want it. Some arch implementations do | 
|  | 2168 | * runtime detection of the futex_atomic_cmpxchg_inatomic() | 
|  | 2169 | * functionality. We want to know that before we call in any | 
|  | 2170 | * of the complex code paths. Also we want to prevent | 
|  | 2171 | * registration of robust lists in that case. NULL is | 
|  | 2172 | * guaranteed to fault and we get -EFAULT on a functional | 
|  | 2173 | * implementation; the non-functional ones will return | 
|  | 2174 | * -ENOSYS. | 
|  | 2175 | */ | 
|  | 2176 | curval = cmpxchg_futex_value_locked(NULL, 0, 0); | 
|  | 2177 | if (curval == -EFAULT) | 
|  | 2178 | futex_cmpxchg_enabled = 1; | 
|  | 2179 |  | 
| Thomas Gleixner | 3e4ab74 | 2008-02-23 15:23:55 -0800 | [diff] [blame] | 2180 | for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { | 
|  | 2181 | plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock); | 
|  | 2182 | spin_lock_init(&futex_queues[i].lock); | 
|  | 2183 | } | 
|  | 2184 |  | 
|  | 2185 | i = register_filesystem(&futex_fs_type); | 
| Akinobu Mita | 95362fa | 2006-12-06 20:39:03 -0800 | [diff] [blame] | 2186 | if (i) | 
|  | 2187 | return i; | 
|  | 2188 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2189 | futex_mnt = kern_mount(&futex_fs_type); | 
| Akinobu Mita | 95362fa | 2006-12-06 20:39:03 -0800 | [diff] [blame] | 2190 | if (IS_ERR(futex_mnt)) { | 
|  | 2191 | unregister_filesystem(&futex_fs_type); | 
|  | 2192 | return PTR_ERR(futex_mnt); | 
|  | 2193 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2194 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2195 | return 0; | 
|  | 2196 | } | 
|  | 2197 | __initcall(init); |
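The probe in init() means that on architectures without a working atomic cmpxchg on user memory, the robust-list and PI operations report -ENOSYS instead of misbehaving. A userspace sketch of detecting that situation (hypothetical helper; kernels that lack the syscall entirely give the same ENOSYS result):

#include <errno.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Returns 1 if this kernel/arch combination supports the futex ops that
 * need futex_atomic_cmpxchg_inatomic() (robust lists, PI futexes). */
static int robust_futexes_supported(void)
{
	void *head = NULL;
	size_t len = 0;

	if (syscall(SYS_get_robust_list, 0, &head, &len) == 0)
		return 1;

	return errno != ENOSYS;
}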