/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

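/*
 * For illustration (a sketch of the syscall entry path, which lives
 * outside this file): the caller derives these flags from the futex op
 * word, roughly:
 *
 *	unsigned int flags = 0;
 *
 *	if (!(op & FUTEX_PRIVATE_FLAG))
 *		flags |= FLAGS_SHARED;
 *	if (op & FUTEX_CLOCK_REALTIME)
 *		flags |= FLAGS_CLOCKRT;
 */
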
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
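
/*
 * Example (sketch, not from the original file): two waiters on the same
 * PROCESS_PRIVATE futex word build identical keys, so hash_futex() sends
 * them to the same bucket and one FUTEX_WAKE can find them both:
 *
 *	union futex_key k1 = { .private = { .mm = mm, .address = addr } };
 *	union futex_key k2 = { .private = { .mm = mm, .address = addr } };
 *	// jhash2() sees the same (word, ptr, offset) triple for both,
 *	// hence hash_futex(&k1) == hash_futex(&k2).
 */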

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs the
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	if (err < 0)
		return err;

	page = compound_head(page);
	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page->mapping->host;
		key->shared.pgoff = page->index;
	}

	get_futex_key_refs(key);

	unlock_page(page);
	put_page(page);
	return 0;
}
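
/*
 * Example of the two key forms produced above (illustrative values, not
 * from the original file): a PROCESS_PRIVATE futex at 0x7f1200001004 keys
 * on (current->mm, 0x7f1200001000) with both.offset = 0x4, while the same
 * word in a MAP_SHARED file mapping keys on (inode, pgoff) with
 * both.offset = 0x4 | FUT_OFF_INODE, so unrelated processes mapping the
 * same file page still agree on the key and hash bucket.
 */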

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, (unsigned long)uaddr,
			     1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

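/*
 * Usage pattern for the two "locked" helpers above (a sketch): callers
 * hold a hash bucket lock, so the user access runs with pagefaults
 * disabled and a fault is reported rather than handled.  The caller then
 * drops the lock, faults the page in, and retries, e.g.:
 *
 *	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 *	if (curval == -EFAULT) {
 *		spin_unlock(&hb->lock);
 *		if (fault_in_user_writeable(uaddr))
 *			return -EFAULT;
 *		goto retry;
 *	}
 */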

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
			 * When pi_state->owner is NULL then the owner died
			 * and another waiter is on the fly. pi_state->owner
			 * is fixed up by the task which acquires
			 * pi_state->rt_mutex.
			 *
			 * We do not check for pid == 0 which can happen when
			 * the owner died and robust_list_exit() cleared the
			 * TID.
			 */
			if (pid && pi_state->owner) {
				/*
				 * Bail out if user space manipulated the
				 * futex value.
				 */
				if (pid != task_pid_vnr(pi_state->owner))
					return -EINVAL;
			}

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	/*
	 * We need to look at the task state flags to figure out whether the
	 * task is exiting. To protect against the do_exit change of the task
	 * flags, we do this protected by p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	int lock_taken, ret, ownerdied = 0;
	u32 uval, newval, curval;

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(task);
	if (set_waiters)
		newval |= FUTEX_WAITERS;

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * There are two cases where we take over the futex: we observed
	 * OWNER_DIED on a previous pass (ownerdied is set), or the owner
	 * TID is already 0 because the owner died and robust cleanup
	 * cleared it.
	 *
	 * This is safe as we are protected by the hash bucket lock!
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to owner died take over.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}
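
/*
 * For context, a sketch of the userspace side (not kernel code): a PI
 * futex word holds the owner's TID.  The uncontended acquire never enters
 * the kernel; only on contention does the waiter call FUTEX_LOCK_PI and
 * reach the slow path above:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *
 *	if (__sync_val_compare_and_swap(uaddr, 0, tid) != 0)
 *		syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */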

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	struct task_struct *p = q->task;

	/*
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
	 * a non-futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non-existing task
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
	 */
	get_task_struct(p);

	plist_del(&q->list, &q->list.plist);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy. We know that way that a lock waiter
	 * is on the fly. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	raw_spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);

	raw_spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock_irq(&new_owner->pi_lock);

	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit need not be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}
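
/*
 * The matching userspace fast path (sketch, not kernel code): if the
 * futex word is still exactly our TID (no FUTEX_WAITERS, no
 * FUTEX_OWNER_DIED), a cmpxchg TID -> 0 releases the lock without a
 * syscall; otherwise FUTEX_UNLOCK_PI ends up in wake_futex_pi() or
 * unlock_futex_pi() above:
 *
 *	if (__sync_val_compare_and_swap(uaddr, tid, 0) != tid)
 *		syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */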

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
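
/*
 * Why the address ordering above matters (a sketch): if CPU0 requeues
 * from bucket A to bucket B while CPU1 requeues from B to A, both CPUs
 * lock the lower-addressed bucket first, so the classic AB-BA deadlock
 * cannot occur.
 */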

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(&key);
out:
	return ret;
}
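
/*
 * Userspace pairing (sketch, not kernel code): a minimal mutex unlock in
 * the style of Drepper's "Futexes Are Tricky", where 2 means locked with
 * waiters.  Plain FUTEX_WAKE reaches the function above with @bitset set
 * to FUTEX_BITSET_MATCH_ANY:
 *
 *	if (__sync_fetch_and_sub(uaddr, 1) != 1) {
 *		*uaddr = 0;
 *		syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 */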

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex(&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}
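
/*
 * The @op word packs an atomic op and a wake condition (see the FUTEX_OP()
 * macro in <linux/futex.h>): futex_atomic_op_inuser() applies the op to
 * *uaddr2 and returns whether the old value passes the comparison, which
 * decides if the uaddr2 waiters are woken too.  For example (a sketch):
 *
 *	FUTEX_OP(FUTEX_OP_ANDN, 1, FUTEX_OP_CMP_NE, 0)
 *
 * means "*uaddr2 &= ~1, and wake up to nr_wake2 waiters on uaddr2 if the
 * old value was non-zero".
 */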

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
		q->list.plist.spinlock = &hb2->lock;
#endif
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.spinlock = &hb->lock;
#endif

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Returns:
 *  0 - failed to acquire the lock atomically
 *  1 - acquired the lock
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1)
		requeue_pi_wake_futex(top_waiter, key2, hb2);

	return ret;
}
1140
1141/**
1142 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
Randy Dunlapfb62db22010-10-13 11:02:34 -07001143 * @uaddr1: source futex user address
Darren Hartb41277d2010-11-08 13:10:09 -08001144 * @flags: futex flags (FLAGS_SHARED, etc.)
Randy Dunlapfb62db22010-10-13 11:02:34 -07001145 * @uaddr2: target futex user address
1146 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1147 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1148 * @cmpval: @uaddr1 expected value (or %NULL)
1149 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
Darren Hartb41277d2010-11-08 13:10:09 -08001150 * pi futex (pi to pi requeue is not supported)
Darren Hart52400ba2009-04-03 13:40:49 -07001151 *
1152 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1153 * uaddr2 atomically on behalf of the top waiter.
1154 *
1155 * Returns:
1156 * >=0 - on success, the number of tasks requeued or woken
1157 * <0 - on error
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 */
Darren Hartb41277d2010-11-08 13:10:09 -08001159static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1160 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1161 u32 *cmpval, int requeue_pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162{
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001163 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
Darren Hart52400ba2009-04-03 13:40:49 -07001164 int drop_count = 0, task_count = 0, ret;
1165 struct futex_pi_state *pi_state = NULL;
Ingo Molnare2970f22006-06-27 02:54:47 -07001166 struct futex_hash_bucket *hb1, *hb2;
Pierre Peifferec92d082007-05-09 02:35:00 -07001167 struct plist_head *head1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 struct futex_q *this, *next;
Darren Hart52400ba2009-04-03 13:40:49 -07001169 u32 curval2;
1170
1171 if (requeue_pi) {
1172 /*
1173 * requeue_pi requires a pi_state, try to allocate it now
1174 * without any locks in case it fails.
1175 */
1176 if (refill_pi_state_cache())
1177 return -ENOMEM;
1178 /*
1179 * requeue_pi must wake as many tasks as it can, up to nr_wake
1180 * + nr_requeue, since it acquires the rt_mutex prior to
1181 * returning to userspace, so as to not leave the rt_mutex with
1182 * waiters and no owner. However, second and third wake-ups
1183 * cannot be predicted as they involve race conditions with the
1184 * first wake and a fault while looking up the pi_state. Both
1185 * pthread_cond_signal() and pthread_cond_broadcast() should
1186 * use nr_wake=1.
1187 */
1188 if (nr_wake != 1)
1189 return -EINVAL;
1190 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
Darren Hart42d35d42008-12-29 15:49:53 -08001192retry:
Darren Hart52400ba2009-04-03 13:40:49 -07001193 if (pi_state != NULL) {
1194 /*
1195 * We will have to lookup the pi_state again, so free this one
1196 * to keep the accounting correct.
1197 */
1198 free_pi_state(pi_state);
1199 pi_state = NULL;
1200 }
1201
Darren Hartb41277d2010-11-08 13:10:09 -08001202 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 if (unlikely(ret != 0))
1204 goto out;
Darren Hartb41277d2010-11-08 13:10:09 -08001205 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08001207 goto out_put_key1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208
Ingo Molnare2970f22006-06-27 02:54:47 -07001209 hb1 = hash_futex(&key1);
1210 hb2 = hash_futex(&key2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
Darren Harte4dc5b72009-03-12 00:56:13 -07001212retry_private:
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001213 double_lock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
Ingo Molnare2970f22006-06-27 02:54:47 -07001215 if (likely(cmpval != NULL)) {
1216 u32 curval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217
Ingo Molnare2970f22006-06-27 02:54:47 -07001218 ret = get_futex_value_locked(&curval, uaddr1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
1220 if (unlikely(ret)) {
Darren Hart5eb3dc62009-03-12 00:55:52 -07001221 double_unlock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
Darren Harte4dc5b72009-03-12 00:56:13 -07001223 ret = get_user(curval, uaddr1);
1224 if (ret)
1225 goto out_put_keys;
1226
Darren Hartb41277d2010-11-08 13:10:09 -08001227 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07001228 goto retry_private;
1229
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001230 put_futex_key(&key2);
1231 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07001232 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 }
Ingo Molnare2970f22006-06-27 02:54:47 -07001234 if (curval != *cmpval) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 ret = -EAGAIN;
1236 goto out_unlock;
1237 }
1238 }
1239
Darren Hart52400ba2009-04-03 13:40:49 -07001240 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
Darren Hartbab5bc92009-04-07 23:23:50 -07001241 /*
1242 * Attempt to acquire uaddr2 and wake the top waiter. If we
1243 * intend to requeue waiters, force setting the FUTEX_WAITERS
1244 * bit. We force this here where we are able to easily handle
1245 * faults rather in the requeue loop below.
1246 */
Darren Hart52400ba2009-04-03 13:40:49 -07001247 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
Darren Hartbab5bc92009-04-07 23:23:50 -07001248 &key2, &pi_state, nr_requeue);
Darren Hart52400ba2009-04-03 13:40:49 -07001249
1250 /*
1251 * At this point the top_waiter has either taken uaddr2 or is
1252 * waiting on it. If the former, then the pi_state will not
1253 * exist yet, look it up one more time to ensure we have a
1254 * reference to it.
1255 */
1256 if (ret == 1) {
1257 WARN_ON(pi_state);
Darren Hart89061d32009-10-15 15:30:48 -07001258 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07001259 task_count++;
1260 ret = get_futex_value_locked(&curval2, uaddr2);
1261 if (!ret)
1262 ret = lookup_pi_state(curval2, hb2, &key2,
1263 &pi_state);
1264 }
1265
1266 switch (ret) {
1267 case 0:
1268 break;
1269 case -EFAULT:
1270 double_unlock_hb(hb1, hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001271 put_futex_key(&key2);
1272 put_futex_key(&key1);
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001273 ret = fault_in_user_writeable(uaddr2);
Darren Hart52400ba2009-04-03 13:40:49 -07001274 if (!ret)
1275 goto retry;
1276 goto out;
1277 case -EAGAIN:
1278 /* The owner was exiting, try again. */
1279 double_unlock_hb(hb1, hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001280 put_futex_key(&key2);
1281 put_futex_key(&key1);
Darren Hart52400ba2009-04-03 13:40:49 -07001282 cond_resched();
1283 goto retry;
1284 default:
1285 goto out_unlock;
1286 }
1287 }
1288
Ingo Molnare2970f22006-06-27 02:54:47 -07001289 head1 = &hb1->chain;
Pierre Peifferec92d082007-05-09 02:35:00 -07001290 plist_for_each_entry_safe(this, next, head1, list) {
Darren Hart52400ba2009-04-03 13:40:49 -07001291 if (task_count - nr_wake >= nr_requeue)
1292 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293
Darren Hart52400ba2009-04-03 13:40:49 -07001294 if (!match_futex(&this->key, &key1))
1295 continue;
1296
Darren Hart392741e2009-08-07 15:20:48 -07001297 /*
1298 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1299 * be paired with each other and no other futex ops.
1300 */
1301 if ((requeue_pi && !this->rt_waiter) ||
1302 (!requeue_pi && this->rt_waiter)) {
1303 ret = -EINVAL;
1304 break;
1305 }
Darren Hart52400ba2009-04-03 13:40:49 -07001306
1307 /*
1308 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1309 * lock, we already woke the top_waiter. If not, it will be
1310 * woken by futex_unlock_pi().
1311 */
1312 if (++task_count <= nr_wake && !requeue_pi) {
1313 wake_futex(this);
1314 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 }
Darren Hart52400ba2009-04-03 13:40:49 -07001316
Darren Hart84bc4af2009-08-13 17:36:53 -07001317 /* Ensure we requeue to the expected futex for requeue_pi. */
1318 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1319 ret = -EINVAL;
1320 break;
1321 }
1322
Darren Hart52400ba2009-04-03 13:40:49 -07001323 /*
1324 * Requeue nr_requeue waiters and possibly one more in the case
1325 * of requeue_pi if we couldn't acquire the lock atomically.
1326 */
1327 if (requeue_pi) {
1328 /* Prepare the waiter to take the rt_mutex. */
1329 atomic_inc(&pi_state->refcount);
1330 this->pi_state = pi_state;
1331 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1332 this->rt_waiter,
1333 this->task, 1);
1334 if (ret == 1) {
1335 /* We got the lock. */
Darren Hartbeda2c72009-08-09 15:34:39 -07001336 requeue_pi_wake_futex(this, &key2, hb2);
Darren Hart89061d32009-10-15 15:30:48 -07001337 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07001338 continue;
1339 } else if (ret) {
1340 /* -EDEADLK */
1341 this->pi_state = NULL;
1342 free_pi_state(pi_state);
1343 goto out_unlock;
1344 }
1345 }
1346 requeue_futex(this, hb1, hb2, &key2);
1347 drop_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 }
1349
1350out_unlock:
Darren Hart5eb3dc62009-03-12 00:55:52 -07001351 double_unlock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
Darren Hartcd84a422009-04-02 14:19:38 -07001353 /*
1354 * drop_futex_key_refs() must be called outside the spinlocks. During
1355 * the requeue we moved futex_q's from the hash bucket at key1 to the
1356 * one at key2 and updated their key pointer. We no longer need to
1357 * hold the references to key1.
1358 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 while (--drop_count >= 0)
Rusty Russell9adef582007-05-08 00:26:42 -07001360 drop_futex_key_refs(&key1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361
Darren Hart42d35d42008-12-29 15:49:53 -08001362out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001363 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08001364out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001365 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08001366out:
Darren Hart52400ba2009-04-03 13:40:49 -07001367 if (pi_state != NULL)
1368 free_pi_state(pi_state);
1369 return ret ? ret : task_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370}
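/*
 * For reference, a minimal userspace sketch of the condvar-style
 * broadcast that drives FUTEX_CMP_REQUEUE from the other side: wake one
 * waiter and requeue the rest onto the mutex futex. This is only a
 * sketch, not a glibc API; futex() is a hypothetical wrapper around the
 * raw syscall, reused by the later examples in this file.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <limits.h>
 *
 *	static long futex(int *uaddr, int op, int val, unsigned long val2,
 *			  int *uaddr2, int val3)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, val2, uaddr2, val3);
 *	}
 *
 *	static void cond_broadcast(int *cond, int *mutex)
 *	{
 *		int seq = *cond;
 *
 *		futex(cond, FUTEX_CMP_REQUEUE, 1, INT_MAX, mutex, seq);
 *	}
 *
 * nr_wake is 1 and nr_requeue is INT_MAX (passed in the timeout slot of
 * the raw syscall), which is exactly the task_count/nr_requeue
 * accounting handled above.
 */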
1371
1372/* The key must be already stored in q->key. */
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01001373static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
Namhyung Kim15e408c2010-09-14 21:43:48 +09001374 __acquires(&hb->lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375{
Ingo Molnare2970f22006-06-27 02:54:47 -07001376 struct futex_hash_bucket *hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377
Ingo Molnare2970f22006-06-27 02:54:47 -07001378 hb = hash_futex(&q->key);
1379 q->lock_ptr = &hb->lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380
Ingo Molnare2970f22006-06-27 02:54:47 -07001381 spin_lock(&hb->lock);
1382 return hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383}
1384
Darren Hartd40d65c2009-09-21 22:30:15 -07001385static inline void
1386queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
Namhyung Kim15e408c2010-09-14 21:43:48 +09001387 __releases(&hb->lock)
Darren Hartd40d65c2009-09-21 22:30:15 -07001388{
1389 spin_unlock(&hb->lock);
Darren Hartd40d65c2009-09-21 22:30:15 -07001390}
1391
1392/**
1393 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1394 * @q: The futex_q to enqueue
1395 * @hb: The destination hash bucket
1396 *
1397 * The hb->lock must be held by the caller, and is released here. A call to
1398 * queue_me() is typically paired with exactly one call to unqueue_me(). The
1399 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1400 * or nothing if the unqueue is done as part of the wake process and the unqueue
 1401 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1402 * an example).
1403 */
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01001404static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
Namhyung Kim15e408c2010-09-14 21:43:48 +09001405 __releases(&hb->lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406{
Pierre Peifferec92d082007-05-09 02:35:00 -07001407 int prio;
1408
1409 /*
1410 * The priority used to register this element is
1411 * - either the real thread-priority for the real-time threads
1412 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1413 * - or MAX_RT_PRIO for non-RT threads.
1414 * Thus, all RT-threads are woken first in priority order, and
1415 * the others are woken last, in FIFO order.
1416 */
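	/*
	 * A worked example, assuming the usual MAX_RT_PRIO of 100: a
	 * SCHED_FIFO waiter with normal_prio 89 queues at plist priority
	 * 89, ahead of every SCHED_OTHER waiter, whose normal_prio of
	 * e.g. 120 is clamped:
	 *
	 *	min(89, MAX_RT_PRIO)  == 89	sorted among RT waiters
	 *	min(120, MAX_RT_PRIO) == 100	FIFO among non-RT waiters
	 */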
1417 prio = min(current->normal_prio, MAX_RT_PRIO);
1418
1419 plist_node_init(&q->list, prio);
1420#ifdef CONFIG_DEBUG_PI_LIST
Thomas Gleixnera2672452009-11-17 14:46:14 +01001421 q->list.plist.spinlock = &hb->lock;
Pierre Peifferec92d082007-05-09 02:35:00 -07001422#endif
1423 plist_add(&q->list, &hb->chain);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001424 q->task = current;
Ingo Molnare2970f22006-06-27 02:54:47 -07001425 spin_unlock(&hb->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426}
1427
Darren Hartd40d65c2009-09-21 22:30:15 -07001428/**
1429 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1430 * @q: The futex_q to unqueue
1431 *
1432 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1433 * be paired with exactly one earlier call to queue_me().
1434 *
1435 * Returns:
 1436 * 1 - if the futex_q was still queued (and we unqueued it)
1437 * 0 - if the futex_q was already removed by the waking thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439static int unqueue_me(struct futex_q *q)
1440{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 spinlock_t *lock_ptr;
Ingo Molnare2970f22006-06-27 02:54:47 -07001442 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
1444 /* In the common case we don't take the spinlock, which is nice. */
Darren Hart42d35d42008-12-29 15:49:53 -08001445retry:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 lock_ptr = q->lock_ptr;
Christian Borntraegere91467e2006-08-05 12:13:52 -07001447 barrier();
Stephen Hemmingerc80544d2007-10-18 03:07:05 -07001448 if (lock_ptr != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 spin_lock(lock_ptr);
1450 /*
1451 * q->lock_ptr can change between reading it and
1452 * spin_lock(), causing us to take the wrong lock. This
1453 * corrects the race condition.
1454 *
1455 * Reasoning goes like this: if we have the wrong lock,
1456 * q->lock_ptr must have changed (maybe several times)
1457 * between reading it and the spin_lock(). It can
1458 * change again after the spin_lock() but only if it was
1459 * already changed before the spin_lock(). It cannot,
1460 * however, change back to the original value. Therefore
1461 * we can detect whether we acquired the correct lock.
1462 */
1463 if (unlikely(lock_ptr != q->lock_ptr)) {
1464 spin_unlock(lock_ptr);
1465 goto retry;
1466 }
Pierre Peifferec92d082007-05-09 02:35:00 -07001467 WARN_ON(plist_node_empty(&q->list));
1468 plist_del(&q->list, &q->list.plist);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001469
1470 BUG_ON(q->pi_state);
1471
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 spin_unlock(lock_ptr);
1473 ret = 1;
1474 }
1475
Rusty Russell9adef582007-05-08 00:26:42 -07001476 drop_futex_key_refs(&q->key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 return ret;
1478}
1479
Ingo Molnarc87e2832006-06-27 02:54:58 -07001480/*
 1481 * PI futexes cannot be requeued and must remove themselves from the
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001482 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1483 * and dropped here.
Ingo Molnarc87e2832006-06-27 02:54:58 -07001484 */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001485static void unqueue_me_pi(struct futex_q *q)
Namhyung Kim15e408c2010-09-14 21:43:48 +09001486 __releases(q->lock_ptr)
Ingo Molnarc87e2832006-06-27 02:54:58 -07001487{
Pierre Peifferec92d082007-05-09 02:35:00 -07001488 WARN_ON(plist_node_empty(&q->list));
1489 plist_del(&q->list, &q->list.plist);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001490
1491 BUG_ON(!q->pi_state);
1492 free_pi_state(q->pi_state);
1493 q->pi_state = NULL;
1494
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001495 spin_unlock(q->lock_ptr);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001496}
1497
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001498/*
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01001499 * Fixup the pi_state owner with the new owner.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001500 *
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001501 * Must be called with hash bucket lock held and mm->sem held for non
1502 * private futexes.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001503 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001504static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001505 struct task_struct *newowner)
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001506{
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01001507 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001508 struct futex_pi_state *pi_state = q->pi_state;
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001509 struct task_struct *oldowner = pi_state->owner;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001510 u32 uval, curval, newval;
Darren Harte4dc5b72009-03-12 00:56:13 -07001511 int ret;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001512
1513 /* Owner died? */
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001514 if (!pi_state->owner)
1515 newtid |= FUTEX_OWNER_DIED;
1516
1517 /*
1518 * We are here either because we stole the rtmutex from the
1519 * pending owner or we are the pending owner which failed to
1520 * get the rtmutex. We have to replace the pending owner TID
1521 * in the user space variable. This must be atomic as we have
1522 * to preserve the owner died bit here.
1523 *
Darren Hartb2d09942009-03-12 00:55:37 -07001524 * Note: We write the user space value _before_ changing the pi_state
1525 * because we can fault here. Imagine swapped out pages or a fork
1526 * that marked all the anonymous memory readonly for cow.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001527 *
1528 * Modifying pi_state _before_ the user space value would
1529 * leave the pi_state in an inconsistent state when we fault
1530 * here, because we need to drop the hash bucket lock to
1531 * handle the fault. This might be observed in the PID check
1532 * in lookup_pi_state.
1533 */
1534retry:
1535 if (get_futex_value_locked(&uval, uaddr))
1536 goto handle_fault;
1537
1538 while (1) {
1539 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1540
1541 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1542
1543 if (curval == -EFAULT)
1544 goto handle_fault;
1545 if (curval == uval)
1546 break;
1547 uval = curval;
1548 }
1549
1550 /*
1551 * We fixed up user space. Now we need to fix the pi_state
1552 * itself.
1553 */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001554 if (pi_state->owner != NULL) {
Thomas Gleixner1d615482009-11-17 14:54:03 +01001555 raw_spin_lock_irq(&pi_state->owner->pi_lock);
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001556 WARN_ON(list_empty(&pi_state->list));
1557 list_del_init(&pi_state->list);
Thomas Gleixner1d615482009-11-17 14:54:03 +01001558 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001559 }
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001560
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01001561 pi_state->owner = newowner;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001562
Thomas Gleixner1d615482009-11-17 14:54:03 +01001563 raw_spin_lock_irq(&newowner->pi_lock);
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001564 WARN_ON(!list_empty(&pi_state->list));
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01001565 list_add(&pi_state->list, &newowner->pi_state_list);
Thomas Gleixner1d615482009-11-17 14:54:03 +01001566 raw_spin_unlock_irq(&newowner->pi_lock);
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001567 return 0;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001568
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001569 /*
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001570 * To handle the page fault we need to drop the hash bucket
1571 * lock here. That gives the other task (either the pending
1572 * owner itself or the task which stole the rtmutex) the
1573 * chance to try the fixup of the pi_state. So once we are
1574 * back from handling the fault we need to check the pi_state
1575 * after reacquiring the hash bucket lock and before trying to
1576 * do another fixup. When the fixup has been done already we
1577 * simply return.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001578 */
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001579handle_fault:
1580 spin_unlock(q->lock_ptr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001581
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001582 ret = fault_in_user_writeable(uaddr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001583
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001584 spin_lock(q->lock_ptr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001585
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02001586 /*
1587 * Check if someone else fixed it for us:
1588 */
1589 if (pi_state->owner != oldowner)
1590 return 0;
1591
1592 if (ret)
1593 return ret;
1594
1595 goto retry;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001596}
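/*
 * Layout of the futex word rewritten above, per the standard
 * definitions in <linux/futex.h>:
 *
 *	bit 31		FUTEX_WAITERS    (kernel must be involved on unlock)
 *	bit 30		FUTEX_OWNER_DIED (owner exited holding the lock)
 *	bits 0-29	FUTEX_TID_MASK   (TID of the current owner)
 *
 * The cmpxchg loop replaces the TID field and forces FUTEX_WAITERS
 * while carrying FUTEX_OWNER_DIED across unchanged:
 *
 *	newval = (uval & FUTEX_OWNER_DIED) | newtid;
 */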
1597
Nick Piggin72c1bbf2007-05-08 00:26:43 -07001598static long futex_wait_restart(struct restart_block *restart);
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07001599
Darren Hartca5f9522009-04-03 13:39:33 -07001600/**
Darren Hartdd973992009-04-03 13:40:02 -07001601 * fixup_owner() - Post lock pi_state and corner case management
1602 * @uaddr: user address of the futex
Darren Hartdd973992009-04-03 13:40:02 -07001603 * @q: futex_q (contains pi_state and access to the rt_mutex)
1604 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
1605 *
1606 * After attempting to lock an rt_mutex, this function is called to cleanup
1607 * the pi_state owner as well as handle race conditions that may allow us to
1608 * acquire the lock. Must be called with the hb lock held.
1609 *
1610 * Returns:
1611 * 1 - success, lock taken
1612 * 0 - success, lock not taken
1613 * <0 - on error (-EFAULT)
1614 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001615static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
Darren Hartdd973992009-04-03 13:40:02 -07001616{
1617 struct task_struct *owner;
1618 int ret = 0;
1619
1620 if (locked) {
1621 /*
1622 * Got the lock. We might not be the anticipated owner if we
1623 * did a lock-steal - fix up the PI-state in that case:
1624 */
1625 if (q->pi_state->owner != current)
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001626 ret = fixup_pi_state_owner(uaddr, q, current);
Darren Hartdd973992009-04-03 13:40:02 -07001627 goto out;
1628 }
1629
1630 /*
 1631	 * Catch the rare case where the lock was released when we were on the
1632 * way back before we locked the hash bucket.
1633 */
1634 if (q->pi_state->owner == current) {
1635 /*
1636 * Try to get the rt_mutex now. This might fail as some other
1637 * task acquired the rt_mutex after we removed ourself from the
1638 * rt_mutex waiters list.
1639 */
1640 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1641 locked = 1;
1642 goto out;
1643 }
1644
1645 /*
1646 * pi_state is incorrect, some other task did a lock steal and
1647 * we returned due to timeout or signal without taking the
1648 * rt_mutex. Too late. We can access the rt_mutex_owner without
1649 * locking, as the other task is now blocked on the hash bucket
1650 * lock. Fix the state up.
1651 */
1652 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001653 ret = fixup_pi_state_owner(uaddr, q, owner);
Darren Hartdd973992009-04-03 13:40:02 -07001654 goto out;
1655 }
1656
1657 /*
1658 * Paranoia check. If we did not take the lock, then we should not be
1659 * the owner, nor the pending owner, of the rt_mutex.
1660 */
1661 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1662 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1663 "pi-state %p\n", ret,
1664 q->pi_state->pi_mutex.owner,
1665 q->pi_state->owner);
1666
1667out:
1668 return ret ? ret : locked;
1669}
1670
1671/**
Darren Hartca5f9522009-04-03 13:39:33 -07001672 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1673 * @hb: the futex hash bucket, must be locked by the caller
1674 * @q: the futex_q to queue up on
1675 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
Darren Hartca5f9522009-04-03 13:39:33 -07001676 */
1677static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001678 struct hrtimer_sleeper *timeout)
Darren Hartca5f9522009-04-03 13:39:33 -07001679{
Darren Hart9beba3c2009-09-24 11:54:47 -07001680 /*
1681 * The task state is guaranteed to be set before another task can
1682 * wake it. set_current_state() is implemented using set_mb() and
1683 * queue_me() calls spin_unlock() upon completion, both serializing
1684 * access to the hash list and forcing another memory barrier.
1685 */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001686 set_current_state(TASK_INTERRUPTIBLE);
Darren Hart0729e192009-09-21 22:30:38 -07001687 queue_me(q, hb);
Darren Hartca5f9522009-04-03 13:39:33 -07001688
1689 /* Arm the timer */
1690 if (timeout) {
1691 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1692 if (!hrtimer_active(&timeout->timer))
1693 timeout->task = NULL;
1694 }
1695
1696 /*
Darren Hart0729e192009-09-21 22:30:38 -07001697 * If we have been removed from the hash list, then another task
1698 * has tried to wake us, and we can skip the call to schedule().
Darren Hartca5f9522009-04-03 13:39:33 -07001699 */
1700 if (likely(!plist_node_empty(&q->list))) {
1701 /*
1702 * If the timer has already expired, current will already be
1703 * flagged for rescheduling. Only call schedule if there
1704 * is no timeout, or if it has yet to expire.
1705 */
1706 if (!timeout || timeout->task)
1707 schedule();
1708 }
1709 __set_current_state(TASK_RUNNING);
1710}
1711
Darren Hartf8010732009-04-03 13:40:40 -07001712/**
1713 * futex_wait_setup() - Prepare to wait on a futex
1714 * @uaddr: the futex userspace address
1715 * @val: the expected value
Darren Hartb41277d2010-11-08 13:10:09 -08001716 * @flags: futex flags (FLAGS_SHARED, etc.)
Darren Hartf8010732009-04-03 13:40:40 -07001717 * @q: the associated futex_q
1718 * @hb: storage for hash_bucket pointer to be returned to caller
1719 *
1720 * Setup the futex_q and locate the hash_bucket. Get the futex value and
1721 * compare it with the expected value. Handle atomic faults internally.
1722 * Return with the hb lock held and a q.key reference on success, and unlocked
1723 * with no q.key reference on failure.
1724 *
1725 * Returns:
1726 * 0 - uaddr contains val and hb has been locked
 1727 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1728 */
Darren Hartb41277d2010-11-08 13:10:09 -08001729static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
Darren Hartf8010732009-04-03 13:40:40 -07001730 struct futex_q *q, struct futex_hash_bucket **hb)
1731{
1732 u32 uval;
1733 int ret;
1734
1735 /*
1736 * Access the page AFTER the hash-bucket is locked.
1737 * Order is important:
1738 *
1739 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1740 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
1741 *
1742 * The basic logical guarantee of a futex is that it blocks ONLY
1743 * if cond(var) is known to be true at the time of blocking, for
1744 * any cond. If we queued after testing *uaddr, that would open
1745 * a race condition where we could block indefinitely with
1746 * cond(var) false, which would violate the guarantee.
1747 *
1748 * A consequence is that futex_wait() can return zero and absorb
1749 * a wakeup when *uaddr != val on entry to the syscall. This is
1750 * rare, but normal.
1751 */
1752retry:
1753 q->key = FUTEX_KEY_INIT;
Darren Hartb41277d2010-11-08 13:10:09 -08001754 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
Darren Hartf8010732009-04-03 13:40:40 -07001755 if (unlikely(ret != 0))
Darren Harta5a2a0c2009-04-10 09:50:05 -07001756 return ret;
Darren Hartf8010732009-04-03 13:40:40 -07001757
1758retry_private:
1759 *hb = queue_lock(q);
1760
1761 ret = get_futex_value_locked(&uval, uaddr);
1762
1763 if (ret) {
1764 queue_unlock(q, *hb);
1765
1766 ret = get_user(uval, uaddr);
1767 if (ret)
1768 goto out;
1769
Darren Hartb41277d2010-11-08 13:10:09 -08001770 if (!(flags & FLAGS_SHARED))
Darren Hartf8010732009-04-03 13:40:40 -07001771 goto retry_private;
1772
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001773 put_futex_key(&q->key);
Darren Hartf8010732009-04-03 13:40:40 -07001774 goto retry;
1775 }
1776
1777 if (uval != val) {
1778 queue_unlock(q, *hb);
1779 ret = -EWOULDBLOCK;
1780 }
1781
1782out:
1783 if (ret)
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001784 put_futex_key(&q->key);
Darren Hartf8010732009-04-03 13:40:40 -07001785 return ret;
1786}
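/*
 * The waiter/waker ordering documented above is what makes the classic
 * userspace pattern below correct (a sketch, using the hypothetical
 * futex() wrapper from the requeue example earlier in this file):
 *
 *	void wait_for_flag(int *flag)
 *	{
 *		while (*flag == 0)
 *			futex(flag, FUTEX_WAIT, 0, 0, NULL, 0);
 *	}
 *
 *	void set_flag(int *flag)
 *	{
 *		*flag = 1;
 *		futex(flag, FUTEX_WAKE, 1, 0, NULL, 0);
 *	}
 *
 * If set_flag() runs between the waiter's *flag test and its syscall,
 * futex_wait_setup() observes uval != val and returns -EWOULDBLOCK, so
 * the waiter retries instead of blocking forever.
 */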
1787
Darren Hartb41277d2010-11-08 13:10:09 -08001788static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
1789 ktime_t *abs_time, u32 bitset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790{
Darren Hartca5f9522009-04-03 13:39:33 -07001791 struct hrtimer_sleeper timeout, *to = NULL;
Peter Zijlstra2fff78c72009-02-11 18:10:10 +01001792 struct restart_block *restart;
Ingo Molnare2970f22006-06-27 02:54:47 -07001793 struct futex_hash_bucket *hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 struct futex_q q;
Ingo Molnare2970f22006-06-27 02:54:47 -07001795 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
Thomas Gleixnercd689982008-02-01 17:45:14 +01001797 if (!bitset)
1798 return -EINVAL;
1799
Ingo Molnarc87e2832006-06-27 02:54:58 -07001800 q.pi_state = NULL;
Thomas Gleixnercd689982008-02-01 17:45:14 +01001801 q.bitset = bitset;
Darren Hart52400ba2009-04-03 13:40:49 -07001802 q.rt_waiter = NULL;
Darren Hart84bc4af2009-08-13 17:36:53 -07001803 q.requeue_pi_key = NULL;
Darren Hartca5f9522009-04-03 13:39:33 -07001804
1805 if (abs_time) {
1806 to = &timeout;
1807
Darren Hartb41277d2010-11-08 13:10:09 -08001808 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
1809 CLOCK_REALTIME : CLOCK_MONOTONIC,
1810 HRTIMER_MODE_ABS);
Darren Hartca5f9522009-04-03 13:39:33 -07001811 hrtimer_init_sleeper(to, current);
1812 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1813 current->timer_slack_ns);
1814 }
1815
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02001816retry:
Darren Hart7ada8762010-10-17 08:35:04 -07001817 /*
1818 * Prepare to wait on uaddr. On success, holds hb lock and increments
1819 * q.key refs.
1820 */
Darren Hartb41277d2010-11-08 13:10:09 -08001821 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
Darren Hartf8010732009-04-03 13:40:40 -07001822 if (ret)
Darren Hart42d35d42008-12-29 15:49:53 -08001823 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
Darren Hartca5f9522009-04-03 13:39:33 -07001825 /* queue_me and wait for wakeup, timeout, or a signal. */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001826 futex_wait_queue_me(hb, &q, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827
1828 /* If we were woken (and unqueued), we succeeded, whatever. */
Peter Zijlstra2fff78c72009-02-11 18:10:10 +01001829 ret = 0;
Darren Hart7ada8762010-10-17 08:35:04 -07001830 /* unqueue_me() drops q.key ref */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 if (!unqueue_me(&q))
Darren Hart7ada8762010-10-17 08:35:04 -07001832 goto out;
Peter Zijlstra2fff78c72009-02-11 18:10:10 +01001833 ret = -ETIMEDOUT;
Darren Hartca5f9522009-04-03 13:39:33 -07001834 if (to && !to->task)
Darren Hart7ada8762010-10-17 08:35:04 -07001835 goto out;
Nick Piggin72c1bbf2007-05-08 00:26:43 -07001836
Ingo Molnare2970f22006-06-27 02:54:47 -07001837 /*
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02001838 * We expect signal_pending(current), but we might be the
1839 * victim of a spurious wakeup as well.
Ingo Molnare2970f22006-06-27 02:54:47 -07001840 */
Darren Hart7ada8762010-10-17 08:35:04 -07001841 if (!signal_pending(current))
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02001842 goto retry;
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02001843
Peter Zijlstra2fff78c72009-02-11 18:10:10 +01001844 ret = -ERESTARTSYS;
Pierre Peifferc19384b2007-05-09 02:35:02 -07001845 if (!abs_time)
Darren Hart7ada8762010-10-17 08:35:04 -07001846 goto out;
Steven Rostedtce6bd422007-12-05 15:46:09 +01001847
Peter Zijlstra2fff78c72009-02-11 18:10:10 +01001848 restart = &current_thread_info()->restart_block;
1849 restart->fn = futex_wait_restart;
Namhyung Kima3c74c52010-09-14 21:43:47 +09001850 restart->futex.uaddr = uaddr;
Peter Zijlstra2fff78c72009-02-11 18:10:10 +01001851 restart->futex.val = val;
1852 restart->futex.time = abs_time->tv64;
1853 restart->futex.bitset = bitset;
Darren Hartb41277d2010-11-08 13:10:09 -08001854 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
Peter Zijlstra2fff78c72009-02-11 18:10:10 +01001855
1856 ret = -ERESTART_RESTARTBLOCK;
1857
Darren Hart42d35d42008-12-29 15:49:53 -08001858out:
Darren Hartca5f9522009-04-03 13:39:33 -07001859 if (to) {
1860 hrtimer_cancel(&to->timer);
1861 destroy_hrtimer_on_stack(&to->timer);
1862 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07001863 return ret;
1864}
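/*
 * The bitset variant lets a waker target a subset of the waiters queued
 * on one futex word. A sketch, with the hypothetical futex() wrapper
 * from earlier examples and made-up channel bits 0x1/0x2:
 *
 *	futex(&w, FUTEX_WAIT_BITSET, 0, 0, NULL, 0x1);	      reader waits
 *	futex(&w, FUTEX_WAIT_BITSET, 0, 0, NULL, 0x2);	      writer waits
 *	futex(&w, FUTEX_WAKE_BITSET, INT_MAX, 0, NULL, 0x2);  wakes writers
 *
 * Plain FUTEX_WAIT/FUTEX_WAKE are the same operations with the bitset
 * fixed to FUTEX_BITSET_MATCH_ANY, as do_futex() below arranges.
 */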
1865
Nick Piggin72c1bbf2007-05-08 00:26:43 -07001866
1867static long futex_wait_restart(struct restart_block *restart)
1868{
Namhyung Kima3c74c52010-09-14 21:43:47 +09001869 u32 __user *uaddr = restart->futex.uaddr;
Darren Harta72188d2009-04-03 13:40:22 -07001870 ktime_t t, *tp = NULL;
Nick Piggin72c1bbf2007-05-08 00:26:43 -07001871
Darren Harta72188d2009-04-03 13:40:22 -07001872 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1873 t.tv64 = restart->futex.time;
1874 tp = &t;
1875 }
Nick Piggin72c1bbf2007-05-08 00:26:43 -07001876 restart->fn = do_no_restart_syscall;
Darren Hartb41277d2010-11-08 13:10:09 -08001877
1878 return (long)futex_wait(uaddr, restart->futex.flags,
1879 restart->futex.val, tp, restart->futex.bitset);
Nick Piggin72c1bbf2007-05-08 00:26:43 -07001880}
1881
1882
Ingo Molnarc87e2832006-06-27 02:54:58 -07001883/*
1884 * Userspace tried a 0 -> TID atomic transition of the futex value
1885 * and failed. The kernel side here does the whole locking operation:
1886 * if there are waiters then it will block, it does PI, etc. (Due to
1887 * races the kernel might see a 0 value of the futex too.)
1888 */
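/*
 * Schematically, the userspace fast path that leads here (a sketch:
 * cmpxchg() stands for a userspace atomic compare-and-swap returning
 * the old value, futex() is the hypothetical wrapper from earlier
 * examples):
 *
 *	pid_t tid = gettid();
 *
 *	if (cmpxchg(lock, 0, tid) != 0)
 *		futex(lock, FUTEX_LOCK_PI, 0, 0, NULL, 0);
 *
 * Only contention, or an OWNER_DIED value, ever reaches the kernel.
 */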
Darren Hartb41277d2010-11-08 13:10:09 -08001889static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
1890 ktime_t *time, int trylock)
Ingo Molnarc87e2832006-06-27 02:54:58 -07001891{
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07001892 struct hrtimer_sleeper timeout, *to = NULL;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001893 struct futex_hash_bucket *hb;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001894 struct futex_q q;
Darren Hartdd973992009-04-03 13:40:02 -07001895 int res, ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001896
1897 if (refill_pi_state_cache())
1898 return -ENOMEM;
1899
Pierre Peifferc19384b2007-05-09 02:35:02 -07001900 if (time) {
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07001901 to = &timeout;
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001902 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1903 HRTIMER_MODE_ABS);
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07001904 hrtimer_init_sleeper(to, current);
Arjan van de Vencc584b22008-09-01 15:02:30 -07001905 hrtimer_set_expires(&to->timer, *time);
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07001906 }
1907
Ingo Molnarc87e2832006-06-27 02:54:58 -07001908 q.pi_state = NULL;
Darren Hart52400ba2009-04-03 13:40:49 -07001909 q.rt_waiter = NULL;
Darren Hart84bc4af2009-08-13 17:36:53 -07001910 q.requeue_pi_key = NULL;
Darren Hart42d35d42008-12-29 15:49:53 -08001911retry:
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001912 q.key = FUTEX_KEY_INIT;
Darren Hartb41277d2010-11-08 13:10:09 -08001913 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001914 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08001915 goto out;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001916
Darren Harte4dc5b72009-03-12 00:56:13 -07001917retry_private:
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01001918 hb = queue_lock(&q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001919
Darren Hartbab5bc92009-04-07 23:23:50 -07001920 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001921 if (unlikely(ret)) {
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001922 switch (ret) {
Darren Hart1a520842009-04-03 13:39:52 -07001923 case 1:
1924 /* We got the lock. */
1925 ret = 0;
1926 goto out_unlock_put_key;
1927 case -EFAULT:
1928 goto uaddr_faulted;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001929 case -EAGAIN:
1930 /*
1931 * Task is exiting and we just wait for the
1932 * exit to complete.
1933 */
1934 queue_unlock(&q, hb);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001935 put_futex_key(&q.key);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001936 cond_resched();
1937 goto retry;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001938 default:
Darren Hart42d35d42008-12-29 15:49:53 -08001939 goto out_unlock_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001940 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07001941 }
1942
1943 /*
1944 * Only actually queue now that the atomic ops are done:
1945 */
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01001946 queue_me(&q, hb);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001947
Ingo Molnarc87e2832006-06-27 02:54:58 -07001948 WARN_ON(!q.pi_state);
1949 /*
1950 * Block on the PI mutex:
1951 */
1952 if (!trylock)
1953 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1954 else {
1955 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1956 /* Fixup the trylock return value: */
1957 ret = ret ? 0 : -EWOULDBLOCK;
1958 }
1959
Vernon Mauerya99e4e42006-07-01 04:35:42 -07001960 spin_lock(q.lock_ptr);
Darren Hartdd973992009-04-03 13:40:02 -07001961 /*
1962 * Fixup the pi_state owner and possibly acquire the lock if we
1963 * haven't already.
1964 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001965 res = fixup_owner(uaddr, &q, !ret);
Darren Hartdd973992009-04-03 13:40:02 -07001966 /*
 1967	 * If fixup_owner() returned an error, propagate that. If it acquired
1968 * the lock, clear our -ETIMEDOUT or -EINTR.
1969 */
1970 if (res)
1971 ret = (res < 0) ? res : 0;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001972
Darren Harte8f63862009-03-12 00:56:06 -07001973 /*
Darren Hartdd973992009-04-03 13:40:02 -07001974 * If fixup_owner() faulted and was unable to handle the fault, unlock
1975 * it and return the fault to userspace.
Darren Harte8f63862009-03-12 00:56:06 -07001976 */
1977 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1978 rt_mutex_unlock(&q.pi_state->pi_mutex);
1979
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001980 /* Unqueue and drop the lock */
1981 unqueue_me_pi(&q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001982
Mikael Pettersson5ecb01c2010-01-23 22:36:29 +01001983 goto out_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001984
Darren Hart42d35d42008-12-29 15:49:53 -08001985out_unlock_put_key:
Ingo Molnarc87e2832006-06-27 02:54:58 -07001986 queue_unlock(&q, hb);
1987
Darren Hart42d35d42008-12-29 15:49:53 -08001988out_put_key:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001989 put_futex_key(&q.key);
Darren Hart42d35d42008-12-29 15:49:53 -08001990out:
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001991 if (to)
1992 destroy_hrtimer_on_stack(&to->timer);
Darren Hartdd973992009-04-03 13:40:02 -07001993 return ret != -EINTR ? ret : -ERESTARTNOINTR;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001994
Darren Hart42d35d42008-12-29 15:49:53 -08001995uaddr_faulted:
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001996 queue_unlock(&q, hb);
1997
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001998 ret = fault_in_user_writeable(uaddr);
Darren Harte4dc5b72009-03-12 00:56:13 -07001999 if (ret)
2000 goto out_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002001
Darren Hartb41277d2010-11-08 13:10:09 -08002002 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07002003 goto retry_private;
2004
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002005 put_futex_key(&q.key);
Darren Harte4dc5b72009-03-12 00:56:13 -07002006 goto retry;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002007}
2008
2009/*
Ingo Molnarc87e2832006-06-27 02:54:58 -07002010 * Userspace attempted a TID -> 0 atomic transition, and failed.
2011 * This is the in-kernel slowpath: we look up the PI state (if any),
2012 * and do the rt-mutex unlock.
2013 */
Darren Hartb41277d2010-11-08 13:10:09 -08002014static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002015{
2016 struct futex_hash_bucket *hb;
2017 struct futex_q *this, *next;
2018 u32 uval;
Pierre Peifferec92d082007-05-09 02:35:00 -07002019 struct plist_head *head;
Peter Zijlstra38d47c12008-09-26 19:32:20 +02002020 union futex_key key = FUTEX_KEY_INIT;
Darren Harte4dc5b72009-03-12 00:56:13 -07002021 int ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002022
2023retry:
2024 if (get_user(uval, uaddr))
2025 return -EFAULT;
2026 /*
2027 * We release only a lock we actually own:
2028 */
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002029 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
Ingo Molnarc87e2832006-06-27 02:54:58 -07002030 return -EPERM;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002031
Darren Hartb41277d2010-11-08 13:10:09 -08002032 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002033 if (unlikely(ret != 0))
2034 goto out;
2035
2036 hb = hash_futex(&key);
2037 spin_lock(&hb->lock);
2038
Ingo Molnarc87e2832006-06-27 02:54:58 -07002039 /*
2040 * To avoid races, try to do the TID -> 0 atomic transition
2041 * again. If it succeeds then we can return without waking
2042 * anyone else up:
2043 */
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07002044 if (!(uval & FUTEX_OWNER_DIED))
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002045 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07002046
Ingo Molnarc87e2832006-06-27 02:54:58 -07002047
2048 if (unlikely(uval == -EFAULT))
2049 goto pi_faulted;
2050 /*
2051 * Rare case: we managed to release the lock atomically,
2052 * no need to wake anyone else up:
2053 */
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002054 if (unlikely(uval == task_pid_vnr(current)))
Ingo Molnarc87e2832006-06-27 02:54:58 -07002055 goto out_unlock;
2056
2057 /*
2058 * Ok, other tasks may need to be woken up - check waiters
2059 * and do the wakeup if necessary:
2060 */
2061 head = &hb->chain;
2062
Pierre Peifferec92d082007-05-09 02:35:00 -07002063 plist_for_each_entry_safe(this, next, head, list) {
Ingo Molnarc87e2832006-06-27 02:54:58 -07002064 if (!match_futex (&this->key, &key))
2065 continue;
2066 ret = wake_futex_pi(uaddr, uval, this);
2067 /*
2068 * The atomic access to the futex value
2069 * generated a pagefault, so retry the
2070 * user-access and the wakeup:
2071 */
2072 if (ret == -EFAULT)
2073 goto pi_faulted;
2074 goto out_unlock;
2075 }
2076 /*
2077 * No waiters - kernel unlocks the futex:
2078 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002079 if (!(uval & FUTEX_OWNER_DIED)) {
2080 ret = unlock_futex_pi(uaddr, uval);
2081 if (ret == -EFAULT)
2082 goto pi_faulted;
2083 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07002084
2085out_unlock:
2086 spin_unlock(&hb->lock);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002087 put_futex_key(&key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002088
Darren Hart42d35d42008-12-29 15:49:53 -08002089out:
Ingo Molnarc87e2832006-06-27 02:54:58 -07002090 return ret;
2091
2092pi_faulted:
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002093 spin_unlock(&hb->lock);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002094 put_futex_key(&key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002095
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002096 ret = fault_in_user_writeable(uaddr);
Darren Hartb5686362008-12-18 15:06:34 -08002097 if (!ret)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002098 goto retry;
2099
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 return ret;
2101}
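/*
 * The matching userspace unlock fast path (same hypothetical cmpxchg()
 * and futex() helpers as in the lock example above):
 *
 *	if (cmpxchg(lock, tid, 0) != tid)
 *		futex(lock, FUTEX_UNLOCK_PI, 0, 0, NULL, 0);
 *
 * The slow path triggers exactly when the stored value is not plainly
 * our TID, i.e. when FUTEX_WAITERS or FUTEX_OWNER_DIED is set.
 */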
2102
Darren Hart52400ba2009-04-03 13:40:49 -07002103/**
2104 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 2105 * @hb: the hash_bucket futex_q was originally enqueued on
2106 * @q: the futex_q woken while waiting to be requeued
2107 * @key2: the futex_key of the requeue target futex
2108 * @timeout: the timeout associated with the wait (NULL if none)
2109 *
2110 * Detect if the task was woken on the initial futex as opposed to the requeue
2111 * target futex. If so, determine if it was a timeout or a signal that caused
2112 * the wakeup and return the appropriate error code to the caller. Must be
2113 * called with the hb lock held.
2114 *
 2115 * Returns:
2116 * 0 - no early wakeup detected
Thomas Gleixner1c840c12009-05-20 09:22:40 +02002117 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
Darren Hart52400ba2009-04-03 13:40:49 -07002118 */
2119static inline
2120int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2121 struct futex_q *q, union futex_key *key2,
2122 struct hrtimer_sleeper *timeout)
2123{
2124 int ret = 0;
2125
2126 /*
2127 * With the hb lock held, we avoid races while we process the wakeup.
2128 * We only need to hold hb (and not hb2) to ensure atomicity as the
2129 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2130 * It can't be requeued from uaddr2 to something else since we don't
2131 * support a PI aware source futex for requeue.
2132 */
2133 if (!match_futex(&q->key, key2)) {
2134 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2135 /*
2136 * We were woken prior to requeue by a timeout or a signal.
2137 * Unqueue the futex_q and determine which it was.
2138 */
2139 plist_del(&q->list, &q->list.plist);
Darren Hart52400ba2009-04-03 13:40:49 -07002140
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002141 /* Handle spurious wakeups gracefully */
Thomas Gleixner11df6dd2009-10-28 20:26:48 +01002142 ret = -EWOULDBLOCK;
Darren Hart52400ba2009-04-03 13:40:49 -07002143 if (timeout && !timeout->task)
2144 ret = -ETIMEDOUT;
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002145 else if (signal_pending(current))
Thomas Gleixner1c840c12009-05-20 09:22:40 +02002146 ret = -ERESTARTNOINTR;
Darren Hart52400ba2009-04-03 13:40:49 -07002147 }
2148 return ret;
2149}
2150
2151/**
2152 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
Darren Hart56ec1602009-09-21 22:29:59 -07002153 * @uaddr: the futex we initially wait on (non-pi)
Darren Hartb41277d2010-11-08 13:10:09 -08002154 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); uaddr and uaddr2
Darren Hart52400ba2009-04-03 13:40:49 -07002155 * must be of the same type (no requeueing from private to shared, etc.)
2156 * @val: the expected value of uaddr
2157 * @abs_time: absolute timeout
Darren Hart56ec1602009-09-21 22:29:59 -07002158 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2160 * @uaddr2: the pi futex we will take prior to returning to user-space
2161 *
2162 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2163 * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
2164 * complete the acquisition of the rt_mutex prior to returning to userspace.
2165 * This ensures the rt_mutex maintains an owner when it has waiters; without
2166 * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2167 * need to.
2168 *
2169 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2170 * via the following:
2171 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
Darren Hartcc6db4e2009-07-31 16:20:10 -07002172 * 2) wakeup on uaddr2 after a requeue
2173 * 3) signal
2174 * 4) timeout
Darren Hart52400ba2009-04-03 13:40:49 -07002175 *
Darren Hartcc6db4e2009-07-31 16:20:10 -07002176 * If 3, cleanup and return -ERESTARTNOINTR.
Darren Hart52400ba2009-04-03 13:40:49 -07002177 *
2178 * If 2, we may then block on trying to take the rt_mutex and return via:
2179 * 5) successful lock
2180 * 6) signal
2181 * 7) timeout
2182 * 8) other lock acquisition failure
2183 *
Darren Hartcc6db4e2009-07-31 16:20:10 -07002184 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
Darren Hart52400ba2009-04-03 13:40:49 -07002185 *
2186 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2187 *
2188 * Returns:
2189 * 0 - On success
2190 * <0 - On error
2191 */
Darren Hartb41277d2010-11-08 13:10:09 -08002192static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
Darren Hart52400ba2009-04-03 13:40:49 -07002193 u32 val, ktime_t *abs_time, u32 bitset,
Darren Hartb41277d2010-11-08 13:10:09 -08002194 u32 __user *uaddr2)
Darren Hart52400ba2009-04-03 13:40:49 -07002195{
2196 struct hrtimer_sleeper timeout, *to = NULL;
2197 struct rt_mutex_waiter rt_waiter;
2198 struct rt_mutex *pi_mutex = NULL;
Darren Hart52400ba2009-04-03 13:40:49 -07002199 struct futex_hash_bucket *hb;
2200 union futex_key key2;
2201 struct futex_q q;
2202 int res, ret;
Darren Hart52400ba2009-04-03 13:40:49 -07002203
2204 if (!bitset)
2205 return -EINVAL;
2206
2207 if (abs_time) {
2208 to = &timeout;
Darren Hartb41277d2010-11-08 13:10:09 -08002209 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2210 CLOCK_REALTIME : CLOCK_MONOTONIC,
2211 HRTIMER_MODE_ABS);
Darren Hart52400ba2009-04-03 13:40:49 -07002212 hrtimer_init_sleeper(to, current);
2213 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2214 current->timer_slack_ns);
2215 }
2216
2217 /*
2218 * The waiter is allocated on our stack, manipulated by the requeue
2219 * code while we sleep on uaddr.
2220 */
2221 debug_rt_mutex_init_waiter(&rt_waiter);
2222 rt_waiter.task = NULL;
2223
Darren Hart52400ba2009-04-03 13:40:49 -07002224 key2 = FUTEX_KEY_INIT;
Darren Hartb41277d2010-11-08 13:10:09 -08002225 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
Darren Hart52400ba2009-04-03 13:40:49 -07002226 if (unlikely(ret != 0))
2227 goto out;
2228
Darren Hart84bc4af2009-08-13 17:36:53 -07002229 q.pi_state = NULL;
2230 q.bitset = bitset;
2231 q.rt_waiter = &rt_waiter;
2232 q.requeue_pi_key = &key2;
2233
Darren Hart7ada8762010-10-17 08:35:04 -07002234 /*
2235 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2236 * count.
2237 */
Darren Hartb41277d2010-11-08 13:10:09 -08002238 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
Thomas Gleixnerc8b15a72009-05-20 09:18:50 +02002239 if (ret)
2240 goto out_key2;
Darren Hart52400ba2009-04-03 13:40:49 -07002241
2242 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002243 futex_wait_queue_me(hb, &q, to);
Darren Hart52400ba2009-04-03 13:40:49 -07002244
2245 spin_lock(&hb->lock);
2246 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2247 spin_unlock(&hb->lock);
2248 if (ret)
2249 goto out_put_keys;
2250
2251 /*
2252 * In order for us to be here, we know our q.key == key2, and since
2253 * we took the hb->lock above, we also know that futex_requeue() has
2254 * completed and we no longer have to concern ourselves with a wakeup
Darren Hart7ada8762010-10-17 08:35:04 -07002255 * race with the atomic proxy lock acquisition by the requeue code. The
2256 * futex_requeue dropped our key1 reference and incremented our key2
2257 * reference count.
Darren Hart52400ba2009-04-03 13:40:49 -07002258 */
2259
2260 /* Check if the requeue code acquired the second futex for us. */
2261 if (!q.rt_waiter) {
2262 /*
2263 * Got the lock. We might not be the anticipated owner if we
2264 * did a lock-steal - fix up the PI-state in that case.
2265 */
2266 if (q.pi_state && (q.pi_state->owner != current)) {
2267 spin_lock(q.lock_ptr);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002268 ret = fixup_pi_state_owner(uaddr2, &q, current);
Darren Hart52400ba2009-04-03 13:40:49 -07002269 spin_unlock(q.lock_ptr);
2270 }
2271 } else {
2272 /*
2273 * We have been woken up by futex_unlock_pi(), a timeout, or a
2274 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2275 * the pi_state.
2276 */
 2277 WARN_ON(!q.pi_state);
2278 pi_mutex = &q.pi_state->pi_mutex;
2279 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2280 debug_rt_mutex_free_waiter(&rt_waiter);
2281
2282 spin_lock(q.lock_ptr);
2283 /*
2284 * Fixup the pi_state owner and possibly acquire the lock if we
2285 * haven't already.
2286 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002287 res = fixup_owner(uaddr2, &q, !ret);
Darren Hart52400ba2009-04-03 13:40:49 -07002288 /*
 2289 * If fixup_owner() returned an error, propagate that. If it
Darren Hart56ec1602009-09-21 22:29:59 -07002290 * acquired the lock, clear -ETIMEDOUT or -EINTR.
Darren Hart52400ba2009-04-03 13:40:49 -07002291 */
2292 if (res)
2293 ret = (res < 0) ? res : 0;
2294
2295 /* Unqueue and drop the lock. */
2296 unqueue_me_pi(&q);
2297 }
2298
2299 /*
2300 * If fixup_pi_state_owner() faulted and was unable to handle the
2301 * fault, unlock the rt_mutex and return the fault to userspace.
2302 */
2303 if (ret == -EFAULT) {
2304 if (rt_mutex_owner(pi_mutex) == current)
2305 rt_mutex_unlock(pi_mutex);
2306 } else if (ret == -EINTR) {
Darren Hart52400ba2009-04-03 13:40:49 -07002307 /*
Darren Hartcc6db4e2009-07-31 16:20:10 -07002308 * We've already been requeued, but cannot restart by calling
2309 * futex_lock_pi() directly. We could restart this syscall, but
2310 * it would detect that the user space "val" changed and return
2311 * -EWOULDBLOCK. Save the overhead of the restart and return
2312 * -EWOULDBLOCK directly.
Darren Hart52400ba2009-04-03 13:40:49 -07002313 */
Thomas Gleixner20708872009-05-19 23:04:59 +02002314 ret = -EWOULDBLOCK;
Darren Hart52400ba2009-04-03 13:40:49 -07002315 }
2316
2317out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002318 put_futex_key(&q.key);
Thomas Gleixnerc8b15a72009-05-20 09:18:50 +02002319out_key2:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002320 put_futex_key(&key2);
Darren Hart52400ba2009-04-03 13:40:49 -07002321
2322out:
2323 if (to) {
2324 hrtimer_cancel(&to->timer);
2325 destroy_hrtimer_on_stack(&to->timer);
2326 }
2327 return ret;
2328}
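/*
 * Userspace pairing for the above, as a PI-aware condvar would issue it
 * (a sketch with the hypothetical futex() wrapper from earlier
 * examples; seq is the condvar sequence word):
 *
 *	waiter:	futex(&cond, FUTEX_WAIT_REQUEUE_PI, seq, 0,
 *		      &pi_mutex, 0);
 *	waker:	futex(&cond, FUTEX_CMP_REQUEUE_PI, 1, INT_MAX,
 *		      &pi_mutex, seq);
 *
 * As enforced in futex_requeue(), these two ops must only ever be
 * paired with each other, never with the plain WAIT/REQUEUE variants.
 */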
2329
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002330/*
2331 * Support for robust futexes: the kernel cleans up held futexes at
2332 * thread exit time.
2333 *
2334 * Implementation: user-space maintains a per-thread list of locks it
2335 * is holding. Upon do_exit(), the kernel carefully walks this list,
2336 * and marks all locks that are owned by this thread with the
Ingo Molnarc87e2832006-06-27 02:54:58 -07002337 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002338 * always manipulated with the lock held, so the list is private and
2339 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2340 * field, to allow the kernel to clean up if the thread dies after
2341 * acquiring the lock, but just before it could have added itself to
2342 * the list. There can only be one such pending lock.
2343 */
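/*
 * A minimal userspace registration sketch (assuming the structures from
 * <linux/futex.h>; struct my_lock and its futex_word member are purely
 * illustrative):
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },
 *		.futex_offset	 = offsetof(struct my_lock, futex_word),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Lock and unlock operations then link and unlink entries on head.list,
 * setting list_op_pending across the window where the lock is held but
 * not yet enqueued.
 */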
2344
2345/**
Darren Hartd96ee562009-09-21 22:30:22 -07002346 * sys_set_robust_list() - Set the robust-futex list head of a task
2347 * @head: pointer to the list-head
2348 * @len: length of the list-head, as userspace expects
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002349 */
Heiko Carstens836f92a2009-01-14 14:14:33 +01002350SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2351 size_t, len)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002352{
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08002353 if (!futex_cmpxchg_enabled)
2354 return -ENOSYS;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002355 /*
2356 * The kernel knows only one size for now:
2357 */
2358 if (unlikely(len != sizeof(*head)))
2359 return -EINVAL;
2360
2361 current->robust_list = head;
2362
2363 return 0;
2364}
2365
2366/**
Darren Hartd96ee562009-09-21 22:30:22 -07002367 * sys_get_robust_list() - Get the robust-futex list head of a task
2368 * @pid: pid of the process [zero for current task]
2369 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2370 * @len_ptr: pointer to a length field, the kernel fills in the header size
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002371 */
Heiko Carstens836f92a2009-01-14 14:14:33 +01002372SYSCALL_DEFINE3(get_robust_list, int, pid,
2373 struct robust_list_head __user * __user *, head_ptr,
2374 size_t __user *, len_ptr)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002375{
Al Viroba46df92006-10-10 22:46:07 +01002376 struct robust_list_head __user *head;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002377 unsigned long ret;
David Howellsc69e8d92008-11-14 10:39:19 +11002378 const struct cred *cred = current_cred(), *pcred;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002379
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08002380 if (!futex_cmpxchg_enabled)
2381 return -ENOSYS;
2382
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002383 if (!pid)
2384 head = current->robust_list;
2385 else {
2386 struct task_struct *p;
2387
2388 ret = -ESRCH;
Oleg Nesterovaaa2a972006-09-29 02:00:55 -07002389 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07002390 p = find_task_by_vpid(pid);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002391 if (!p)
2392 goto err_unlock;
2393 ret = -EPERM;
David Howellsc69e8d92008-11-14 10:39:19 +11002394 pcred = __task_cred(p);
2395 if (cred->euid != pcred->euid &&
2396 cred->euid != pcred->uid &&
David Howells76aac0e2008-11-14 10:39:12 +11002397 !capable(CAP_SYS_PTRACE))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002398 goto err_unlock;
2399 head = p->robust_list;
Oleg Nesterovaaa2a972006-09-29 02:00:55 -07002400 rcu_read_unlock();
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002401 }
2402
2403 if (put_user(sizeof(*head), len_ptr))
2404 return -EFAULT;
2405 return put_user(head, head_ptr);
2406
2407err_unlock:
Oleg Nesterovaaa2a972006-09-29 02:00:55 -07002408 rcu_read_unlock();
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002409
2410 return ret;
2411}
2412
2413/*
2414 * Process a futex-list entry, check whether it's owned by the
2415 * dying task, and do notification if so:
2416 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002417int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002418{
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002419 u32 uval, nval, mval;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002420
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08002421retry:
2422 if (get_user(uval, uaddr))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002423 return -1;
2424
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002425 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002426 /*
2427 * Ok, this dying thread is truly holding a futex
2428 * of interest. Set the OWNER_DIED bit atomically
2429 * via cmpxchg, and if the value had FUTEX_WAITERS
2430 * set, wake up a waiter (if any). (We have to do a
2431 * futex_wake() even if OWNER_DIED is already set -
2432 * to handle the rare but possible case of recursive
2433 * thread-death.) The rest of the cleanup is done in
2434 * userspace.
2435 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002436 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2437 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2438
Ingo Molnarc87e2832006-06-27 02:54:58 -07002439 if (nval == -EFAULT)
2440 return -1;
2441
2442 if (nval != uval)
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08002443 goto retry;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002444
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002445 /*
2446 * Wake robust non-PI futexes here. The wakeup of
2447 * PI futexes happens in exit_pi_state():
2448 */
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07002449 if (!pi && (uval & FUTEX_WAITERS))
Peter Zijlstrac2f9f202008-09-26 19:32:23 +02002450 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002451 }
2452 return 0;
2453}
2454
2455/*
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002456 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2457 */
2458static inline int fetch_robust_entry(struct robust_list __user **entry,
Al Viroba46df92006-10-10 22:46:07 +01002459 struct robust_list __user * __user *head,
Namhyung Kim1dcc41b2010-09-14 21:43:46 +09002460 unsigned int *pi)
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002461{
2462 unsigned long uentry;
2463
Al Viroba46df92006-10-10 22:46:07 +01002464 if (get_user(uentry, (unsigned long __user *)head))
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002465 return -EFAULT;
2466
Al Viroba46df92006-10-10 22:46:07 +01002467 *entry = (void __user *)(uentry & ~1UL);
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002468 *pi = uentry & 1;
2469
2470 return 0;
2471}
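/*
 * E.g. a fetched user word of 0x601 (an address made up for
 * illustration) decodes to the robust_list entry at 0x600 with the PI
 * bit set:
 *
 *	*entry = (void __user *)(0x601 & ~1UL);		0x600
 *	*pi    = 0x601 & 1;				1
 */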
2472
2473/*
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002474 * Walk curr->robust_list (very carefully, it's a userspace list!)
2475 * and mark any locks found there dead, and notify any waiters.
2476 *
2477 * We silently return on any sign of list-walking problem.
2478 */
2479void exit_robust_list(struct task_struct *curr)
2480{
2481 struct robust_list_head __user *head = curr->robust_list;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07002482 struct robust_list __user *entry, *next_entry, *pending;
Darren Hart4c115e92010-11-04 15:00:00 -04002483 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2484 unsigned int uninitialized_var(next_pi);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002485 unsigned long futex_offset;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07002486 int rc;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002487
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08002488 if (!futex_cmpxchg_enabled)
2489 return;
2490
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002491 /*
2492 * Fetch the list head (which was registered earlier, via
2493 * sys_set_robust_list()):
2494 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002495 if (fetch_robust_entry(&entry, &head->list.next, &pi))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002496 return;
2497 /*
2498 * Fetch the relative futex offset:
2499 */
2500 if (get_user(futex_offset, &head->futex_offset))
2501 return;
2502 /*
2503 * Fetch any possibly pending lock-add first, and handle it
2504 * if it exists:
2505 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002506 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002507 return;
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002508
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07002509 next_entry = NULL; /* avoid warning with gcc */
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002510 while (entry != &head->list) {
2511 /*
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07002512 * Fetch the next entry in the list before calling
2513 * handle_futex_death:
2514 */
2515 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2516 /*
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002517 * A pending lock might already be on the list, so
Ingo Molnarc87e2832006-06-27 02:54:58 -07002518 * don't process it twice:
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002519 */
2520 if (entry != pending)
Al Viroba46df92006-10-10 22:46:07 +01002521 if (handle_futex_death((void __user *)entry + futex_offset,
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002522 curr, pi))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002523 return;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07002524 if (rc)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002525 return;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07002526 entry = next_entry;
2527 pi = next_pi;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002528 /*
2529 * Avoid excessively long or circular lists:
2530 */
2531 if (!--limit)
2532 break;
2533
2534 cond_resched();
2535 }
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07002536
2537 if (pending)
2538 handle_futex_death((void __user *)pending + futex_offset,
2539 curr, pip);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002540}
2541
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int ret = -ENOSYS, cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAIT_BITSET:
		ret = futex_wait(uaddr, flags, val, timeout, val3);
		break;
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAKE_BITSET:
		ret = futex_wake(uaddr, flags, val, val3);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
		break;
	case FUTEX_LOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
		break;
	case FUTEX_UNLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_unlock_pi(uaddr, flags);
		break;
	case FUTEX_TRYLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
		break;
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		ret = futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					    uaddr2);
		break;
	case FUTEX_CMP_REQUEUE_PI:
		ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}

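/*
 * Userspace reference (a sketch, not part of this file): the
 * multiplexing above is where the classic raw wait/wake pair ends up,
 * assuming <linux/futex.h> and syscall(2):
 *
 *	// Sleep while *uaddr still contains 'val':
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
 *
 *	// Wake at most one waiter on uaddr:
 *	syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
 *
 * FUTEX_WAIT_PRIVATE is FUTEX_WAIT | FUTEX_PRIVATE_FLAG, so these calls
 * leave FLAGS_SHARED clear and avoid the shared-mapping key lookup.
 */
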
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

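/*
 * Note on timeouts (illustrative userspace fragments, assuming the uapi
 * constants above): FUTEX_WAIT treats 'utime' as a relative interval,
 * which the wrapper above converts into an absolute expiry via
 * ktime_add_safe(); FUTEX_WAIT_BITSET passes 'utime' through as an
 * absolute time, measured against CLOCK_MONOTONIC unless
 * FUTEX_CLOCK_REALTIME is set:
 *
 *	struct timespec rel = { .tv_sec = 1 };	// wait at most 1s
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT, val, &rel, NULL, 0);
 *
 *	struct timespec abs;
 *	clock_gettime(CLOCK_MONOTONIC, &abs);
 *	abs.tv_sec += 1;			// absolute deadline
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET, val, &abs, NULL,
 *		FUTEX_BITSET_MATCH_ANY);
 */
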
static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on a functional
	 * implementation, while the non-functional ones return
	 * -ENOSYS.
	 */
	curval = cmpxchg_futex_value_locked(NULL, 0, 0);
	if (curval == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);
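
/*
 * The probe in futex_init() leans on an arch-level contract, sketched
 * here for illustration (roughly the asm-generic style stub, not any
 * particular architecture's code): an implemented
 * futex_atomic_cmpxchg_inatomic() returns -EFAULT for the faulting NULL
 * address, while an architecture without the primitive returns -ENOSYS
 * unconditionally:
 *
 *	static inline int
 *	futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
 *				      int newval)
 *	{
 *		return -ENOSYS;	// no atomic user-space cmpxchg here
 *	}
 */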