/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

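/*
 * Non-zero when the architecture provides a working atomic cmpxchg on the
 * futex word (probed at init time, outside this excerpt); code below that
 * depends on it, such as the PI state cleanup, bails out early when zero.
 */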
int __read_mostly futex_cmpxchg_enabled;

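/*
 * 2^FUTEX_HASHBITS hash buckets: 16 on CONFIG_BASE_SMALL systems,
 * 256 otherwise.
 */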
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me() */
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *		VERIFY_WRITE)
 *
 * Returns a negative error code or 0
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *page_head;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 *       but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page_head = page;
	if (unlikely(PageTail(page))) {
		put_page(page);
		/* serialize against __split_huge_page_splitting() */
		local_irq_disable();
		if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
			page_head = compound_head(page);
			/*
			 * page_head is valid pointer but we must pin
			 * it before taking the PG_lock and/or
			 * PG_compound_lock. The moment we re-enable
			 * irqs __split_huge_page_splitting() can
			 * return and the head page can be freed from
			 * under us. We can't take the PG_lock and/or
			 * PG_compound_lock on a page that could be
			 * freed from under us.
			 */
			if (page != page_head) {
				get_page(page_head);
				put_page(page);
			}
			local_irq_enable();
		} else {
			local_irq_enable();
			goto again;
		}
	}
#else
	page_head = compound_head(page);
	if (page != page_head) {
		get_page(page_head);
		put_page(page);
	}
#endif

	lock_page(page_head);

	/*
	 * If page_head->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page_head->mapping.
	 */
	if (!page_head->mapping) {
		int shmem_swizzled = PageSwapCache(page_head);
		unlock_page(page_head);
		put_page(page_head);
		if (shmem_swizzled)
			goto again;
		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page_head)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page_head->mapping->host;
		key->shared.pgoff = page_head->index;
	}

	get_futex_key_refs(key);

out:
	unlock_page(page_head);
	put_page(page_head);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

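/*
 * Fetch the futex value with page faults disabled, like
 * cmpxchg_futex_value_locked() above: safe to call with the hash bucket
 * lock held; a fault is reported as -EFAULT instead of being handled.
 */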
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

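/*
 * Look up the pi_state for the futex @key in @hb. If another waiter is
 * already queued, take a reference on its pi_state and return it through
 * @ps. Otherwise look up the task owning the TID encoded in @uval,
 * allocate a new pi_state from the per-task cache, make that task the
 * owner of the proxy-locked rt_mutex and attach the pi_state to it.
 */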
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
			 * When pi_state->owner is NULL then the owner died
			 * and another waiter is on the fly. pi_state->owner
			 * is fixed up by the task which acquires
			 * pi_state->rt_mutex.
			 *
			 * We do not check for pid == 0 which can happen when
			 * the owner died and robust_list_exit() cleared the
			 * TID.
			 */
			if (pid && pi_state->owner) {
				/*
				 * Bail out if user space manipulated the
				 * futex value.
				 */
				if (pid != task_pid_vnr(pi_state->owner))
					return -EINVAL;
			}

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
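 *
 * The futex word of a PI futex encodes the owner: 0 means unlocked, an
 * uncontended lock holds the owner's TID, and FUTEX_WAITERS is set once at
 * least one task is blocked in the kernel (FUTEX_OWNER_DIED marks a dead
 * owner whose lock must be taken over).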
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	int lock_taken, ret, ownerdied = 0;
	u32 uval, newval, curval, vpid = task_pid_vnr(task);

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = vpid;
	if (set_waiters)
		newval |= FUTEX_WAITERS;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * There are two cases where a futex might have no owner (the
	 * owner TID is 0): the caller observed OWNER_DIED and set
	 * ownerdied, or the exiting owner's robust-list cleanup cleared
	 * the TID. We take over the futex in both cases.
	 *
	 * This is safe as we are protected by the hash bucket lock !
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | vpid;
		ownerdied = 0;
		lock_taken = 1;
	}

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to owner-died take over.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	struct task_struct *p = q->task;

	/*
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
	 * a non-futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non-existing task
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
	 */
	get_task_struct(p);

	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	raw_spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);

	raw_spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock_irq(&new_owner->pi_lock);

	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 uninitialized_var(oldval);

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit does not have to be preserved here. We are the owner:
	 */
	if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
		return -EFAULT;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(&key);
out:
	return ret;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
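 *
 * For FUTEX_WAKE_OP this means: atomically apply the encoded operation to
 * *uaddr2, wake up to nr_wake waiters on uaddr1 and, if the encoded
 * comparison against the old value of *uaddr2 succeeds, also wake up to
 * nr_wake2 waiters on uaddr2. The return value is the total number of
 * woken tasks.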
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

Jakub Jelinek4732efb2005-09-06 15:16:25 -07001064 if (ret)
Darren Hartde87fcc2009-03-12 00:55:46 -07001065 goto out_put_keys;
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001066
Darren Hartb41277d2010-11-08 13:10:09 -08001067 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07001068 goto retry_private;
1069
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001070 put_futex_key(&key2);
1071 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07001072 goto retry;
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001073 }
1074
Ingo Molnare2970f22006-06-27 02:54:47 -07001075 head = &hb1->chain;
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001076
Pierre Peifferec92d082007-05-09 02:35:00 -07001077 plist_for_each_entry_safe(this, next, head, list) {
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001078 if (match_futex (&this->key, &key1)) {
1079 wake_futex(this);
1080 if (++ret >= nr_wake)
1081 break;
1082 }
1083 }
1084
1085 if (op_ret > 0) {
Ingo Molnare2970f22006-06-27 02:54:47 -07001086 head = &hb2->chain;
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001087
1088 op_ret = 0;
Pierre Peifferec92d082007-05-09 02:35:00 -07001089 plist_for_each_entry_safe(this, next, head, list) {
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001090 if (match_futex (&this->key, &key2)) {
1091 wake_futex(this);
1092 if (++op_ret >= nr_wake2)
1093 break;
1094 }
1095 }
1096 ret += op_ret;
1097 }
1098
Darren Hart5eb3dc62009-03-12 00:55:52 -07001099 double_unlock_hb(hb1, hb2);
Darren Hart42d35d42008-12-29 15:49:53 -08001100out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001101 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08001102out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001103 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08001104out:
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001105 return ret;
1106}
1107
Darren Hart9121e472009-04-03 13:40:31 -07001108/**
1109 * requeue_futex() - Requeue a futex_q from one hb to another
1110 * @q: the futex_q to requeue
1111 * @hb1: the source hash_bucket
1112 * @hb2: the target hash_bucket
1113 * @key2: the new key for the requeued futex_q
1114 */
1115static inline
1116void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1117 struct futex_hash_bucket *hb2, union futex_key *key2)
1118{
1119
1120 /*
1121 * If key1 and key2 hash to the same bucket, no need to
1122 * requeue.
1123 */
1124 if (likely(&hb1->chain != &hb2->chain)) {
1125 plist_del(&q->list, &hb1->chain);
1126 plist_add(&q->list, &hb2->chain);
1127 q->lock_ptr = &hb2->lock;
Darren Hart9121e472009-04-03 13:40:31 -07001128 }
1129 get_futex_key_refs(key2);
1130 q->key = *key2;
1131}
1132
Darren Hart52400ba2009-04-03 13:40:49 -07001133/**
1134 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
Darren Hartd96ee562009-09-21 22:30:22 -07001135 * @q: the futex_q
1136 * @key: the key of the requeue target futex
1137 * @hb: the hash_bucket of the requeue target futex
Darren Hart52400ba2009-04-03 13:40:49 -07001138 *
1139 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1140 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1141 * to the requeue target futex so the waiter can detect the wakeup on the right
1142 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
Darren Hartbeda2c72009-08-09 15:34:39 -07001143 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1144 * to protect access to the pi_state to fixup the owner later. Must be called
1145 * with both q->lock_ptr and hb->lock held.
Darren Hart52400ba2009-04-03 13:40:49 -07001146 */
1147static inline
Darren Hartbeda2c72009-08-09 15:34:39 -07001148void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1149 struct futex_hash_bucket *hb)
Darren Hart52400ba2009-04-03 13:40:49 -07001150{
Darren Hart52400ba2009-04-03 13:40:49 -07001151 get_futex_key_refs(key);
1152 q->key = *key;
1153
Lai Jiangshan2e129782010-12-22 14:18:50 +08001154 __unqueue_futex(q);
Darren Hart52400ba2009-04-03 13:40:49 -07001155
1156 WARN_ON(!q->rt_waiter);
1157 q->rt_waiter = NULL;
1158
Darren Hartbeda2c72009-08-09 15:34:39 -07001159 q->lock_ptr = &hb->lock;
Darren Hartbeda2c72009-08-09 15:34:39 -07001160
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001161 wake_up_state(q->task, TASK_NORMAL);
Darren Hart52400ba2009-04-03 13:40:49 -07001162}
1163
/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Returns:
 *  0 - failed to acquire the lock atomically
 *  1 - acquired the lock
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				      struct futex_hash_bucket *hb1,
				      struct futex_hash_bucket *hb2,
				      union futex_key *key1, union futex_key *key2,
				      struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1)
		requeue_pi_wake_futex(top_waiter, key2, hb2);

	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Returns:
 * >=0 - on success, the number of tasks requeued or woken
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	u32 curval2;

	if (requeue_pi) {
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner. However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state. Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	if (pi_state != NULL) {
		/*
		 * We will have to lookup the pi_state again, so free this one
		 * to keep the accounting correct.
		 */
		free_pi_state(pi_state);
		pi_state = NULL;
	}

	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

Ingo Molnare2970f22006-06-27 02:54:47 -07001297 hb1 = hash_futex(&key1);
1298 hb2 = hash_futex(&key2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299
Darren Harte4dc5b72009-03-12 00:56:13 -07001300retry_private:
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001301 double_lock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
Ingo Molnare2970f22006-06-27 02:54:47 -07001303 if (likely(cmpval != NULL)) {
1304 u32 curval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305
Ingo Molnare2970f22006-06-27 02:54:47 -07001306 ret = get_futex_value_locked(&curval, uaddr1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307
1308 if (unlikely(ret)) {
Darren Hart5eb3dc62009-03-12 00:55:52 -07001309 double_unlock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310
Darren Harte4dc5b72009-03-12 00:56:13 -07001311 ret = get_user(curval, uaddr1);
1312 if (ret)
1313 goto out_put_keys;
1314
Darren Hartb41277d2010-11-08 13:10:09 -08001315 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07001316 goto retry_private;
1317
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001318 put_futex_key(&key2);
1319 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07001320 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 }
Ingo Molnare2970f22006-06-27 02:54:47 -07001322 if (curval != *cmpval) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 ret = -EAGAIN;
1324 goto out_unlock;
1325 }
1326 }
1327
Darren Hart52400ba2009-04-03 13:40:49 -07001328 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
Darren Hartbab5bc92009-04-07 23:23:50 -07001329 /*
1330 * Attempt to acquire uaddr2 and wake the top waiter. If we
1331 * intend to requeue waiters, force setting the FUTEX_WAITERS
1332 * bit. We force this here where we are able to easily handle
1333 * faults rather in the requeue loop below.
1334 */
Darren Hart52400ba2009-04-03 13:40:49 -07001335 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
Darren Hartbab5bc92009-04-07 23:23:50 -07001336 &key2, &pi_state, nr_requeue);
Darren Hart52400ba2009-04-03 13:40:49 -07001337
1338 /*
1339 * At this point the top_waiter has either taken uaddr2 or is
1340 * waiting on it. If the former, then the pi_state will not
1341 * exist yet, look it up one more time to ensure we have a
1342 * reference to it.
1343 */
1344 if (ret == 1) {
1345 WARN_ON(pi_state);
Darren Hart89061d32009-10-15 15:30:48 -07001346 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07001347 task_count++;
1348 ret = get_futex_value_locked(&curval2, uaddr2);
1349 if (!ret)
1350 ret = lookup_pi_state(curval2, hb2, &key2,
1351 &pi_state);
1352 }
1353
1354 switch (ret) {
1355 case 0:
1356 break;
1357 case -EFAULT:
1358 double_unlock_hb(hb1, hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001359 put_futex_key(&key2);
1360 put_futex_key(&key1);
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001361 ret = fault_in_user_writeable(uaddr2);
Darren Hart52400ba2009-04-03 13:40:49 -07001362 if (!ret)
1363 goto retry;
1364 goto out;
1365 case -EAGAIN:
1366 /* The owner was exiting, try again. */
1367 double_unlock_hb(hb1, hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001368 put_futex_key(&key2);
1369 put_futex_key(&key1);
Darren Hart52400ba2009-04-03 13:40:49 -07001370 cond_resched();
1371 goto retry;
1372 default:
1373 goto out_unlock;
1374 }
1375 }
1376
Ingo Molnare2970f22006-06-27 02:54:47 -07001377 head1 = &hb1->chain;
Pierre Peifferec92d082007-05-09 02:35:00 -07001378 plist_for_each_entry_safe(this, next, head1, list) {
Darren Hart52400ba2009-04-03 13:40:49 -07001379 if (task_count - nr_wake >= nr_requeue)
1380 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Darren Hart52400ba2009-04-03 13:40:49 -07001382 if (!match_futex(&this->key, &key1))
1383 continue;
1384
Darren Hart392741e2009-08-07 15:20:48 -07001385 /*
1386 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1387 * be paired with each other and no other futex ops.
1388 */
1389 if ((requeue_pi && !this->rt_waiter) ||
1390 (!requeue_pi && this->rt_waiter)) {
1391 ret = -EINVAL;
1392 break;
1393 }
Darren Hart52400ba2009-04-03 13:40:49 -07001394
1395 /*
1396 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1397 * lock, we already woke the top_waiter. If not, it will be
1398 * woken by futex_unlock_pi().
1399 */
1400 if (++task_count <= nr_wake && !requeue_pi) {
1401 wake_futex(this);
1402 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 }
Darren Hart52400ba2009-04-03 13:40:49 -07001404
Darren Hart84bc4af2009-08-13 17:36:53 -07001405 /* Ensure we requeue to the expected futex for requeue_pi. */
1406 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1407 ret = -EINVAL;
1408 break;
1409 }
1410
Darren Hart52400ba2009-04-03 13:40:49 -07001411 /*
1412 * Requeue nr_requeue waiters and possibly one more in the case
1413 * of requeue_pi if we couldn't acquire the lock atomically.
1414 */
1415 if (requeue_pi) {
1416 /* Prepare the waiter to take the rt_mutex. */
1417 atomic_inc(&pi_state->refcount);
1418 this->pi_state = pi_state;
1419 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1420 this->rt_waiter,
1421 this->task, 1);
1422 if (ret == 1) {
1423 /* We got the lock. */
Darren Hartbeda2c72009-08-09 15:34:39 -07001424 requeue_pi_wake_futex(this, &key2, hb2);
Darren Hart89061d32009-10-15 15:30:48 -07001425 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07001426 continue;
1427 } else if (ret) {
1428 /* -EDEADLK */
1429 this->pi_state = NULL;
1430 free_pi_state(pi_state);
1431 goto out_unlock;
1432 }
1433 }
1434 requeue_futex(this, hb1, hb2, &key2);
1435 drop_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 }
1437
1438out_unlock:
Darren Hart5eb3dc62009-03-12 00:55:52 -07001439 double_unlock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440
Darren Hartcd84a422009-04-02 14:19:38 -07001441 /*
1442 * drop_futex_key_refs() must be called outside the spinlocks. During
1443 * the requeue we moved futex_q's from the hash bucket at key1 to the
1444 * one at key2 and updated their key pointer. We no longer need to
1445 * hold the references to key1.
1446 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 while (--drop_count >= 0)
Rusty Russell9adef582007-05-08 00:26:42 -07001448 drop_futex_key_refs(&key1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
Darren Hart42d35d42008-12-29 15:49:53 -08001450out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001451 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08001452out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001453 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08001454out:
Darren Hart52400ba2009-04-03 13:40:49 -07001455 if (pi_state != NULL)
1456 free_pi_state(pi_state);
1457 return ret ? ret : task_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458}
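
/*
 * Illustrative userspace sketch (not kernel code): a minimal
 * pthread_cond_broadcast() analog driving the requeue_pi path above
 * through the raw syscall. The names cond_val, mutex_val and
 * cond_broadcast() are invented for this example; the op code and
 * argument order follow futex(2). A real condvar also versions
 * cond_val under the mutex to avoid lost wakeups; that and all error
 * handling are omitted here. The matching waiter side is sketched
 * after futex_wait_requeue_pi() below.
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned int cond_val;		non-PI futex, the "condvar"
 *	unsigned int mutex_val;		PI futex, the "mutex"
 *
 *	void cond_broadcast(void)
 *	{
 *		unsigned int old = cond_val;
 *
 *		wake one waiter (nr_wake must be 1, as enforced above)
 *		and requeue up to INT_MAX others onto the PI futex;
 *		the timeout slot carries nr_requeue for this op:
 *		syscall(SYS_futex, &cond_val, FUTEX_CMP_REQUEUE_PI,
 *			1, (void *)(long)INT_MAX, &mutex_val, old);
 *	}
 */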

/* The key must already be stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me(). The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see futex_wait_requeue_pi()
 * for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 *   1 - if the futex_q was still queued (and we unqueued it)
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for
 * non-private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, uninitialized_var(curval), newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * previous highest priority waiter or we are the highest priority
	 * waiter but failed to get the rtmutex the first time.
	 * We have to replace the newowner TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the highest priority
	 * waiter itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}
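
/*
 * For reference, an illustrative decoding of the futex word that the
 * cmpxchg loop above rewrites. The bit definitions are the real ABI
 * from <linux/futex.h>; the helper itself is invented for the example.
 *
 *	#include <linux/futex.h>
 *	#include <stdio.h>
 *
 *	void show_pi_word(unsigned int uval)
 *	{
 *		bits 0-29 hold the owner TID, bit 30 flags a dead owner,
 *		bit 31 means waiters are queued in the kernel:
 *		printf("owner tid=%u waiters=%d owner_died=%d\n",
 *		       uval & FUTEX_TID_MASK,
 *		       !!(uval & FUTEX_WAITERS),
 *		       !!(uval & FUTEX_OWNER_DIED));
 *	}
 */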

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to clean up
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Returns:
 *  1 - success, lock taken
 *  0 - success, lock not taken
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourselves from
		 * the rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late.
		 */
		raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		if (!owner)
			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
		raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
		ret = fixup_pi_state_owner(uaddr, q, owner);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or NULL for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket. Get the futex value and
 * compare it with the expected value. Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Returns:
 *  0 - uaddr contains val and hb has been locked
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			    struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond. If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr. This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(q, *hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(q, *hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}
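
/*
 * A concrete (illustrative) userspace instance of the waiter/waker
 * ordering described above. The flag variable and both helpers are
 * invented; what matters is the protocol: the waiter passes the value
 * it observed to FUTEX_WAIT, and the waker stores the new value before
 * waking, so a waiter only ever blocks while cond(var) held true.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned int flag;	the futex word; cond(var) is "flag != 0"
 *
 *	void wait_for_flag(void)
 *	{
 *		unsigned int val;
 *
 *		while ((val = __atomic_load_n(&flag, __ATOMIC_SEQ_CST)) == 0)
 *			syscall(SYS_futex, &flag, FUTEX_WAIT, val, NULL);
 *	}
 *
 *	void set_flag(void)
 *	{
 *		__atomic_store_n(&flag, 1, __ATOMIC_SEQ_CST);
 *		syscall(SYS_futex, &flag, FUTEX_WAKE, 1);
 *	}
 */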

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
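
/*
 * From userspace, the return paths of futex_wait() above surface as
 * errno values of the FUTEX_WAIT syscall; -ERESTARTSYS and
 * -ERESTART_RESTARTBLOCK are consumed by the signal code, which either
 * restarts the call transparently or reports EINTR. A minimal
 * (illustrative) caller might distinguish them like this; do_wait() is
 * an invented name, and plain FUTEX_WAIT takes a relative timeout,
 * unlike the absolute abs_time used internally.
 *
 *	#include <errno.h>
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	int do_wait(unsigned int *uaddr, unsigned int val,
 *		    const struct timespec *rel)
 *	{
 *		if (syscall(SYS_futex, uaddr, FUTEX_WAIT, val, rel) == 0)
 *			return 0;	woken by FUTEX_WAKE
 *		if (errno == EAGAIN)
 *			return 0;	*uaddr != val: recheck condition
 *		return -1;		ETIMEDOUT or EINTR: caller decides
 *	}
 */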

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}

/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that. If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(&q, hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(&q, hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}
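
/*
 * Illustrative userspace counterpart of the comment above: the
 * 0 -> TID transition is tried in userspace first, and only its
 * failure enters futex_lock_pi(). lock_word and pi_lock() are invented
 * names; the protocol and op code follow futex(2).
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned int lock_word;	PI futex, 0 means unlocked
 *
 *	void pi_lock(void)
 *	{
 *		unsigned int tid = syscall(SYS_gettid);
 *
 *		fast path: uncontended 0 -> TID transition
 *		if (__sync_bool_compare_and_swap(&lock_word, 0, tid))
 *			return;
 *		slow path: the kernel queues us on the rt_mutex, boosts
 *		the owner and rewrites lock_word on our behalf
 *		syscall(SYS_futex, &lock_word, FUTEX_LOCK_PI, 0, NULL);
 *	}
 */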

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	u32 uval, vpid = task_pid_vnr(current);
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	if (!(uval & FUTEX_OWNER_DIED) &&
	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == vpid))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex(&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

out:
	return ret;

pi_faulted:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}
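
/*
 * The matching (illustrative) unlock side of the pi_lock() sketch
 * after futex_lock_pi() above: userspace attempts the TID -> 0
 * transition, and only the contended case, with FUTEX_WAITERS set,
 * lands in futex_unlock_pi().
 *
 *	void pi_unlock(void)
 *	{
 *		unsigned int tid = syscall(SYS_gettid);
 *
 *		fast path: no waiters, plain TID -> 0
 *		if (__sync_bool_compare_and_swap(&lock_word, tid, 0))
 *			return;
 *		slow path: the kernel wakes the top waiter and hands
 *		the futex word over to it
 *		syscall(SYS_futex, &lock_word, FUTEX_UNLOCK_PI, 0, NULL);
 *	}
 */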

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket the futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex. If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller. Must be
 * called with the hb lock held.
 *
 * Returns:
 *  0 - no early wakeup detected
 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); both futexes
 *		must be of the same type (no requeueing from private to
 *		shared, etc.)
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and distinct from uaddr. Normal wakeup will
 * wake on uaddr2 and complete the acquisition of the rt_mutex prior to
 * returning to userspace. This ensures the rt_mutex maintains an owner when
 * it has waiters; without one, the pi logic would not know which task to
 * boost/deboost, if there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Returns:
 *  0 - On success
 * <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct rt_mutex *pi_mutex = NULL;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	debug_rt_mutex_init_waiter(&rt_waiter);
	rt_waiter.task = NULL;

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			spin_unlock(q.lock_ptr);
		}
	} else {
		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
		debug_rt_mutex_free_waiter(&rt_waiter);

		spin_lock(q.lock_ptr);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that. If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	/*
	 * If fixup_pi_state_owner() faulted and was unable to handle the
	 * fault, unlock the rt_mutex and return the fault to userspace.
	 */
	if (ret == -EFAULT) {
		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
			rt_mutex_unlock(pi_mutex);
	} else if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK. Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
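
/*
 * The (illustrative) waiter matching the cond_broadcast() sketch after
 * futex_requeue() above, completing the pthread_cond_wait() analog.
 * cond_val, mutex_val and pi_unlock() refer to the earlier sketches;
 * as in the broadcaster, the versioning a real condvar needs to close
 * the window between pi_unlock() and the syscall is omitted. On a
 * normal wakeup the call returns with mutex_val held, exactly as
 * futex_wait_requeue_pi() guarantees.
 *
 *	void cond_wait(void)
 *	{
 *		unsigned int old = cond_val;
 *
 *		pi_unlock();
 *		sleep on cond_val; a requeue plus wake returns with the
 *		PI futex acquired on our behalf:
 *		syscall(SYS_futex, &cond_val, FUTEX_WAIT_REQUEUE_PI,
 *			old, NULL, &mutex_val, 0);
 *	}
 */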

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}
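
/*
 * Illustrative userspace sketch: registering the robust list head once
 * per thread (as glibc does at thread start) so exit_robust_list()
 * below can find the locks this thread holds when it dies. The struct
 * layout is the real ABI from <linux/futex.h>; struct robust_mutex and
 * the wrapper are invented, and error handling is omitted.
 *
 *	#include <linux/futex.h>
 *	#include <stddef.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct robust_mutex {
 *		struct robust_list list;	links held locks
 *		unsigned int futex_word;
 *	};
 *
 *	static __thread struct robust_list_head rlh;
 *
 *	void register_robust_list(void)
 *	{
 *		rlh.list.next = &rlh.list;	empty, circular
 *		rlh.futex_offset =
 *			offsetof(struct robust_mutex, futex_word) -
 *			offsetof(struct robust_mutex, list);
 *		rlh.list_op_pending = NULL;
 *		syscall(SYS_set_robust_list, &rlh, sizeof(rlh));
 *	}
 */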

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, uninitialized_var(nval), mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}
2536
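/*
 * For reference, the 32-bit futex value layout this function decodes
 * (from the UAPI FUTEX_* masks; the lock-path snippet below is an
 * illustrative userspace sketch, with 'm', 'my_tid' and
 * try_takeover() hypothetical):
 *
 *	bits 0-29  FUTEX_TID_MASK	TID of the current owner
 *	bit  30    FUTEX_OWNER_DIED	owner died without unlocking
 *	bit  31    FUTEX_WAITERS	somebody is blocked on the futex
 *
 *	u32 old = __sync_val_compare_and_swap(&m->futex_word, 0, my_tid);
 *	if (old == 0)
 *		return 0;		// uncontended acquire
 *	if (old & FUTEX_OWNER_DIED)
 *		// the kernel cleared the TID field in the cmpxchg above;
 *		// take the lock over and recover the protected state
 *		try_takeover(m, old);
 */
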
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

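/*
 * The encoding side of that bit, as userspace builds the list (an
 * illustrative sketch; 'm->robust_node' and 'is_pi' are hypothetical):
 * list entries are word-aligned, so bit 0 is free to carry the PI flag:
 *
 *	uintptr_t uentry = (uintptr_t)&m->robust_node;
 *	if (is_pi)
 *		uentry |= 1UL;	// stripped again by fetch_robust_entry()
 *	prev->next = (struct robust_list *)uentry;
 */
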
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

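/*
 * The list_op_pending handling above closes the window where a task
 * dies after acquiring a lock but before linking it into its robust
 * list. Userspace is expected to follow this order (an illustrative
 * sketch; acquire_futex() and enqueue() are hypothetical helpers):
 *
 *	head.list_op_pending = &m->robust_node;		// 1. announce intent
 *	acquire_futex(&m->futex_word);			// 2. take the lock
 *	enqueue(&head.list, &m->robust_node);		// 3. link it in
 *	head.list_op_pending = NULL;			// 4. retract intent
 *
 * A death between steps 1 and 4 is still caught via the pending
 * pointer; handle_futex_death() checks the TID field, so a lock that
 * was never actually acquired is left untouched.
 */
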
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through - WAIT is WAIT_BITSET with an all-ones bitset */
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through - WAKE is WAKE_BITSET with an all-ones bitset */
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, val, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, 0, timeout, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}


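/*
 * An illustrative sketch (not kernel code) of the op encoding decoded
 * above: userspace keeps the command in the low bits and ORs the
 * modifier flags on top:
 *
 *	int op = FUTEX_WAIT_BITSET
 *	       | FUTEX_PRIVATE_FLAG	// process-private, skips FLAGS_SHARED
 *	       | FUTEX_CLOCK_REALTIME;	// absolute timeout on CLOCK_REALTIME
 *
 *	syscall(SYS_futex, &futex_word, op, expected_val,
 *		&abs_timeout, NULL, FUTEX_BITSET_MATCH_ANY);
 */
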
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

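/*
 * Note the timeout asymmetry handled above: plain FUTEX_WAIT passes a
 * relative timeout, which is converted to an absolute deadline via
 * ktime_add_safe(), while FUTEX_WAIT_BITSET expects an absolute one.
 * From the userspace side (an illustrative sketch):
 *
 *	struct timespec rel = { .tv_sec = 1 };	// wait at most one second
 *	syscall(SYS_futex, &w, FUTEX_WAIT, val, &rel, NULL, 0);
 *
 *	struct timespec abs;			// same deadline, absolute
 *	clock_gettime(CLOCK_MONOTONIC, &abs);
 *	abs.tv_sec += 1;
 *	syscall(SYS_futex, &w, FUTEX_WAIT_BITSET, val, &abs,
 *		NULL, FUTEX_BITSET_MATCH_ANY);
 */
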
static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);