/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A mutex count of -1 indicates that waiters are sleeping waiting for the
 * mutex. Some architectures can allow any negative number, not just -1, for
 * this purpose.
 */
#ifdef __ARCH_ALLOW_ANY_NEGATIVE_MUTEX_COUNT
#define MUTEX_SHOW_NO_WAITER(mutex)     (atomic_read(&(mutex)->count) >= 0)
#else
#define MUTEX_SHOW_NO_WAITER(mutex)     (atomic_read(&(mutex)->count) != -1)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

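/*
 * Illustrative sketch (not part of the original file): typical ways to
 * initialize a mutex, either statically or at runtime, before it is ever
 * locked. The structure and function names below (struct my_dev,
 * my_dev_probe()) are hypothetical and only show the intended use of
 * DEFINE_MUTEX()/mutex_init().
 *
 *	static DEFINE_MUTEX(my_global_lock);	// statically defined
 *
 *	struct my_dev {
 *		struct mutex	lock;
 *	};
 *
 *	static int my_dev_probe(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);	// runtime init; never memset() a mutex
 *		return 0;
 *	}
 */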

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
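
/*
 * Illustrative sketch (not from the original file): the canonical
 * lock/unlock pattern around a critical section. All names here
 * (my_list, my_list_lock, my_add()) are made up for illustration.
 *
 *	static LIST_HEAD(my_list);
 *	static DEFINE_MUTEX(my_list_lock);
 *
 *	void my_add(struct list_head *entry)
 *	{
 *		mutex_lock(&my_list_lock);	// may sleep; not for IRQ context
 *		list_add_tail(entry, &my_list);
 *		mutex_unlock(&my_list_lock);	// always by the same task
 *	}
 */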

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking
         * that lock->owner still matches owner. If that check fails, owner
         * might point to free()d memory; if it still matches, the
         * rcu_read_lock() ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                arch_mutex_cpu_relax();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign of heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner before time:
         * the slow path will always be taken, and that clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         */

        for (;;) {
                struct task_struct *owner;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                if ((atomic_read(&lock->count) == 1) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        preempt_enable();
                        return 0;
                }

                /*
                 * When there's no owner, we might have preempted the owner
                 * between acquiring the lock and setting the owner field.
                 * If we're an RT task, that will live-lock, because we won't
                 * let the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (MUTEX_SHOW_NO_WAITER(lock) &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
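
/*
 * Illustrative sketch (not from the original file): mutex_lock_nested() is
 * meant for the case where lockdep would otherwise see two locks of the
 * same class held at once, e.g. two instances of the same object type
 * locked in a fixed order. The struct and function names below are
 * hypothetical.
 *
 *	struct my_obj {
 *		struct mutex	lock;
 *	};
 *
 *	static void my_transfer(struct my_obj *src, struct my_obj *dst)
 *	{
 *		mutex_lock(&src->lock);
 *		mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		// ... move data from src to dst ...
 *		mutex_unlock(&dst->lock);
 *		mutex_unlock(&src->lock);
 *	}
 */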

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
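
/*
 * Illustrative sketch (not from the original file): callers of
 * mutex_lock_interruptible() must check the return value and back out
 * on -EINTR. The handler and helper below are hypothetical.
 *
 *	static int my_ioctl(struct my_dev *dev, unsigned long arg)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(&dev->lock);
 *		if (ret)
 *			return ret;	// -EINTR: a signal interrupted the wait
 *		ret = do_my_work(dev, arg);
 *		mutex_unlock(&dev->lock);
 *		return ret;
 *	}
 */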

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock.
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);
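
/*
 * Illustrative sketch (not from the original file): since mutex_trylock()
 * returns 1 on success (the opposite of down_trylock()), the typical
 * pattern is to skip or defer the work on contention. The names below
 * are hypothetical.
 *
 *	static void my_background_flush(struct my_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return;		// contended: try again later
 *		do_flush(dev);
 *		mutex_unlock(&dev->lock);
 *	}
 */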

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter which we are to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true (1) and hold the lock if we decremented the counter to 0,
 * return false (0) otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
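
/*
 * Illustrative sketch (not from the original file): the usual caller of
 * atomic_dec_and_mutex_lock() drops a reference and only takes the mutex
 * when the last reference might go away, so teardown happens under the
 * lock. The object, list lock and field names below are hypothetical.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &my_obj_list_lock))
 *			return;			// not the last reference
 *		list_del(&obj->node);		// last ref: unlink under the mutex
 *		mutex_unlock(&my_obj_list_lock);
 *		kfree(obj);
 *	}
 */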