/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
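
/*
 * Example (illustrative sketch, not part of this file): a mutex is
 * normally created either statically with DEFINE_MUTEX() or dynamically
 * with mutex_init(), which expands to __mutex_init() above. The "foo"
 * structure and function names below are hypothetical.
 *
 *	static DEFINE_MUTEX(foo_global_lock);
 *
 *	struct foo {
 *		struct mutex lock;
 *		int state;
 *	};
 *
 *	static void foo_setup(struct foo *f)
 *	{
 *		mutex_init(&f->lock);
 *		f->state = 0;
 *	}
 */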

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
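
/*
 * Example (illustrative sketch, not part of this file): the typical
 * lock/unlock pairing around a critical section. The "foo" structure is
 * hypothetical; note that the same task that called mutex_lock() must
 * be the one to call mutex_unlock().
 *
 *	static void foo_update(struct foo *f, int new_state)
 *	{
 *		mutex_lock(&f->lock);
 *		f->state = new_state;
 *		mutex_unlock(&f->lock);
 *	}
 */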

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task we can live-lock, because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
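
/*
 * Example (illustrative sketch, not part of this file): mutex_lock_nested()
 * tells lockdep that taking a second lock of the same lock class is
 * intentional rather than a recursion bug. SINGLE_DEPTH_NESTING comes
 * from <linux/lockdep.h>; the "foo" structure and the address-based
 * ordering rule are hypothetical.
 *
 *	static void foo_lock_pair(struct foo *a, struct foo *b)
 *	{
 *		if (a > b)
 *			swap(a, b);
 *		mutex_lock(&a->lock);
 *		mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	}
 */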

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
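
/*
 * Example (illustrative sketch, not part of this file): callers of
 * mutex_lock_interruptible() must check the return value and propagate
 * the error; in syscall paths -EINTR is commonly translated to
 * -ERESTARTSYS, as below. The "foo" structure is hypothetical.
 *
 *	static int foo_wait_and_update(struct foo *f, int new_state)
 *	{
 *		if (mutex_lock_interruptible(&f->lock))
 *			return -ERESTARTSYS;
 *		f->state = new_state;
 *		mutex_unlock(&f->lock);
 *		return 0;
 *	}
 *
 * mutex_lock_killable() below is used the same way, except that only
 * fatal signals interrupt the wait.
 */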

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
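
/*
 * Example (illustrative sketch, not part of this file): because
 * mutex_trylock() returns 1 on success - the opposite of down_trylock() -
 * the result is tested directly and the caller backs off on contention.
 * The "foo" structure is hypothetical.
 *
 *	static int foo_try_update(struct foo *f, int new_state)
 *	{
 *		if (!mutex_trylock(&f->lock))
 *			return -EBUSY;
 *		f->state = new_state;
 *		mutex_unlock(&f->lock);
 *		return 0;
 *	}
 */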

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
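
/*
 * Example (illustrative sketch, not part of this file): the classic use
 * is dropping a reference count where the final put must tear the object
 * down under a list lock. The "foo" structure, its refcount field and
 * the global foo_list_lock are hypothetical.
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_mutex_lock(&f->refcount, &foo_list_lock)) {
 *			list_del(&f->node);
 *			mutex_unlock(&foo_list_lock);
 *			kfree(f);
 *		}
 *	}
 */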