/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the mutex to the unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
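
/*
 * Usage sketch (illustrative; not part of the original file - all
 * identifiers below are hypothetical): a mutex is either defined
 * statically with DEFINE_MUTEX(), or set up at run time via the
 * mutex_init() wrapper, which ends up calling __mutex_init() above:
 *
 *	static DEFINE_MUTEX(example_global_mutex);
 *
 *	struct example_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void example_dev_setup(struct example_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */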

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
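
/*
 * Usage sketch (illustrative; not part of the original file - the
 * example structure and function are hypothetical): mutex_lock() may
 * sleep, so it must only be called from process context, and the task
 * that took the mutex must be the one to drop it:
 *
 *	static void example_set_state(struct example_dev *dev, int state)
 *	{
 *		mutex_lock(&dev->lock);
 *		dev->state = state;
 *		mutex_unlock(&dev->lock);
 *	}
 */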

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	debug_mutex_set_owner(lock, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
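
/*
 * Usage sketch (illustrative; not part of the original file - the
 * subclass enum and identifiers are hypothetical): when two mutexes of
 * the same lock class must legitimately be held at once, e.g. a parent
 * and a child object, the _nested() variants tell lockdep that this is
 * a distinct nesting level rather than a self-deadlock:
 *
 *	enum { EXAMPLE_LOCK_PARENT, EXAMPLE_LOCK_CHILD };
 *
 *	mutex_lock_nested(&parent->lock, EXAMPLE_LOCK_PARENT);
 *	mutex_lock_nested(&child->lock, EXAMPLE_LOCK_CHILD);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */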

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have
	 * to unlock it here:
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
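
/*
 * Usage sketch (illustrative; not part of the original file - the
 * identifiers are hypothetical): callers must check the return value
 * and back out if the wait was interrupted; -ERESTARTSYS is the usual
 * translation at the syscall boundary. mutex_lock_killable() below
 * follows the same pattern, but only a fatal signal wakes the waiter:
 *
 *	static int example_ioctl_op(struct example_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */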

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are inverted relative to down_trylock()! Be
 * careful about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
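
/*
 * Usage sketch (illustrative; not part of the original file - the
 * identifiers are hypothetical): note the spin_trylock()-style return
 * convention, where nonzero means the lock WAS acquired, so a caller
 * that cannot sleep simply skips the work on contention:
 *
 *	static void example_background_flush(struct example_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return;
 *		...
 *		mutex_unlock(&dev->lock);
 *	}
 */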