/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
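
/*
 * A minimal usage sketch (the semaphore name 'foo_sem' is hypothetical,
 * not part of this file): a semaphore initialised to 'n' lets up to 'n'
 * tasks hold it at the same time.
 *
 *	static struct semaphore foo_sem;
 *
 *	sema_init(&foo_sem, 2);		at most two concurrent holders
 *	down(&foo_sem);			sleeps until a slot is free
 *	...do the guarded work...
 *	up(&foo_sem);			hand the slot back
 */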

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long jiffies);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void down(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);
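
/*
 * A minimal sketch of the usual calling pattern ('foo_sem' is hypothetical):
 * a nonzero return means the sleep was interrupted by a signal, so the
 * caller backs out without touching the protected state.
 *
 *	int ret = down_interruptible(&foo_sem);
 *	if (ret)
 *		return ret;		interrupted, -EINTR propagated
 *	...protected section...
 *	up(&foo_sem);
 */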

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
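
/*
 * A short sketch of the inverted convention noted above ('foo_sem' is
 * hypothetical): down_trylock() returns 0 on success, so the failure path
 * sits in the 'if' branch, the opposite of spin_trylock()/mutex_trylock().
 *
 *	if (down_trylock(&foo_sem))
 *		return -EAGAIN;		not acquired, and we must not block
 *	...semaphore held...
 *	up(&foo_sem);
 */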

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @jiffies: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long jiffies)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, jiffies);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);
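
/*
 * A minimal sketch of a timed acquire ('foo_sem' is hypothetical): the
 * timeout is given in jiffies, so callers usually convert from
 * milliseconds with msecs_to_jiffies().
 *
 *	if (down_timeout(&foo_sem, msecs_to_jiffies(100)))
 *		return -ETIME;		gave up after roughly 100ms
 *	...semaphore held...
 *	up(&foo_sem);
 */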

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);

/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	int up;
};
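
/*
 * The 'up' flag implements a direct hand-off: on a contended up(), __up()
 * removes the first waiter, sets waiter->up and wakes its task, and the
 * woken task returns from __down_common() without re-checking sem->count.
 * The count is never incremented while someone is waiting, so the freed
 * slot passes straight to the waiter and cannot be stolen by a later caller.
 */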

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct task_struct *task = current;
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = task;
	waiter.up = 0;

	for (;;) {
		if (signal_pending_state(state, task))
			goto interrupted;
		if (timeout <= 0)
			goto timed_out;
		__set_task_state(task, state);
		spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}

static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = 1;
	wake_up_process(waiter->task);
}