blob: 5e41217239e89f3fa203f9f73e4476e0985ecd79 [file] [log] [blame]
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */
12
/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
27
28#include <linux/compiler.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <linux/semaphore.h>
33#include <linux/spinlock.h>
34
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050035static noinline void __down(struct semaphore *sem);
36static noinline int __down_interruptible(struct semaphore *sem);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -040037static noinline int __down_killable(struct semaphore *sem);
Matthew Wilcoxf1241c82008-03-14 13:43:13 -040038static noinline int __down_timeout(struct semaphore *sem, long jiffies);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050039static noinline void __up(struct semaphore *sem);
40
Matthew Wilcox714493c2008-04-11 15:23:52 -040041/**
42 * down - acquire the semaphore
43 * @sem: the semaphore to be acquired
44 *
45 * Acquires the semaphore. If no more tasks are allowed to acquire the
46 * semaphore, calling this function will put the task to sleep until the
47 * semaphore is released.
48 *
49 * Use of this function is deprecated, please use down_interruptible() or
50 * down_killable() instead.
51 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050052void down(struct semaphore *sem)
53{
54 unsigned long flags;
55
56 spin_lock_irqsave(&sem->lock, flags);
Ingo Molnarbf726ea2008-05-08 11:53:48 +020057 if (unlikely(!sem->count))
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050058 __down(sem);
Ingo Molnarbf726ea2008-05-08 11:53:48 +020059 sem->count--;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050060 spin_unlock_irqrestore(&sem->lock, flags);
61}
62EXPORT_SYMBOL(down);
63
Matthew Wilcox714493c2008-04-11 15:23:52 -040064/**
65 * down_interruptible - acquire the semaphore unless interrupted
66 * @sem: the semaphore to be acquired
67 *
68 * Attempts to acquire the semaphore. If no more tasks are allowed to
69 * acquire the semaphore, calling this function will put the task to sleep.
70 * If the sleep is interrupted by a signal, this function will return -EINTR.
71 * If the semaphore is successfully acquired, this function returns 0.
72 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050073int down_interruptible(struct semaphore *sem)
74{
75 unsigned long flags;
76 int result = 0;
77
78 spin_lock_irqsave(&sem->lock, flags);
Ingo Molnarbf726ea2008-05-08 11:53:48 +020079 if (unlikely(!sem->count))
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050080 result = __down_interruptible(sem);
Ingo Molnarbf726ea2008-05-08 11:53:48 +020081 if (!result)
82 sem->count--;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050083 spin_unlock_irqrestore(&sem->lock, flags);
84
85 return result;
86}
87EXPORT_SYMBOL(down_interruptible);
88
Matthew Wilcox714493c2008-04-11 15:23:52 -040089/**
90 * down_killable - acquire the semaphore unless killed
91 * @sem: the semaphore to be acquired
92 *
93 * Attempts to acquire the semaphore. If no more tasks are allowed to
94 * acquire the semaphore, calling this function will put the task to sleep.
95 * If the sleep is interrupted by a fatal signal, this function will return
96 * -EINTR. If the semaphore is successfully acquired, this function returns
97 * 0.
98 */
Matthew Wilcoxf06d9682008-03-14 13:19:33 -040099int down_killable(struct semaphore *sem)
100{
101 unsigned long flags;
102 int result = 0;
103
104 spin_lock_irqsave(&sem->lock, flags);
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200105 if (unlikely(!sem->count))
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400106 result = __down_killable(sem);
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200107 if (!result)
108 sem->count--;
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400109 spin_unlock_irqrestore(&sem->lock, flags);
110
111 return result;
112}
113EXPORT_SYMBOL(down_killable);
114
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500115/**
116 * down_trylock - try to acquire the semaphore, without waiting
117 * @sem: the semaphore to be acquired
118 *
119 * Try to acquire the semaphore atomically. Returns 0 if the mutex has
Matthew Wilcox714493c2008-04-11 15:23:52 -0400120 * been acquired successfully or 1 if it it cannot be acquired.
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500121 *
122 * NOTE: This return value is inverted from both spin_trylock and
123 * mutex_trylock! Be careful about this when converting code.
124 *
125 * Unlike mutex_trylock, this function can be used from interrupt context,
126 * and the semaphore can be released by any task or interrupt.
127 */
128int down_trylock(struct semaphore *sem)
129{
130 unsigned long flags;
131 int count;
132
133 spin_lock_irqsave(&sem->lock, flags);
134 count = sem->count - 1;
135 if (likely(count >= 0))
136 sem->count = count;
137 spin_unlock_irqrestore(&sem->lock, flags);
138
139 return (count < 0);
140}
141EXPORT_SYMBOL(down_trylock);
142
Matthew Wilcox714493c2008-04-11 15:23:52 -0400143/**
144 * down_timeout - acquire the semaphore within a specified time
145 * @sem: the semaphore to be acquired
146 * @jiffies: how long to wait before failing
147 *
148 * Attempts to acquire the semaphore. If no more tasks are allowed to
149 * acquire the semaphore, calling this function will put the task to sleep.
150 * If the semaphore is not released within the specified number of jiffies,
151 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
152 */
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400153int down_timeout(struct semaphore *sem, long jiffies)
154{
155 unsigned long flags;
156 int result = 0;
157
158 spin_lock_irqsave(&sem->lock, flags);
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200159 if (unlikely(!sem->count))
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400160 result = __down_timeout(sem, jiffies);
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200161 if (!result)
162 sem->count--;
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400163 spin_unlock_irqrestore(&sem->lock, flags);
164
165 return result;
166}
167EXPORT_SYMBOL(down_timeout);
168
Matthew Wilcox714493c2008-04-11 15:23:52 -0400169/**
170 * up - release the semaphore
171 * @sem: the semaphore to release
172 *
173 * Release the semaphore. Unlike mutexes, up() may be called from any
174 * context and even by tasks which have never called down().
175 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500176void up(struct semaphore *sem)
177{
178 unsigned long flags;
179
180 spin_lock_irqsave(&sem->lock, flags);
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200181 sem->count++;
182 if (unlikely(!list_empty(&sem->wait_list)))
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500183 __up(sem);
184 spin_unlock_irqrestore(&sem->lock, flags);
185}
186EXPORT_SYMBOL(up);
187
188/* Functions for the contended case */
189
190struct semaphore_waiter {
191 struct list_head list;
192 struct task_struct *task;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500193};
194
195/*
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400196 * Because this function is inlined, the 'state' parameter will be
197 * constant, and thus optimised away by the compiler. Likewise the
198 * 'timeout' parameter for the cases without timeouts.
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500199 */
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400200static inline int __sched __down_common(struct semaphore *sem, long state,
201 long timeout)
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500202{
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500203 struct task_struct *task = current;
204 struct semaphore_waiter waiter;
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200205 int ret = 0;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500206
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500207 waiter.task = task;
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200208 list_add_tail(&waiter.list, &sem->wait_list);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500209
210 for (;;) {
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200211 if (state == TASK_INTERRUPTIBLE && signal_pending(task)) {
212 ret = -EINTR;
213 break;
214 }
215 if (state == TASK_KILLABLE && fatal_signal_pending(task)) {
216 ret = -EINTR;
217 break;
218 }
219 if (timeout <= 0) {
220 ret = -ETIME;
221 break;
222 }
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500223 __set_task_state(task, state);
224 spin_unlock_irq(&sem->lock);
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400225 timeout = schedule_timeout(timeout);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500226 spin_lock_irq(&sem->lock);
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200227 if (sem->count > 0)
228 break;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500229 }
230
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400231 list_del(&waiter.list);
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200232 return ret;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500233}
234
235static noinline void __sched __down(struct semaphore *sem)
236{
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400237 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500238}
239
240static noinline int __sched __down_interruptible(struct semaphore *sem)
241{
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400242 return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500243}
244
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400245static noinline int __sched __down_killable(struct semaphore *sem)
246{
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400247 return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
248}
249
250static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
251{
252 return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400253}
254
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500255static noinline void __sched __up(struct semaphore *sem)
256{
Matthew Wilcoxb17170b2008-03-14 14:35:22 -0400257 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
258 struct semaphore_waiter, list);
Matthew Wilcoxb17170b2008-03-14 14:35:22 -0400259 wake_up_process(waiter->task);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500260}