/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

/*
 * Some notes on the implementation:
 *
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.
 *
 * The ->count variable defines how many more tasks can acquire the
 * semaphore.  If it's zero, there may be tasks waiting on the list.
 */

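/*
 * A minimal usage sketch (the semaphore name 'my_sem' and the error
 * handling are illustrative assumptions, not part of this file): a
 * caller serialises access to a shared resource with a semaphore
 * initialised to 1.
 *
 *	struct semaphore my_sem;
 *
 *	sema_init(&my_sem, 1);
 *
 *	if (down_interruptible(&my_sem))
 *		return -EINTR;
 *	... access the shared resource ...
 *	up(&my_sem);
 */
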
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long jiffies);
static noinline void __up(struct semaphore *sem);

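/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * The sleep is uninterruptible, so in most contexts down_interruptible()
 * or down_killable() is a better choice.
 */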
void down(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

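/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function
 * returns 0.
 */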
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);

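/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function
 * returns 0.
 */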
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully and 1 if it is contended.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code; a nonzero
 * return means the semaphore was NOT acquired:
 *
 *	if (down_trylock(&sem))
 *		return -EBUSY;
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);

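/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @jiffies: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was
 * acquired.  A caller would typically convert from human units, e.g.
 * down_timeout(sem, msecs_to_jiffies(100)).
 */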
int down_timeout(struct semaphore *sem, long jiffies)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, jiffies);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);

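/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Releases the semaphore.  Unlike mutexes, up() may be called from any
 * context, and even by tasks which have never called down().
 */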
void up(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);

/* Functions for the contended case */

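/*
 * A semaphore_waiter lives on the stack of each sleeping task, so it is
 * only valid while that task is blocked in __down_common().  The 'up'
 * flag tells a woken task whether it was actually granted the semaphore,
 * as opposed to being woken by a signal or a timeout.
 */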
struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	int up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct task_struct *task = current;
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = task;
	waiter.up = 0;

	for (;;) {
		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
			goto interrupted;
		if (state == TASK_KILLABLE && fatal_signal_pending(task))
			goto interrupted;
		if (timeout <= 0)
			goto timed_out;
		__set_task_state(task, state);
		spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}

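/*
 * The semaphore is handed straight to the first waiter: sem->count is
 * deliberately not incremented, so there is no window in which another
 * task could slip in and take the semaphore away from the task that has
 * already been woken.
 */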
static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = 1;
	wake_up_process(waiter->task);
}