/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc _spin_trylock(spinlock_t *lock)
{
	return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
	return __read_trylock(lock);
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	return __write_trylock(lock);
}
EXPORT_SYMBOL(_write_trylock);
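
/*
 * Illustrative sketch (not part of this file): callers normally reach the
 * trylock entry points above through the spin_trylock() / read_trylock() /
 * write_trylock() wrappers from <linux/spinlock.h>. The lock name below is
 * hypothetical. A trylock either takes the lock and returns nonzero, or
 * returns 0 immediately without spinning:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	if (spin_trylock(&my_lock)) {
 *		...			(lock taken without waiting)
 *		spin_unlock(&my_lock);
 *	} else {
 *		...			(lock busy; back off or retry later)
 *	}
 */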

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc _read_lock(rwlock_t *lock)
{
	__read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	return __spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_spin_lock_irqsave);
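
/*
 * Illustrative sketch (not part of this file): the usual caller-side
 * pattern for the irqsave variant, via the spin_lock_irqsave() macro from
 * <linux/spinlock.h>. The lock and flags names are hypothetical. The saved
 * flags restore the previous interrupt state, so the code works whether or
 * not the caller already had interrupts disabled:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	...			(critical section, IRQs off on this CPU)
 *	spin_unlock_irqrestore(&my_lock, flags);
 */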

void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	__spin_lock_irq(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	__spin_lock_bh(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
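
/*
 * Illustrative sketch (not part of this file): the _bh variant is meant
 * for data that is also touched from softirq context (timers, tasklets,
 * network RX, ...). Names below are hypothetical. Taking the lock with
 * spin_lock_bh() disables softirqs on this CPU, so a softirq cannot
 * deadlock against the process-context lock holder:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock_bh(&my_lock);
 *	...			(update data shared with a softirq)
 *	spin_unlock_bh(&my_lock);
 */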

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	return __read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	__read_lock_irq(lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	__read_lock_bh(lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	return __write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	__write_lock_irq(lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	__write_lock_bh(lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
	__spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	__write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif /* CONFIG_PREEMPT */
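
/*
 * Illustrative sketch (not part of this file): whichever branch above gets
 * built, callers use the rwlock entry points through the read_lock() /
 * write_lock() wrappers from <linux/spinlock.h>. Names are hypothetical.
 * Multiple readers may hold the lock at once; a writer excludes everyone:
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	read_lock(&my_rwlock);
 *	...			(look, but do not modify)
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);
 *	...			(modify the protected data)
 *	write_unlock(&my_rwlock);
 */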

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
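
/*
 * Illustrative sketch (not part of this file): _spin_lock_nested() is
 * reached through the spin_lock_nested() wrapper and exists to keep
 * lockdep happy when two locks of the same lock class are legitimately
 * held at once. Names below are hypothetical; SINGLE_DEPTH_NESTING comes
 * from <linux/lockdep.h>:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */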

unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
				_raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);

void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);

#endif

void __lockfunc _spin_unlock(spinlock_t *lock)
{
	__spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	__write_unlock(lock);
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	__read_unlock(lock);
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	__spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	__spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	__spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	__read_unlock_irq(lock);
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	__read_unlock_bh(lock);
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	__write_unlock_irq(lock);
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	__write_unlock_bh(lock);
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	return __spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_spin_trylock_bh);

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
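
/*
 * Illustrative sketch (not part of this file): in_lock_functions() is the
 * helper that architectures' profile_pc() implementations typically use to
 * decide whether a sampled program counter landed inside one of the
 * __lockfunc routines above, in which case they unwind to the caller so
 * profiles charge time to the contended code rather than to the lock
 * primitives. Hypothetical shape of such a check:
 *
 *	if (in_lock_functions(pc))
 *		pc = caller_address_from_stack(regs);	(hypothetical helper)
 */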