#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions used on
 *                        UP builds. (which are NOPs on non-debug,
 *                        non-preempt builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
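
/*
 * Note: kernel code should include this header (<linux/spinlock.h>)
 * directly; the asm/ and spinlock_api_* headers above are implementation
 * details of the layering described here and are not meant to be
 * included on their own.
 */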

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>

#include <asm/system.h>

/*
 * Must define these before including other files; inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))
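
/*
 * Illustrative sketch (not a declaration made here): __lockfunc is used
 * on the out-of-line lock functions so they all land in the
 * .spinlock.text section, e.g.:
 *
 *	void __lockfunc _raw_spin_lock(raw_spinlock_t *lock);
 */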

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
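
/*
 * Illustrative usage sketch (my_raw_lock is a hypothetical caller-owned
 * lock, not part of this header). In the debug case the macro form gives
 * each init site its own static lock_class_key, so lockdep can tell the
 * call sites apart:
 *
 *	static raw_spinlock_t my_raw_lock;
 *
 *	raw_spin_lock_init(&my_raw_lock);
 */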

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/* Taking the lock does not imply a full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
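
/*
 * Illustrative sketch (hypothetical caller): code that needs a full
 * barrier right after taking a lock can pair the two explicitly; on
 * architectures whose lock already acts as a full barrier,
 * smp_mb__after_lock() can be defined away:
 *
 *	spin_lock(&q->lock);
 *	smp_mb__after_lock();
 *	... accesses that rely on full ordering ...
 */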

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif
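
/*
 * The __acquires()/__releases() annotations and the __acquire()/__release()
 * calls above exist for sparse's lock context checking; they compile to
 * nothing in a regular build.
 */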

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops where they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
# define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
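
/*
 * Illustrative sketch (hypothetical parent/child structures): taking two
 * locks of the same lockdep class in a documented order, with the inner
 * one marked as a distinct subclass so lockdep does not flag it:
 *
 *	raw_spin_lock(&parent->lock);
 *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */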

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif
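
/*
 * Illustrative sketch (using the hypothetical my_raw_lock from above):
 * the typecheck() insists that @flags is a plain unsigned long and warns
 * at compile time otherwise:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_raw_lock, flags);
 *	... critical section ...
 *	raw_spin_unlock_irqrestore(&my_raw_lock, flags);
 */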

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
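
/*
 * Illustrative sketch: the trylock variants return 1 on success and 0 on
 * failure, undoing the IRQ state change on failure:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&my_raw_lock, flags)) {
 *		... critical section with IRQs disabled ...
 *		raw_spin_unlock_irqrestore(&my_raw_lock, flags);
 *	}
 */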

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
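
/*
 * Illustrative sketch: dynamic vs. static initialization of the non-raw
 * type (DEFINE_SPINLOCK comes from linux/spinlock_types.h):
 *
 *	spinlock_t dyn_lock;
 *	static DEFINE_SPINLOCK(static_lock);
 *
 *	spin_lock_init(&dyn_lock);
 */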

static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
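
/*
 * Illustrative sketch (hypothetical driver data): the canonical pattern
 * for protecting data shared with an interrupt handler:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	dev->pending++;
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */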

static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

static inline void assert_spin_locked(spinlock_t *lock)
{
	assert_raw_spin_locked(&lock->rlock);
}
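
/*
 * Illustrative sketch (hypothetical helper): assert_spin_locked()
 * documents a locking precondition and lets debug configurations catch
 * violations:
 *
 *	static void foo_update(struct foo *f)
 *	{
 *		assert_spin_locked(&f->lock);
 *		f->count++;
 *	}
 */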

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs the above definitions)
 */
#include <asm/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
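
/*
 * Illustrative sketch (hypothetical refcounted object): drop a reference,
 * and only when it hits zero take the list lock and free the object:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */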

#endif /* __LINUX_SPINLOCK_H */