#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not
 * necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
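
/*
 * Note on the I/O sync macros above: this assumes (as done elsewhere,
 * in the PPC64 I/O accessor headers, not in this file) that the MMIO
 * accessors set paca->io_sync after each I/O access.  SYNC_IO then
 * issues a full mb() at unlock time so the I/O is ordered before the
 * lock release, while CLEAR_IO_SYNC at lock time resets the flag, so
 * only critical sections that actually did I/O pay for the heavier
 * barrier.
 */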

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

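/*
 * Acquire the lock: try the atomic trylock and, while it fails, spin
 * at low SMT thread priority (HMT_low), yielding the rest of our
 * timeslice to the lock holder when running on a shared processor,
 * until the lock word reads as free again.
 */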
static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

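/*
 * As __raw_spin_lock(), but while we spin we restore the caller's
 * saved interrupt state `flags' (typically re-enabling interrupts),
 * and restore the disabled state again before each new trylock
 * attempt.
 */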
static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

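/*
 * Release the lock: SYNC_IO orders any MMIO done while the lock was
 * held, the lwsync (on SMP) orders prior accesses before the
 * releasing store, and a plain store of 0 drops the lock.
 */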
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}

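/*
 * Wait for a lock to be released without taking it.  The PPC64
 * version is out of line (assumed to live in arch/powerpc/lib/locks.c)
 * so that it can also yield to the hypervisor while waiting; the
 * 32-bit version just spins with cpu_relax().
 */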
#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

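/*
 * rwlock word encoding, as used by the primitives below: rw->lock
 * holds the reader count while read-locked (> 0), 0 when free, and a
 * negative write token (0x800000yy on PPC64, -1 on 32-bit) while
 * write-locked.
 */
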
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

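/*
 * Take a read lock: loop on the atomic trylock, spinning at low
 * thread priority (and yielding on shared processors) while the lock
 * word is negative, i.e. while a writer holds the lock.
 */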
static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

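/*
 * Take the write lock: as __raw_read_lock(), but spin until the lock
 * word is 0, i.e. until there are no readers and no writer.
 */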
static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

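/*
 * Drop a read lock: atomically decrement the reader count, with
 * lwsync (on SMP) as the release barrier ahead of the decrement.
 */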
static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}

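/*
 * Relax hooks for the generic contended-lock slow paths (assumed to
 * be the kernel/spinlock.c lock-building loops): instead of spinning
 * blindly on contention, yield to the hypervisor on shared
 * processors.
 */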
#define _raw_spin_relax(lock)	__spin_yield(lock)
#define _raw_read_relax(lock)	__rw_yield(lock)
#define _raw_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */