| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* spinlock.h: 64-bit Sparc spinlock support. | 
 | 2 |  * | 
 | 3 |  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | 
 | 4 |  */ | 
 | 5 |  | 
 | 6 | #ifndef __SPARC64_SPINLOCK_H | 
 | 7 | #define __SPARC64_SPINLOCK_H | 
 | 8 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | #include <linux/threads.h>	/* For NR_CPUS */ | 
 | 10 |  | 
 | 11 | #ifndef __ASSEMBLY__ | 
 | 12 |  | 
 | 13 | /* To get debugging spinlocks which detect and catch | 
 | 14 |  * deadlock situations, set CONFIG_DEBUG_SPINLOCK | 
 | 15 |  * and rebuild your kernel. | 
 | 16 |  */ | 
 | 17 |  | 
 | 18 | /* All of these locking primitives are expected to work properly | 
 | 19 |  * even in an RMO memory model, which currently is what the kernel | 
 | 20 |  * runs in. | 
 | 21 |  * | 
 | 22 |  * There is another issue.  Because we play games to save cycles | 
 | 23 |  * in the non-contention case, we need to be extra careful about | 
 | 24 |  * branch targets into the "spinning" code.  They live in their | 
 | 25 |  * own section, but the newer V9 branches have a shorter range | 
 | 26 |  * than the traditional 32-bit sparc branch variants.  The rule | 
 | 27 |  * is that the branches that go into and out of the spinner sections | 
 | 28 |  * must be pre-V9 branches. | 
 | 29 |  */ | 
 | 30 |  | 
/* The lock byte is non-zero while held: the lock path's ldstub sets it
 * to 0xff, unlock stores zero.
 */
#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

/* Spin read-only (with a read barrier each pass) until the lock is
 * observed free.  Does NOT acquire the lock.
 */
#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 36 |  | 
/* Acquire the spinlock.
 *
 * Fast path is a single atomic ldstub: it sets the lock byte to 0xff
 * and returns the old value.  A zero old value means we took the lock.
 * On contention we fall to an out-of-line loop (.subsection 2, see the
 * pre-V9 branch note at the top of this file) that spins with plain
 * loads until the byte reads zero, then retries the ldstub.  The
 * membars order the acquire against later accesses under RMO.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"	/* atomic test-and-set of lock byte */
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"	/* old value non-zero -> contended */
"	 nop\n"
"	.subsection	2\n"		/* out-of-line spin loop */
"2:	ldub		[%1], %0\n"	/* non-atomic read while waiting */
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"	/* still held, keep spinning */
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"	/* looks free, retry the ldstub */
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
 | 57 |  | 
/* Single attempt to take the lock: one atomic ldstub, no spinning.
 *
 * Returns 1 if the lock was acquired (previous byte was zero),
 * 0 if it was already held.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"	/* atomic test-and-set */
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}
 | 71 |  | 
/* Release the spinlock: the membar orders all loads and stores of the
 * critical section before the zeroing store of the lock byte
 * (release semantics under RMO).
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"	/* %g0 is hard-wired zero */
	: /* No outputs */
	: "r" (lock)
	: "memory");
}
 | 81 |  | 
/* As __raw_spin_lock(), but if the lock is contended, write the
 * caller's saved interrupt level ('flags', the pre-disable %pil) back
 * while spinning so interrupts can be serviced during the wait.  The
 * disabled %pil is saved in %1 and re-applied in the branch delay slot
 * before retrying, so the lock is always taken with interrupts
 * disabled again.
 */
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"	/* atomic test-and-set */
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"	/* contended -> out of line */
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"	/* save current (disabled) %pil */
"	wrpr		%3, %%pil\n"	/* restore caller's %pil while waiting */
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"	/* spin while held */
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"	/* delay slot: IRQ level back off, retry */
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
 | 105 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 106 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 
 | 107 |  | 
| Ingo Molnar | fb1c8f9 | 2005-09-10 00:25:56 -0700 | [diff] [blame] | 108 | static void inline __read_lock(raw_rwlock_t *lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 109 | { | 
 | 110 | 	unsigned long tmp1, tmp2; | 
 | 111 |  | 
 | 112 | 	__asm__ __volatile__ ( | 
 | 113 | "1:	ldsw		[%2], %0\n" | 
 | 114 | "	brlz,pn		%0, 2f\n" | 
 | 115 | "4:	 add		%0, 1, %1\n" | 
 | 116 | "	cas		[%2], %0, %1\n" | 
 | 117 | "	cmp		%0, %1\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 118 | "	membar		#StoreLoad | #StoreStore\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 119 | "	bne,pn		%%icc, 1b\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 120 | "	 nop\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 121 | "	.subsection	2\n" | 
 | 122 | "2:	ldsw		[%2], %0\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 123 | "	membar		#LoadLoad\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 124 | "	brlz,pt		%0, 2b\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 125 | "	 nop\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 126 | "	ba,a,pt		%%xcc, 4b\n" | 
 | 127 | "	.previous" | 
 | 128 | 	: "=&r" (tmp1), "=&r" (tmp2) | 
 | 129 | 	: "r" (lock) | 
 | 130 | 	: "memory"); | 
 | 131 | } | 
 | 132 |  | 
| David S. Miller | d3ed309 | 2006-01-23 21:03:56 -0800 | [diff] [blame] | 133 | static int inline __read_trylock(raw_rwlock_t *lock) | 
 | 134 | { | 
 | 135 | 	int tmp1, tmp2; | 
 | 136 |  | 
 | 137 | 	__asm__ __volatile__ ( | 
 | 138 | "1:	ldsw		[%2], %0\n" | 
 | 139 | "	brlz,a,pn	%0, 2f\n" | 
 | 140 | "	 mov		0, %0\n" | 
 | 141 | "	add		%0, 1, %1\n" | 
 | 142 | "	cas		[%2], %0, %1\n" | 
 | 143 | "	cmp		%0, %1\n" | 
 | 144 | "	membar		#StoreLoad | #StoreStore\n" | 
 | 145 | "	bne,pn		%%icc, 1b\n" | 
 | 146 | "	 mov		1, %0\n" | 
 | 147 | "2:" | 
 | 148 | 	: "=&r" (tmp1), "=&r" (tmp2) | 
 | 149 | 	: "r" (lock) | 
 | 150 | 	: "memory"); | 
 | 151 |  | 
 | 152 | 	return tmp1; | 
 | 153 | } | 
 | 154 |  | 
| Ingo Molnar | fb1c8f9 | 2005-09-10 00:25:56 -0700 | [diff] [blame] | 155 | static void inline __read_unlock(raw_rwlock_t *lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 156 | { | 
 | 157 | 	unsigned long tmp1, tmp2; | 
 | 158 |  | 
 | 159 | 	__asm__ __volatile__( | 
 | 160 | "	membar	#StoreLoad | #LoadLoad\n" | 
 | 161 | "1:	lduw	[%2], %0\n" | 
 | 162 | "	sub	%0, 1, %1\n" | 
 | 163 | "	cas	[%2], %0, %1\n" | 
 | 164 | "	cmp	%0, %1\n" | 
 | 165 | "	bne,pn	%%xcc, 1b\n" | 
 | 166 | "	 nop" | 
 | 167 | 	: "=&r" (tmp1), "=&r" (tmp2) | 
 | 168 | 	: "r" (lock) | 
 | 169 | 	: "memory"); | 
 | 170 | } | 
 | 171 |  | 
| Ingo Molnar | fb1c8f9 | 2005-09-10 00:25:56 -0700 | [diff] [blame] | 172 | static void inline __write_lock(raw_rwlock_t *lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 173 | { | 
 | 174 | 	unsigned long mask, tmp1, tmp2; | 
 | 175 |  | 
 | 176 | 	mask = 0x80000000UL; | 
 | 177 |  | 
 | 178 | 	__asm__ __volatile__( | 
 | 179 | "1:	lduw		[%2], %0\n" | 
 | 180 | "	brnz,pn		%0, 2f\n" | 
 | 181 | "4:	 or		%0, %3, %1\n" | 
 | 182 | "	cas		[%2], %0, %1\n" | 
 | 183 | "	cmp		%0, %1\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 184 | "	membar		#StoreLoad | #StoreStore\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 185 | "	bne,pn		%%icc, 1b\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 186 | "	 nop\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 187 | "	.subsection	2\n" | 
 | 188 | "2:	lduw		[%2], %0\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 189 | "	membar		#LoadLoad\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 190 | "	brnz,pt		%0, 2b\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 191 | "	 nop\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 192 | "	ba,a,pt		%%xcc, 4b\n" | 
 | 193 | "	.previous" | 
 | 194 | 	: "=&r" (tmp1), "=&r" (tmp2) | 
 | 195 | 	: "r" (lock), "r" (mask) | 
 | 196 | 	: "memory"); | 
 | 197 | } | 
 | 198 |  | 
| Ingo Molnar | fb1c8f9 | 2005-09-10 00:25:56 -0700 | [diff] [blame] | 199 | static void inline __write_unlock(raw_rwlock_t *lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 200 | { | 
 | 201 | 	__asm__ __volatile__( | 
 | 202 | "	membar		#LoadStore | #StoreStore\n" | 
 | 203 | "	stw		%%g0, [%0]" | 
 | 204 | 	: /* no outputs */ | 
 | 205 | 	: "r" (lock) | 
 | 206 | 	: "memory"); | 
 | 207 | } | 
 | 208 |  | 
| Ingo Molnar | fb1c8f9 | 2005-09-10 00:25:56 -0700 | [diff] [blame] | 209 | static int inline __write_trylock(raw_rwlock_t *lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 210 | { | 
 | 211 | 	unsigned long mask, tmp1, tmp2, result; | 
 | 212 |  | 
 | 213 | 	mask = 0x80000000UL; | 
 | 214 |  | 
 | 215 | 	__asm__ __volatile__( | 
 | 216 | "	mov		0, %2\n" | 
 | 217 | "1:	lduw		[%3], %0\n" | 
 | 218 | "	brnz,pn		%0, 2f\n" | 
 | 219 | "	 or		%0, %4, %1\n" | 
 | 220 | "	cas		[%3], %0, %1\n" | 
 | 221 | "	cmp		%0, %1\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 222 | "	membar		#StoreLoad | #StoreStore\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 223 | "	bne,pn		%%icc, 1b\n" | 
| David S. Miller | b445e26 | 2005-06-27 15:42:04 -0700 | [diff] [blame] | 224 | "	 nop\n" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 225 | "	mov		1, %2\n" | 
 | 226 | "2:" | 
 | 227 | 	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result) | 
 | 228 | 	: "r" (lock), "r" (mask) | 
 | 229 | 	: "memory"); | 
 | 230 |  | 
 | 231 | 	return result; | 
 | 232 | } | 
 | 233 |  | 
/* Hook the generic raw rwlock API up to the implementations above. */
#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

/* A read lock is available unless the writer bit (0x80000000) is set;
 * a write lock needs the whole word — readers and writer bit — clear.
 */
#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

/* Busy-wait hint used by the generic spinning layer between attempts. */
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
 | 247 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 248 | #endif /* !(__ASSEMBLY__) */ | 
 | 249 |  | 
 | 250 | #endif /* !(__SPARC64_SPINLOCK_H) */ |