#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
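/*
 * Each ALT_SMP use records the address of its SMP instruction in the
 * .alt.smp.init section, so the SMP_ON_UP fixup code referred to below
 * can rewrite it with the "up" alternative when the kernel finds itself
 * running on a uniprocessor system.
 */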

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

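/*
 * Data synchronisation barrier followed by SEV: make sure the store that
 * released the lock is visible to other CPUs before waking any of them
 * from WFE.  ARMv7 has a dsb instruction; ARMv6 uses the equivalent
 * CP15 c7, c10, 4 operation.
 */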
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

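/*
 * One-shot attempt to take the lock: succeeds only if the lock word is
 * seen as 0 and the exclusive store lands.  Returns 1 on success, 0
 * otherwise; never spins.
 */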
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

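/*
 * Release: smp_mb() orders the critical section before the plain store
 * that drops the lock, and dsb_sev() then wakes any CPUs parked in WFE
 * inside arch_spin_lock().
 */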
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

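/*
 * One-shot write lock attempt: succeeds only if no readers or writer are
 * present (lock word is 0) and the exclusive store of bit 31 lands.
 * Returns 1 on success, 0 otherwise.
 */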
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

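/*
 * One-shot read lock attempt.  tmp2 starts at 1 and is only written by
 * strexpl, which executes just when the incremented count is non-negative
 * (i.e. no writer holds the lock); a write-locked word or a failed
 * exclusive store therefore leaves tmp2 non-zero and the trylock fails.
 */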
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */