#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}
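
/*
 * Illustrative sketch only, not part of this header: roughly what the
 * ldrex/strex loop above does, written as C.  example_cmpxchg() is a
 * hypothetical helper that atomically compares-and-swaps a word and
 * returns the old value, standing in for the exclusive load/store pair.
 */
#if 0
static inline void example_spin_lock(raw_spinlock_t *lock)
{
	/* Spin until we atomically take the lock word from 0 to 1. */
	while (example_cmpxchg(&lock->lock, 0, 1) != 0)
		cpu_relax();	/* the asm parks in WFE here on V6K */
	smp_mb();	/* keep the critical section after the acquire */
}
#endif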

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
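
/*
 * Illustrative usage sketch only: trylock makes a single attempt and
 * returns non-zero on success, so a caller (normally reaching here via
 * spin_trylock()) can fall back to other work instead of spinning.
 * some_lock and do_other_work() are hypothetical.
 */
#if 0
	if (__raw_spin_trylock(&some_lock)) {
		/* ... critical section ... */
		__raw_spin_unlock(&some_lock);
	} else {
		do_other_work();	/* lock was contended */
	}
#endif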

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev"
#endif
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
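
/*
 * Illustrative sketch only: the release path above in rough C.  The
 * smp_mb() orders the critical section before the store; on 32v6K a
 * DSB then makes the store visible before SEV wakes any CPU parked in
 * the WFE of the lock loop.  example_dsb()/example_sev() are
 * hypothetical stand-ins for the mcr and sev instructions.
 */
#if 0
static inline void example_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();		/* critical section completes first */
	lock->lock = 0;		/* plain store - we hold the lock */
	example_dsb();		/* drain the store (V6K only) */
	example_sev();		/* wake WFE waiters (V6K only) */
}
#endif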

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
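
/*
 * Lock word encoding implied by the code below:
 *
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31 set, no readers)
 *	0x00000001 up	read-locked, value = number of active readers
 *
 * Read as a signed word, the lock is negative exactly when a writer
 * holds it, which is why the read paths below test the sign bit.
 */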

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev\n"
#endif
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}

/* write_can_lock - would write_trylock() succeed? */
#define __raw_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
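
/*
 * Illustrative sketch only: the reader-acquire loop above in rough C,
 * reusing the hypothetical example_cmpxchg() from the spinlock sketch.
 * A writer makes the word negative, so the increment is only published
 * while the sign bit is clear.
 */
#if 0
static inline void example_read_lock(raw_rwlock_t *rw)
{
	for (;;) {
		int old = rw->lock;
		if (old < 0) {		/* bit 31 set: writer active */
			cpu_relax();	/* the asm uses WFE on V6K */
			continue;
		}
		if (example_cmpxchg(&rw->lock, old, old + 1) == old)
			break;		/* increment published */
	}
	smp_mb();	/* critical-section reads stay after the acquire */
}
#endif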

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
#ifdef CONFIG_CPU_32v6K
"\n	cmp	%0, #0\n"
"	mcreq	p15, 0, %0, c7, c10, 4\n" /* DSB */
"	seveq"
#endif
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
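
/*
 * Note that the two can_lock tests (this one and __raw_write_can_lock()
 * above) read the lock word once, with no barriers, so they are hints
 * only: the answer may be stale by the time the caller acts on it.  The
 * unsigned compare in __raw_read_can_lock() is true exactly when bit 31
 * (the writer bit) is clear.
 */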

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */