#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

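/*
 * For illustration: ALT_SMP("sev", "nop") places "sev" in the
 * instruction stream at local label 9998 and records that address,
 * followed by the replacement "nop", in the .alt.smp.init section.
 * Early boot code walks this section and overwrites each recorded
 * instruction when the kernel turns out to be running on a
 * uniprocessor system.
 */
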
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	/* ARMv6 has no dsb mnemonic: use the CP15 data synchronization barrier */
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

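/*
 * dsb_sev() is used on the unlock paths below: the barrier makes the
 * newly written lock value visible before the SEV wakes any CPUs that
 * are waiting in WFE.
 */
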
#ifndef CONFIG_ARM_TICKET_LOCKS
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
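
/*
 * Illustrative C equivalent of the locking loop below (a sketch only:
 * the real code uses ldrex/strex so the read-test-store sequence is
 * atomic, and WFE to sleep until an unlocking CPU issues SEV):
 *
 *	while (cmpxchg(&lock->lock, 0, 1) != 0)
 *		cpu_relax();
 */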

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}
#else
/*
 * ARM Ticket spin-locking
 *
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * Unlocked value: 0
 * Locked value: now_serving != next_ticket
 *
 *   31                    16 15                     0
 *  +-------------------------+-------------------------+
 *  |       now_serving       |       next_ticket       |
 *  +-------------------------+-------------------------+
 */

#define TICKET_SHIFT	16
#define TICKET_BITS	16
#define TICKET_MASK	0xFFFF

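/*
 * Worked example (a sketch of the protocol implemented below): the lock
 * word starts at 0 (now_serving == next_ticket == 0, so it is unlocked).
 * CPU A takes ticket 0 and increments next_ticket, giving 0x00000001;
 * now_serving (0) matches A's ticket, so A holds the lock.  CPU B then
 * takes ticket 1 (word 0x00000002) and spins.  When A unlocks, it bumps
 * now_serving to 1 (word 0x00010002), which matches B's ticket, so B
 * proceeds.  Once B unlocks (word 0x00020002), now_serving again equals
 * next_ticket and the lock is free.
 */
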
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp, ticket, next_ticket;

	/*
	 * Grab the next ticket and wait for it to be "served".  uadd16
	 * performs two independent halfword additions, so next_ticket
	 * wraps at 16 bits without carrying into now_serving.
	 */
	__asm__ __volatile__(
"1:	ldrex	%[ticket], [%[lockaddr]]\n"
"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b\n"
"	uxth	%[ticket], %[ticket]\n"
"2:\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	ldr	%[tmp], [%[lockaddr]]\n"
"	cmp	%[ticket], %[tmp], lsr #16\n"
"	bne	2b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [next_ticket]"=&r" (next_ticket)
	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
	: "cc");
	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, ticket, next_ticket;

	/* Grab lock if now_serving == next_ticket and access is exclusive */
	__asm__ __volatile__(
"	ldrex	%[ticket], [%[lockaddr]]\n"
"	ror	%[tmp], %[ticket], #16\n"
"	eors	%[tmp], %[tmp], %[ticket]\n"
"	bne	1f\n"
"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
"1:"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [next_ticket]"=&r" (next_ticket)
	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
	: "cc");
	if (!tmp)
		smp_mb();
	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long ticket, tmp;

	smp_mb();

	/* Bump now_serving by 1 */
	__asm__ __volatile__(
"1:	ldrex	%[ticket], [%[lockaddr]]\n"
"	uadd16	%[ticket], %[ticket], %[serving1]\n"
"	strex	%[tmp], %[ticket], [%[lockaddr]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp)
	: [lockaddr]"r" (&lock->lock), [serving1]"r" (0x00010000)
	: "cc");
	dsb_sev();
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned long ticket;

	/* Wait for now_serving == next_ticket */
	__asm__ __volatile__(
#ifdef CONFIG_CPU_32v6K
"	cmpne	%[lockaddr], %[lockaddr]\n"	/* force EQ so the first wfene falls through */
"1:	wfene\n"
#else
"1:\n"
#endif
"	ldr	%[ticket], [%[lockaddr]]\n"
"	eor	%[ticket], %[ticket], %[ticket], lsr #16\n"
"	uxth	%[ticket], %[ticket]\n"
"	cmp	%[ticket], #0\n"
"	bne	1b"
	: [ticket]"=&r" (ticket)
	: [lockaddr]"r" (&lock->lock)
	: "cc");
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	unsigned long tmp = ACCESS_ONCE(lock->lock);

	/* Locked when now_serving != next_ticket */
	return (((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	unsigned long tmp = ACCESS_ONCE(lock->lock);

	/* Contended when more than one CPU (holder plus waiters) is queued */
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
#endif	/* CONFIG_ARM_TICKET_LOCKS */

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
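/*
 * For illustration: with no writer present, the word simply counts
 * readers (0, 1, 2, ...).  A writer can only move the word from 0 to
 * 0x80000000, which reads back as negative, so the "pl"/"mi" checks
 * below turn readers away while a writer holds the lock.
 */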
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */