#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

extern int msm_krait_need_wfe_fixup;

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
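
/*
 * For illustration, ALT_SMP("sev", "nop") expands (roughly) to:
 *
 *	"9998:	sev\n"
 *	"	.pushsection \".alt.smp.init\", \"a\"\n"
 *	"	.long	9998b\n"
 *	"	nop\n"
 *	"	.popsection\n"
 *
 * i.e. the SMP instruction is emitted inline, while its address and the UP
 * replacement are recorded in .alt.smp.init so that the boot-time SMP_ON_UP
 * fixup can patch the instruction away when running on a uniprocessor system.
 */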

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * Both instructions given to the ALT_SMP macro need to be the same size, to
 * allow the SMP_ON_UP fixups to function correctly.  Hence the explicit
 * encoding specifications.
 */
#define WFE()		ALT_SMP(		\
	"wfe.w",				\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE()		ALT_SMP("wfe", "nop")
#endif

/*
 * The fixup involves disabling interrupts during execution of the WFE
 * instruction.  This could potentially lead to deadlock if a thread is trying
 * to acquire a spinlock which is being released from an interrupt context.
 */
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
#define WFE_SAFE(fixup, tmp)				\
"	mrs	" tmp ", cpsr\n"			\
"	cmp	" fixup ", #0\n"			\
"	wfeeq\n"					\
"	beq	10f\n"					\
"	cpsid	if\n"					\
"	mrc	p15, 7, " fixup ", c15, c0, 5\n"	\
"	bic	" fixup ", " fixup ", #0x10000\n"	\
"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
"	isb\n"						\
"	wfe\n"						\
"	orr	" fixup ", " fixup ", #0x10000\n"	\
"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
"	isb\n"						\
"10:	msr	cpsr_cf, " tmp "\n"
#else
#define WFE_SAFE(fixup, tmp)	"	wfe\n"
#endif
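
/*
 * Rough flow of WFE_SAFE(fixup, tmp) when CONFIG_MSM_KRAIT_WFE_FIXUP is
 * enabled: the current CPSR is saved in tmp; if the runtime fixup flag is
 * zero a plain wfe is issued, otherwise interrupts are masked (cpsid if),
 * bit 16 of the Krait-specific CP15 register (p15, 7, c15, c0, 5) is cleared
 * around the wfe and set again afterwards, and the saved CPSR is restored at
 * label 10.
 */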

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

#ifndef CONFIG_ARM_TICKET_LOCKS
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
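
/*
 * As a rough C-level sketch (illustrative only, ignoring the WFE-based
 * waiting and the exclusive-monitor details), arch_spin_lock() below does:
 *
 *	do {
 *		old = lock->lock;			// ldrex
 *	} while (old != 0 ||				// wait until it looks free
 *		 !store_exclusive(&lock->lock, 1));	// strexeq may still fail
 *	smp_mb();
 *
 * where store_exclusive() is only shorthand for the strex succeeding, i.e.
 * no other CPU wrote the location since the ldrex.
 */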

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp, fixup = msm_krait_need_wfe_fixup;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	beq	2f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
"	strexeq	%[tmp], %[bit0], [%[lock]]\n"
"	teqeq	%[tmp], #0\n"
"	bne	1b"
	: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
	: [lock] "r" (&lock->lock), [bit0] "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}
#else
/*
 * ARM Ticket spin-locking
 *
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * Unlocked value: 0
 * Locked value: now_serving != next_ticket
 *
 *   31            16 15             0
 *  +---------------+----------------+
 *  |  now_serving  |  next_ticket   |
 *  +---------------+----------------+
 */
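
/*
 * Worked example (illustrative): starting from 0, three CPUs calling
 * arch_spin_lock() take tickets 0, 1 and 2, leaving next_ticket == 3 and
 * now_serving == 0, so only the holder of ticket 0 proceeds.  Each
 * arch_spin_unlock() adds 1 to now_serving (the upper halfword), admitting
 * the next waiter in FIFO order; the lock is free again once
 * now_serving == next_ticket.
 */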

#define TICKET_SHIFT	16
#define TICKET_BITS	16
#define TICKET_MASK	0xFFFF

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp, ticket, next_ticket;
	unsigned long fixup = msm_krait_need_wfe_fixup;

	/* Grab the next ticket and wait for it to be "served" */
	__asm__ __volatile__(
"1:	ldrex	%[ticket], [%[lockaddr]]\n"
"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b\n"
"	uxth	%[ticket], %[ticket]\n"
"2:\n"
#ifdef CONFIG_CPU_32v6K
"	beq	3f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"3:\n"
#endif
"	ldr	%[tmp], [%[lockaddr]]\n"
"	cmp	%[ticket], %[tmp], lsr #16\n"
"	bne	2b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [next_ticket]"=&r" (next_ticket), [fixup]"+r" (fixup)
	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
	: "cc");
	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, ticket, next_ticket;

	/* Grab lock if now_serving == next_ticket and access is exclusive */
	__asm__ __volatile__(
"	ldrex	%[ticket], [%[lockaddr]]\n"
"	ror	%[tmp], %[ticket], #16\n"
"	eors	%[tmp], %[tmp], %[ticket]\n"
"	bne	1f\n"
"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
"1:"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [next_ticket]"=&r" (next_ticket)
	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
	: "cc");
	if (!tmp)
		smp_mb();
	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long ticket, tmp;

	smp_mb();

	/* Bump now_serving by 1 */
	__asm__ __volatile__(
"1:	ldrex	%[ticket], [%[lockaddr]]\n"
"	uadd16	%[ticket], %[ticket], %[serving1]\n"
"	strex	%[tmp], %[ticket], [%[lockaddr]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp)
	: [lockaddr]"r" (&lock->lock), [serving1]"r" (0x00010000)
	: "cc");
	dsb_sev();
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned long ticket, tmp, fixup = msm_krait_need_wfe_fixup;

	/* Wait for now_serving == next_ticket */
	__asm__ __volatile__(
#ifdef CONFIG_CPU_32v6K
"	cmpne	%[lockaddr], %[lockaddr]\n"
"1:\n"
"	beq	2f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
#else
"1:\n"
#endif
"	ldr	%[ticket], [%[lockaddr]]\n"
"	eor	%[ticket], %[ticket], %[ticket], lsr #16\n"
"	uxth	%[ticket], %[ticket]\n"
"	cmp	%[ticket], #0\n"
"	bne	1b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [fixup]"+r" (fixup)
	: [lockaddr]"r" (&lock->lock)
	: "cc");
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	unsigned long tmp = ACCESS_ONCE(lock->lock);
	return (((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	unsigned long tmp = ACCESS_ONCE(lock->lock);
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
#endif

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
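
/*
 * Lock word encoding used below (as implied by the code): 0 means unlocked,
 * 0x80000000 (bit 31) means write-locked, and a small positive value is the
 * count of currently active readers.
 */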

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, fixup = msm_krait_need_wfe_fixup;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	beq	2f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
"	strexeq	%[tmp], %[bit31], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b"
	: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
	: [lock] "r" (&rw->lock), [bit31] "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
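
/*
 * Illustrative C-level sketch (not the implementation) of what
 * arch_read_lock() below amounts to:
 *
 *	do {
 *		old = rw->lock;			// ldrex
 *		new = old + 1;			// adds (sets the sign flag)
 *		if (new < 0)			// bit 31 set: a writer holds it
 *			wait (wfe) and retry;
 *	} while (exclusive store of new failed);	// strexpl
 *	smp_mb();
 */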
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	adds	%[tmp], %[tmp], #1\n"
"	strexpl	%[tmp2], %[tmp], [%[lock]]\n"
"	bpl	2f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
"	rsbpls	%[tmp], %[tmp2], #0\n"
"	bmi	1b"
	: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2), [fixup] "+r" (fixup)
	: [lock] "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */