#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

extern int msm_krait_need_wfe_fixup;

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
"9998:	" smp "\n"						\
"	.pushsection \".alt.smp.init\", \"a\"\n"		\
"	.long	9998b\n"					\
"	" up "\n"						\
"	.popsection\n"

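/*
 * For illustration, ALT_SMP("sev", "nop") expands to roughly:
 *
 *	9998:	sev
 *		.pushsection ".alt.smp.init", "a"
 *		.long	9998b
 *		nop
 *		.popsection
 *
 * i.e. the SMP instruction is emitted in the normal text stream, while
 * the .alt.smp.init section records its address together with the UP
 * replacement, so the boot-time SMP_ON_UP fixup can patch "sev" into
 * "nop" when the kernel finds itself on a uniprocessor system.
 */
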
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * Both instructions given to the ALT_SMP macro need to be the same size,
 * to allow the SMP_ON_UP fixups to function correctly.  Hence the explicit
 * encoding specifications.
 */
#define WFE()		ALT_SMP(	\
	"wfe.w",			\
	"nop.w"				\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE()		ALT_SMP("wfe", "nop")
#endif

/*
 * The fixup involves disabling FIQs during execution of the WFE instruction.
 * This could potentially lead to deadlock if a thread is trying to acquire a
 * spinlock which is being released from an FIQ.  This should not be a problem
 * because FIQs are handled by the secure environment and do not directly
 * manipulate spinlocks.
 */
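/*
 * WFE_SAFE(fixup, tmp) is a drop-in replacement for a bare "wfe".  When
 * the fixup is not needed (fixup == 0) it executes a plain wfe; otherwise
 * it masks FIQs, clears bit 16 of the implementation-defined register at
 * p15, 7, c15, c0, 5 around the wfe (with isb barriers on either side),
 * sets the bit again afterwards, and finally restores the caller's CPSR
 * from tmp.  The exact meaning of that bit is Krait-specific and not
 * documented here; the sequence is simply what the fixup requires.
 */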
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
#define WFE_SAFE(fixup, tmp)				\
"	mrs	" tmp ", cpsr\n"			\
"	cmp	" fixup ", #0\n"			\
"	wfeeq\n"					\
"	beq	10f\n"					\
"	cpsid	f\n"					\
"	mrc	p15, 7, " fixup ", c15, c0, 5\n"	\
"	bic	" fixup ", " fixup ", #0x10000\n"	\
"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
"	isb\n"						\
"	wfe\n"						\
"	orr	" fixup ", " fixup ", #0x10000\n"	\
"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
"	isb\n"						\
"10:	msr	cpsr_cf, " tmp "\n"
#else
#define WFE_SAFE(fixup, tmp)	"	wfe\n"
#endif

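/*
 * dsb_sev() pairs with the WFE in the lock slowpaths: the DSB makes the
 * unlocking store visible to other CPUs before the SEV wakes any of them
 * out of WFE.  ARMv7 has a dedicated dsb instruction; earlier cores use
 * the equivalent CP15 data synchronization barrier operation.
 */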
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

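/*
 * Two spinlock implementations follow: the original test-and-set lock,
 * and (when CONFIG_ARM_TICKET_LOCKS is enabled) a FIFO ticket lock that
 * grants the lock in the order in which CPUs asked for it.
 */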
#ifndef CONFIG_ARM_TICKET_LOCKS
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

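/*
 * In rough C-like pseudocode (ldrex()/strex()/wfe() are hypothetical
 * helpers, for illustration only) the locking loop below is:
 *
 *	for (;;) {
 *		tmp = ldrex(&lock->lock);	// exclusive load
 *		if (tmp != 0) {
 *			wfe();			// sleep until a sev
 *			continue;
 *		}
 *		if (strex(&lock->lock, 1) == 0)	// exclusive store won
 *			break;
 *	}
 */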
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp, fixup = msm_krait_need_wfe_fixup;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	beq	2f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
"	strexeq	%[tmp], %[bit0], [%[lock]]\n"
"	teqeq	%[tmp], #0\n"
"	bne	1b"
	: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
	: [lock] "r" (&lock->lock), [bit0] "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}
#else
/*
 * ARM Ticket spin-locking
 *
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * Unlocked value: 0
 * Locked value: now_serving != next_ticket
 *
 *   31             16 15              0
 *  +------------------+---------------+
 *  |   now_serving    |  next_ticket  |
 *  +------------------+---------------+
 */

#define TICKET_SHIFT	16
#define TICKET_BITS	16
#define TICKET_MASK	0xFFFF
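
/*
 * Worked example (values shown as now_serving:next_ticket):
 *
 *	lock = 0x0000:0000	unlocked
 *	CPU A locks:   takes ticket 0, bumps next_ticket -> 0x0000:0001
 *		       now_serving (0) == its ticket (0), so A owns the lock
 *	CPU B locks:   takes ticket 1, bumps next_ticket -> 0x0000:0002
 *		       now_serving (0) != 1, so B spins in WFE
 *	CPU A unlocks: bumps now_serving -> 0x0001:0002 and issues sev
 *		       now_serving (1) == B's ticket, so B owns the lock
 */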

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp, ticket, next_ticket;
	unsigned long fixup = msm_krait_need_wfe_fixup;

	/* Grab the next ticket and wait for it to be "served" */
	__asm__ __volatile__(
"1:	ldrex	%[ticket], [%[lockaddr]]\n"
"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b\n"
"	uxth	%[ticket], %[ticket]\n"
"2:\n"
#ifdef CONFIG_CPU_32v6K
"	beq	3f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"3:\n"
#endif
"	ldr	%[tmp], [%[lockaddr]]\n"
"	cmp	%[ticket], %[tmp], lsr #16\n"
"	bne	2b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [next_ticket]"=&r" (next_ticket), [fixup]"+r" (fixup)
	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
	: "cc");
	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, ticket, next_ticket;

	/* Grab lock if now_serving == next_ticket and access is exclusive */
	__asm__ __volatile__(
"	ldrex	%[ticket], [%[lockaddr]]\n"
"	ror	%[tmp], %[ticket], #16\n"
"	eors	%[tmp], %[tmp], %[ticket]\n"
"	bne	1f\n"
"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
"1:"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [next_ticket]"=&r" (next_ticket)
	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
	: "cc");
	if (!tmp)
		smp_mb();
	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long ticket, tmp;

	smp_mb();

	/* Bump now_serving by 1 */
	__asm__ __volatile__(
"1:	ldrex	%[ticket], [%[lockaddr]]\n"
"	uadd16	%[ticket], %[ticket], %[serving1]\n"
"	strex	%[tmp], %[ticket], [%[lockaddr]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp)
	: [lockaddr]"r" (&lock->lock), [serving1]"r" (0x00010000)
	: "cc");
	dsb_sev();
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned long ticket, tmp, fixup = msm_krait_need_wfe_fixup;

	/* Wait for now_serving == next_ticket */
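	/*
	 * The initial cmpne below compares a register with itself purely
	 * to force the condition flags to "eq", so the first pass through
	 * the loop branches past the WFE instead of sleeping before the
	 * lock has even been examined.
	 */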
	__asm__ __volatile__(
#ifdef CONFIG_CPU_32v6K
"	cmpne	%[lockaddr], %[lockaddr]\n"
"1:\n"
"	beq	2f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
#else
"1:\n"
#endif
"	ldr	%[ticket], [%[lockaddr]]\n"
"	eor	%[ticket], %[ticket], %[ticket], lsr #16\n"
"	uxth	%[ticket], %[ticket]\n"
"	cmp	%[ticket], #0\n"
"	bne	1b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [fixup]"+r" (fixup)
	: [lockaddr]"r" (&lock->lock)
	: "cc");
}

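/*
 * The lock is held while now_serving != next_ticket, and contended once
 * next_ticket has moved more than one step past now_serving, i.e. at
 * least one CPU is queued behind the current owner.
 */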
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	unsigned long tmp = ACCESS_ONCE(lock->lock);
	return (((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	unsigned long tmp = ACCESS_ONCE(lock->lock);
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
#endif

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, fixup = msm_krait_need_wfe_fixup;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	beq	2f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
"	strexeq	%[tmp], %[bit31], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b"
	: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
	: [lock] "r" (&rw->lock), [bit31] "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
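/*
 * The reader count lives in the low bits: 0 means unlocked, n means n
 * active readers, and bit 31 (the write lock) makes the value negative,
 * which is why the pl/mi condition codes below decide success.
 */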
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	adds	%[tmp], %[tmp], #1\n"
"	strexpl	%[tmp2], %[tmp], [%[lock]]\n"
"	bpl	2f\n"
	WFE_SAFE("%[fixup]", "%[tmp]")
"2:\n"
"	rsbpls	%[tmp], %[tmp2], #0\n"
"	bmi	1b"
	: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2), [fixup] "+r" (fixup)
	: [lock] "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

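/*
 * Single-shot attempt: tmp2 starts at 1 and is only cleared if the
 * strexpl both executes (the incremented count stayed non-negative)
 * and succeeds, so any failure leaves tmp2 non-zero and the function
 * returns 0 without retrying.
 */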
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */