#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * Both instructions given to the ALT_SMP macro need to be the same size, to
 * allow the SMP_ON_UP fixups to function correctly. Hence the explicit
 * encoding specifications.
 */
#define WFE()		ALT_SMP(	\
	"wfe.w",			\
	"nop.w"				\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE()		ALT_SMP("wfe", "nop")
#endif
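
/*
 * For illustration only (assuming a CONFIG_THUMB2_KERNEL build), a use of
 * SEV above expands roughly to:
 *
 *	"9998:	sev.w\n"
 *	"	.pushsection \".alt.smp.init\", \"a\"\n"
 *	"	.long	9998b\n"
 *	"	nop.w\n"
 *	"	.popsection\n"
 *
 * i.e. the SMP instruction is emitted in line, while its address and the UP
 * replacement ("nop.w") are recorded in the .alt.smp.init section so that
 * the SMP_ON_UP boot fixups can patch the instruction away when the kernel
 * runs on a uniprocessor system.
 */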

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}
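
/*
 * Note: dsb_sev() is used by the unlock paths below.  The DSB makes sure the
 * store that released the lock is visible before the SEV wakes any CPU that
 * is waiting in WFE() in the corresponding lock slow path.
 */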

#ifndef CONFIG_ARM_TICKET_LOCKS
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	beq	2f\n"
	WFE()
"2:\n"
"	strexeq	%[tmp], %[bit0], [%[lock]]\n"
"	teqeq	%[tmp], #0\n"
"	bne	1b"
	: [tmp] "=&r" (tmp)
	: [lock] "r" (&lock->lock), [bit0] "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}
#else
/*
 * ARM Ticket spin-locking
 *
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * Unlocked value: 0
 * Locked value: now_serving != next_ticket
 *
 *  31                          16 15                           0
 * +------------------------------+------------------------------+
 * |          now_serving         |          next_ticket         |
 * +------------------------------+------------------------------+
 */

#define TICKET_SHIFT	16
#define TICKET_BITS	16
#define TICKET_MASK	0xFFFF
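
/*
 * Worked example (illustrative values only): with lock->lock == 0x00030005,
 * now_serving == 3 and next_ticket == 5, so the lock is held (3 != 5) and
 * arch_spin_is_contended() reports contention because 5 - 3 == 2 tickets are
 * outstanding.  A new locker would take ticket 5, bump next_ticket to 6, and
 * then spin (in WFE where CONFIG_CPU_32v6K is available) until now_serving
 * reaches 5.
 */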

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp, ticket, next_ticket;

	/* Grab the next ticket and wait for it to be "served" */
	__asm__ __volatile__(
"1:	ldrex	%[ticket], [%[lockaddr]]\n"
"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b\n"
"	uxth	%[ticket], %[ticket]\n"
"2:\n"
#ifdef CONFIG_CPU_32v6K
"	beq	3f\n"
	WFE()
"3:\n"
#endif
"	ldr	%[tmp], [%[lockaddr]]\n"
"	cmp	%[ticket], %[tmp], lsr #16\n"
"	bne	2b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [next_ticket]"=&r" (next_ticket)
	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
	: "cc");
	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, ticket, next_ticket;

	/* Grab lock if now_serving == next_ticket and access is exclusive */
	__asm__ __volatile__(
"	ldrex	%[ticket], [%[lockaddr]]\n"
"	ror	%[tmp], %[ticket], #16\n"
"	eors	%[tmp], %[tmp], %[ticket]\n"
"	bne	1f\n"
"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
"1:"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
	  [next_ticket]"=&r" (next_ticket)
	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
	: "cc");
	if (!tmp)
		smp_mb();
	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long ticket, tmp;

	smp_mb();

	/* Bump now_serving by 1 */
	__asm__ __volatile__(
"1:	ldrex	%[ticket], [%[lockaddr]]\n"
"	uadd16	%[ticket], %[ticket], %[serving1]\n"
"	strex	%[tmp], %[ticket], [%[lockaddr]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b"
	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp)
	: [lockaddr]"r" (&lock->lock), [serving1]"r" (0x00010000)
	: "cc");
	dsb_sev();
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned long ticket;

	/* Wait for now_serving == next_ticket */
	__asm__ __volatile__(
#ifdef CONFIG_CPU_32v6K
"	cmpne	%[lockaddr], %[lockaddr]\n"
"1:\n"
"	beq	2f\n"
	WFE()
"2:\n"
#else
"1:\n"
#endif
"	ldr	%[ticket], [%[lockaddr]]\n"
"	eor	%[ticket], %[ticket], %[ticket], lsr #16\n"
"	uxth	%[ticket], %[ticket]\n"
"	cmp	%[ticket], #0\n"
"	bne	1b"
	: [ticket]"=&r" (ticket)
	: [lockaddr]"r" (&lock->lock)
	: "cc");
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	unsigned long tmp = ACCESS_ONCE(lock->lock);
	return (((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	unsigned long tmp = ACCESS_ONCE(lock->lock);
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
#endif

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	beq	2f\n"
	WFE()
"2:\n"
"	strexeq	%[tmp], %[bit31], [%[lock]]\n"
"	teq	%[tmp], #0\n"
"	bne	1b"
	: [tmp] "=&r" (tmp)
	: [lock] "r" (&rw->lock), [bit31] "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
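
/*
 * A quick sketch of the rw-lock word, inferred from the code here rather
 * than from a separate spec: 0 means unlocked, 0x80000000 (bit 31) means the
 * lock is write-held, and a value n in 1..0x7fffffff means n readers
 * currently hold the lock.  That is why arch_read_can_lock() below simply
 * checks that the value is less than 0x80000000.
 */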
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%[tmp], [%[lock]]\n"
"	adds	%[tmp], %[tmp], #1\n"
"	strexpl	%[tmp2], %[tmp], [%[lock]]\n"
"	bpl	2f\n"
	WFE()
"2:\n"
"	rsbpls	%[tmp], %[tmp2], #0\n"
"	bmi	1b"
	: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2)
	: [lock] "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */