/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/war.h>

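/*
 * R10000_LLSC_WAR (from asm/war.h) selects the branch-likely (beqzl)
 * forms of the ll/sc retry loops throughout this file; they work around
 * an ll/sc erratum on early R10000 cores.  The non-workaround paths use
 * plain beqz instead.
 */
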
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */

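/*
 * Here both parts share a single 32-bit word (see the union in
 * asm/spinlock_types.h): the tail ("ticket") occupies the upper 16 bits
 * and the head ("serving_now") the lower 16 bits.  That is why the lock
 * paths below take a ticket by adding 0x10000, recover it with a
 * "srl 16", and compare it against the low half masked with 0xffff.
 */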
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 16) ^ counters) & 0xffff;
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }

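/*
 * The lock is contended when more than one ticket is outstanding:
 * (tail - head) is 1 while a single CPU holds the lock, and greater
 * than 1 once at least one more CPU is queued behind it.
 */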
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

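/*
 * The out-of-line slow path (.subsection 2) below implements a simple
 * proportional backoff: the distance between my_ticket and serving_now
 * is shifted left by 5, and the CPU counts that value down before
 * re-reading serving_now, so CPUs further back in the queue poll the
 * lock word less frequently.
 */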
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();
}

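/*
 * Unlock only advances serving_now, with a plain 16-bit store: the
 * owner is the sole writer of that half of the word, so no ll/sc is
 * needed.  wmb() orders the critical section before the store, and
 * nudge_writes() (asm/barrier.h) hurries the store out of the write
 * buffer so spinning CPUs observe it sooner.
 */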
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();
}

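/*
 * trylock only takes a ticket when the snapshot shows the lock free
 * (my_ticket == now_serving); otherwise it bails out through the
 * .subsection and returns 0 without modifying the lock word.
 */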
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();

	return tmp;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

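/*
 * rw->lock encodes the whole state in one signed word: each reader
 * adds 1, and a writer sets bit 31 (the "lui 0x8000" below), making
 * the value negative.  bltz therefore detects a held write lock, a
 * non-negative value means readers may enter, and zero means the lock
 * is completely free.
 */
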
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	sub	%1, 1				\n"
			"	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

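/*
 * Both trylock variants below return 1 on success and 0 if the lock
 * was observed busy; on the failure path the branch skips the sc, so
 * a failed attempt leaves rw->lock untouched.
 */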
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
	}

	return ret;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */