arm: Implement ticket spin-locks
Introduce optional ticket locks to help ensure fairness on
contended locks and to prevent livelock.
CRs-fixed: 302764
Signed-off-by: Brent DeGraaf <bdegraaf@codeaurora.org>
Conflicts:
arch/arm/include/asm/spinlock.h
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c8..582c9b3 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -58,6 +58,7 @@
#endif
}
+#ifndef CONFIG_ARM_TICKET_LOCKS
/*
* ARMv6 Spin-locking.
*
@@ -126,6 +127,131 @@
dsb_sev();
}
+#else
+/*
+ * ARM Ticket spin-locking
+ *
+ * Ticket locks conceptually consist of two parts, one indicating the current
+ * head of the queue, and the other indicating the current tail. The lock is
+ * acquired by atomically noting the tail and incrementing it by one (thus
+ * adding ourselves to the queue and noting our position), then waiting until
+ * the head becomes equal to the initial value of the tail.
+ *
+ * Unlocked value: 0
+ * Locked value: now_serving != next_ticket
+ *
+ *  31            16 15             0
+ * +----------------+----------------+
+ * |  now_serving   |  next_ticket   |
+ * +----------------+----------------+
+ */
+
+#define TICKET_SHIFT 16
+#define TICKET_BITS 16
+#define TICKET_MASK 0xFFFF
+
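+/*
+ * Roughly, in illustrative (hypothetical, uncompiled) C -- the real
+ * sequences below use ldrex/strex plus halfword adds so that the
+ * increments are atomic and wrap without carrying between fields:
+ *
+ *	lock:	my_ticket = lock->lock & TICKET_MASK;
+ *		lock->lock++;				// join the queue
+ *		while ((lock->lock >> TICKET_SHIFT) != my_ticket)
+ *			;				// spin until served
+ *
+ *	unlock:	lock->lock += 1 << TICKET_SHIFT;	// serve next ticket
+ */
+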
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned long tmp, ticket, next_ticket;
+
+ /* Grab the next ticket and wait for it to be "served" */
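+ /*
+  * On ARMv6K+ the wait loop idles in wfe; the unlocker's dsb_sev()
+  * wakes every waiter, and only the CPU whose ticket matches
+  * now_serving proceeds.
+  */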
+ __asm__ __volatile__(
+"1: ldrex %[ticket], [%[lockaddr]]\n"
+" uadd16 %[next_ticket], %[ticket], %[val1]\n"
+" strex %[tmp], %[next_ticket], [%[lockaddr]]\n"
+" teq %[tmp], #0\n"
+" bne 1b\n"
+" uxth %[ticket], %[ticket]\n"
+"2:\n"
+#ifdef CONFIG_CPU_32v6K
+" wfene\n"
+#endif
+" ldr %[tmp], [%[lockaddr]]\n"
+" cmp %[ticket], %[tmp], lsr #16\n"
+" bne 2b"
+ : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), [next_ticket]"=&r" (next_ticket)
+ : [lockaddr]"r" (&lock->lock), [val1]"r" (1)
+ : "cc");
+ smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long tmp, ticket, next_ticket;
+
+ /* Grab lock if now_serving == next_ticket and access is exclusive */
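+ /*
+  * The ror/eors pair tests the two halfwords for equality: e.g.
+  * 0x00050005 (both tickets 5, i.e. unlocked) is unchanged by a
+  * 16-bit rotate, so the eor yields zero.
+  */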
+ __asm__ __volatile__(
+" ldrex %[ticket], [%[lockaddr]]\n"
+" ror %[tmp], %[ticket], #16\n"
+" eors %[tmp], %[tmp], %[ticket]\n"
+" bne 1f\n"
+" uadd16 %[next_ticket], %[ticket], %[val1]\n"
+" strex %[tmp], %[next_ticket], [%[lockaddr]]\n"
+"1:"
+ : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
+ [next_ticket]"=&r" (next_ticket)
+ : [lockaddr]"r" (&lock->lock), [val1]"r" (1)
+ : "cc");
+ if (!tmp)
+ smp_mb();
+ return !tmp;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned long ticket, tmp;
+
+ smp_mb();
+
+ /* Bump now_serving by 1 */
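+ /*
+  * uadd16 adds the halfwords independently, so now_serving wraps
+  * from 0xFFFF back to 0 without disturbing next_ticket.
+  */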
+ __asm__ __volatile__(
+"1: ldrex %[ticket], [%[lockaddr]]\n"
+" uadd16 %[ticket], %[ticket], %[serving1]\n"
+" strex %[tmp], %[ticket], [%[lockaddr]]\n"
+" teq %[tmp], #0\n"
+" bne 1b"
+ : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp)
+ : [lockaddr]"r" (&lock->lock), [serving1]"r" (0x00010000)
+ : "cc");
+ dsb_sev();
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ unsigned long ticket;
+
+ /* Wait for now_serving == next_ticket */
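+ /*
+  * The lock is free exactly when the two halfwords match, so spin
+  * (sleeping in wfe on v6K+) until the XOR of the halves is zero.
+  */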
+ __asm__ __volatile__(
+#ifdef CONFIG_CPU_32v6K
+" cmpne %[lockaddr], %[lockaddr]\n"
+"1: wfene\n"
+#else
+"1:\n"
+#endif
+" ldr %[ticket], [%[lockaddr]]\n"
+" eor %[ticket], %[ticket], %[ticket], lsr #16\n"
+" uxth %[ticket], %[ticket]\n"
+" cmp %[ticket], #0\n"
+" bne 1b"
+ : [ticket]"=&r" (ticket)
+ : [lockaddr]"r" (&lock->lock)
+ : "cc");
+}
+
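+/* Locked exactly when the two ticket halves differ */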
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ unsigned long tmp = ACCESS_ONCE(lock->lock);
+ return (((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0;
+}
+
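+/*
+ * next_ticket - now_serving counts the holder plus any waiters;
+ * a value above one means at least one CPU is queued behind the
+ * holder.
+ */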
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ unsigned long tmp = ACCESS_ONCE(lock->lock);
+ return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
+}
+#endif
/*
* RWLOCKS