[IA64] SMT friendly version of spin_unlock_wait()
We can be kinder to SMT systems in spin_unlock_wait(): instead of
re-reading the lock word with an ordinary load on every iteration,
flush the ALAT with ia64_invala() and then poll with a speculative
check-load (ld4.c.nc).  The check-load only goes back to memory after
another CPU's store to the lock cacheline invalidates the ALAT entry,
so a waiting thread issues far fewer memory operations and leaves more
pipeline resources to its hardware sibling.
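For reference, a plain-C sketch of the exit condition that the asm
loop below tests (the helper name is made up for illustration;
TICKET_SHIFT and TICKET_MASK are the existing constants in this
header).  This is illustrative only: the real loop must reload the
word with ld4.c.nc so the load replays only after a remote store.

	/*
	 * Illustrative sketch, not part of the patch: the lock is
	 * free when the "now serving" field in the upper bits of
	 * lock->lock matches the "next ticket" field in the lower
	 * bits, i.e. the same test __ticket_spin_lock() uses to
	 * detect an uncontended acquire.
	 */
	static inline int __ticket_spin_is_free(raw_spinlock_t *lock)
	{
		int ticket = ACCESS_ONCE(lock->lock);

		return !(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK);
	}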
Signed-off-by: Tony Luck <tony.luck@intel.com>
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 4fa5027..239ecdc 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -75,6 +75,20 @@
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

+static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+{
+ int *p = (int *)&lock->lock, ticket;
+
+ ia64_invala();
+
+ for (;;) {
+ asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+ if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+ return;
+ cpu_relax();
+ }
+}
+
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
@@ -123,8 +137,7 @@

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
- cpu_relax();
+ __ticket_spin_unlock_wait(lock);
}

#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)