| /* spinlock.h: 32-bit Sparc spinlock support. | 
 |  * | 
 |  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | 
 |  */ | 
 |  | 
 | #ifndef __SPARC_SPINLOCK_H | 
 | #define __SPARC_SPINLOCK_H | 
 |  | 
 | #include <linux/threads.h>	/* For NR_CPUS */ | 
 |  | 
 | #ifndef __ASSEMBLY__ | 
 |  | 
 | #include <asm/psr.h> | 
 |  | 
/* The lock is held when the low byte is non-zero: ldstub in
 * __raw_spin_lock stores 0xff into it on acquisition. */
#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

/* Busy-wait (without acquiring) until the lock is released. */
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 |  | 
/* Acquire the spinlock.
 *
 * ldstub atomically loads the lock byte and stores 0xff into it; the
 * old value lands in %g2.  If the old value was non-zero the lock was
 * already held, so we branch to an out-of-line loop (.subsection 2)
 * that spins on plain loads (ldub) until the byte reads zero, then
 * jumps back to retry the atomic ldstub.  Keeping the contended loop
 * out of line leaves the uncontended fast path straight-line code.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"		/* atomic test-and-set; old byte -> %g2 */
	"orcc	%%g2, 0x0, %%g0\n\t"	/* set condition codes from old value */
	"bne,a	2f\n\t"			/* non-zero: contended, go spin */
	" ldub	[%0], %%g2\n\t"		/* annulled delay slot: reload lock byte */
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"	/* still held? */
	"bne,a	2b\n\t"			/* yes: keep spinning on plain loads */
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"			/* looks free: retry the ldstub */
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}
 |  | 
 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 
 | { | 
 | 	unsigned int result; | 
 | 	__asm__ __volatile__("ldstub [%1], %0" | 
 | 			     : "=r" (result) | 
 | 			     : "r" (lock) | 
 | 			     : "memory"); | 
 | 	return (result == 0); | 
 | } | 
 |  | 
/* Release the lock by storing zero (%g0) to the lock byte.  The
 * "memory" clobber keeps the compiler from sinking critical-section
 * accesses below the releasing store. */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
 |  | 
 | /* Read-write spinlocks, allowing multiple readers | 
 |  * but only one writer. | 
 |  * | 
 |  * NOTE! it is quite common to have readers in interrupts | 
 |  * but no interrupt writers. For those circumstances we | 
 |  * can "mix" irq-safe locks - any writer needs to get a | 
 |  * irq-safe write-lock, but readers can get non-irqsafe | 
 |  * read-locks. | 
 |  * | 
 |  * XXX This might create some problems with my dual spinlock | 
 |  * XXX scheme, deadlocks etc. -DaveM | 
 |  * | 
 |  * Sort of like atomic_t's on Sparc, but even more clever. | 
 |  * | 
 |  *	------------------------------------ | 
 |  *	| 24-bit counter           | wlock |  raw_rwlock_t | 
 |  *	------------------------------------ | 
 |  *	 31                       8 7     0 | 
 |  * | 
 |  * wlock signifies the one writer is in or somebody is updating | 
 |  * counter. For a writer, if he successfully acquires the wlock, | 
 |  * but counter is non-zero, he has to release the lock and wait, | 
 |  * till both counter and wlock are zero. | 
 |  * | 
 |  * Unfortunately this scheme limits us to ~16,000,000 cpus. | 
 |  */ | 
/* Take a read lock (interrupts must already be disabled by the
 * __raw_read_lock wrapper below).
 *
 * ___rw_read_enter is an out-of-line assembly helper with a
 * non-standard calling convention: lock pointer in %g1, return
 * address saved in %g4.  The ldstub in the call's delay slot grabs
 * the wlock byte (offset 3) before the helper runs; presumably the
 * helper then updates the 24-bit reader counter and releases the
 * byte -- it is defined elsewhere and not visible in this file.
 */
static inline void __read_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");	/* helper expects lock in %g1 */
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"			/* save return address for helper */
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"	/* delay slot: grab wlock byte */
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
 |  | 
/* Public read-lock entry point: disable interrupts around the
 * counter update so an interrupt handler taking a read lock on this
 * CPU cannot deadlock against the wlock byte we hold transiently
 * (see the rwlock layout comment above). */
#define __raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_lock(lock); \
	local_irq_restore(flags); \
} while(0)
 |  | 
/* Drop a read lock (interrupts disabled by the wrapper below).
 *
 * Same fixed-register convention as __read_lock: lock pointer in
 * %g1, return address in %g4.  The delay-slot ldstub grabs the wlock
 * byte so the out-of-line helper ___rw_read_exit can update the
 * 24-bit reader counter safely; the helper itself is defined
 * elsewhere and not visible here.
 */
static inline void __read_unlock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");	/* helper expects lock in %g1 */
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"			/* save return address for helper */
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"	/* delay slot: grab wlock byte */
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
 |  | 
/* Public read-unlock entry point: interrupts disabled for the same
 * reason as in __raw_read_lock -- the counter update briefly holds
 * the wlock byte. */
#define __raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
 |  | 
/* Take the write lock.
 *
 * Same fixed-register convention as the read-lock paths: lock
 * pointer in %g1, return address in %g4.  The delay-slot ldstub
 * atomically sets the wlock byte (offset 3); the out-of-line helper
 * ___rw_write_enter presumably handles contention and waits for the
 * reader counter to drain, per the scheme described in the comment
 * block above -- the helper is defined elsewhere, not visible here.
 */
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");	/* helper expects lock in %g1 */
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"			/* save return address for helper */
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"	/* delay slot: grab wlock byte */
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
 |  | 
/* Make one attempt to take the write lock.
 *
 * Step 1: atomically set the wlock byte with ldstub.  Byte offset 3
 * is the least-significant byte of the word on big-endian sparc32,
 * i.e. bits 7:0 of the layout described above.  A non-zero old value
 * means a writer (or a counter update) already owns it -- fail.
 *
 * Step 2: with the wlock byte held, inspect the upper 24 bits (the
 * reader counter).  If readers are active we must back off: clear
 * the wlock byte again and fail.  Succeed only when both the byte
 * was clear and no readers are counted.
 */
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;	/* any readers in the 24-bit counter? */
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;	/* back off: release wlock byte */
	}

	return (val == 0);
}
 |  | 
/* Writer unlock: a plain word store clears both the reader counter
 * and the wlock byte at once. */
#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)

/* The flags variant gains nothing on sparc32; just take the lock. */
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
/* Use the generic read-trylock helper (defined elsewhere). */
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

/* Readers may proceed when no writer holds the wlock byte; a writer
 * needs the whole word (counter and wlock byte) to be zero. */
#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
#define __raw_write_can_lock(rw) (!(rw)->lock)
 |  | 
 | #endif /* !(__ASSEMBLY__) */ | 
 |  | 
 | #endif /* __SPARC_SPINLOCK_H */ |