#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H

#include <linux/spinlock_types.h>

#define RW_LOCK_BIAS 0x01000000

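/*
 * Out-of-line lock primitives provided by the CRIS arch code.  The
 * convention used throughout this file (see __raw_spin_is_locked() and
 * __raw_spin_unlock() below) is that a ->slock byte of 1 means unlocked
 * and a value <= 0 means locked.
 */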
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);

static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	return *(volatile signed char *)(&(x)->slock) <= 0;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	/* Release the lock by storing 1 into ->slock; the "memory" clobber
	 * is a compiler barrier so the critical section is not moved past
	 * the releasing store. */
	__asm__ volatile ("move.d %1,%0"
			  : "=m" (lock->slock)
			  : "r" (1)
			  : "memory");
}

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return cris_spin_trylock((void *)&lock->slock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	cris_spin_lock((void *)&lock->slock);
}

static inline void
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_lock(lock);
}
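
/*
 * Nothing uses these __raw_* routines directly; they are the arch backend
 * behind the generic <linux/spinlock.h> wrappers.  Roughly (a sketch, with
 * a hypothetical lock):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);	ends up in __raw_spin_lock() above
 *	... critical section ...
 *	spin_unlock(&my_lock);	ends up in __raw_spin_unlock() above
 */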

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
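
/*
 * ->lock counts available reader slots: it is RW_LOCK_BIAS when the lock
 * is free, each reader takes one slot (lock--), and a writer claims all of
 * them at once (lock = 0).  Every update is made under the ->slock guard.
 *
 * A minimal sketch of the irq-safe/non-irqsafe mixing described above,
 * using hypothetical names (my_rwlock, my_irq_handler):
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *	unsigned long flags;
 *
 *	Process-context writer, which must keep interrupt readers out:
 *		write_lock_irqsave(&my_rwlock, flags);
 *		... update the data ...
 *		write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	A reader inside my_irq_handler() can use the plain form:
 *		read_lock(&my_rwlock);
 *		... read the data ...
 *		read_unlock(&my_rwlock);
 */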

static inline int __raw_read_can_lock(raw_rwlock_t *x)
{
	return (int)(x)->lock > 0;
}

static inline int __raw_write_can_lock(raw_rwlock_t *x)
{
	return (x)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->slock);
	/* Wait for the writer to release ->lock, dropping the guard lock
	 * while spinning so the writer's unlock path can take it. */
	while (rw->lock == 0) {
		__raw_spin_unlock(&rw->slock);
		cpu_relax();
		__raw_spin_lock(&rw->slock);
	}
	rw->lock--;
	__raw_spin_unlock(&rw->slock);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->slock);
	/* Wait until every reader is gone, again dropping the guard lock
	 * while spinning so their unlock paths can take it. */
	while (rw->lock != RW_LOCK_BIAS) {
		__raw_spin_unlock(&rw->slock);
		cpu_relax();
		__raw_spin_lock(&rw->slock);
	}
	rw->lock = 0;
	__raw_spin_unlock(&rw->slock);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->slock);
	rw->lock++;
	__raw_spin_unlock(&rw->slock);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->slock);
	rw->lock = RW_LOCK_BIAS;
	__raw_spin_unlock(&rw->slock);
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	int ret = 0;
	__raw_spin_lock(&rw->slock);
	if (rw->lock != 0) {
		rw->lock--;
		ret = 1;
	}
	__raw_spin_unlock(&rw->slock);
	return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	int ret = 0;
	__raw_spin_lock(&rw->slock);
	if (rw->lock == RW_LOCK_BIAS) {
		rw->lock = 0;
		ret = 1;
	}
	__raw_spin_unlock(&rw->slock);
	return ret;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_ARCH_SPINLOCK_H */