#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
/*
 * The lock word follows the PA-RISC ldcw convention: nonzero means
 * the lock is free, zero means it is held.  __ldcw_align() returns
 * the 16-byte-aligned word inside the lock that ldcw requires.
 */
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}
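
/*
 * For reference, a rough C model of the __ldcw() primitive used
 * throughout this file.  The real __ldcw is a single PA-RISC "load
 * and clear word" (ldcw) instruction, wrapped in inline assembly
 * elsewhere in the architecture headers, which performs both steps
 * atomically; this sketch is illustrative only:
 *
 *	unsigned int ldcw_model(volatile unsigned int *a)
 *	{
 *		unsigned int old = *a;	   (1) load the lock word
 *		*a = 0;			   (2) clear it, marking "held"
 *		return old;		   nonzero return: lock acquired
 *	}
 */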

#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))

static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				/* the caller had the PSW I-bit set, so
				 * re-enable interrupts while we spin */
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}
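
/*
 * The nested loop above is the classic test-and-test-and-set shape:
 * the outer __ldcw() is the atomic attempt (it writes, so it pulls
 * the lock's cacheline in exclusive), while the inner plain load
 * spins with the line shared until the lock at least looks free.
 * Schematically:
 *
 *	while (atomic load-and-clear of *a returns 0)	expensive
 *		while (*a == 0)				cheap read-only spin
 *			cpu_relax();
 */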

static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;	/* a plain store of nonzero releases the lock */
	mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;	/* nonzero old value: we got the lock */
	mb();

	return ret;
}
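
/*
 * None of the __raw_* functions above are called directly; they back
 * the generic spin_lock()/spin_trylock()/spin_unlock() API from
 * <linux/spinlock.h>.  A minimal caller-side sketch (my_lock and
 * my_count are hypothetical names, for illustration only):
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	static int my_count;
 *
 *	spin_lock(&my_lock);
 *	my_count++;
 *	spin_unlock(&my_lock);
 *
 *	if (spin_trylock(&my_lock)) {	   nonzero means success
 *		my_count++;
 *		spin_unlock(&my_lock);
 *	}
 */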

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */
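
/*
 * The rwlock below encodes its state in rw->counter, protected by the
 * embedded spinlock rw->lock:
 *
 *	counter == 0	unlocked
 *	counter == n>0	held by n readers
 *	counter == -1	held by one writer
 *
 * A caller-side sketch through the generic <linux/spinlock.h> API
 * (my_rwlock is a hypothetical name, for illustration only):
 *
 *	static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *	read_lock(&my_rwlock);		counter: 0 -> 1
 *	read_unlock(&my_rwlock);	counter: 1 -> 0
 *	write_lock(&my_rwlock);		counter: 0 -> -1
 *	write_unlock(&my_rwlock);	counter: -1 -> 0
 */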

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

/* read_lock and read_unlock are pretty straightforward.  It is a pity
 * that we end up saving/restoring the flags twice for read_lock_irqsave
 * and friends. */

static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	rw->counter++;

	__raw_spin_unlock(&rw->lock);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	rw->counter--;

	__raw_spin_unlock(&rw->lock);
}

/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so, we release the lock and wait till
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers, someone screwed up and we'd deadlock
 * sooner or later anyway.   prumpf */

static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
retry:
	__raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		/* wait for the readers to drain, without holding the lock */
		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}

/* write_unlock is absolutely trivial - we don't have to wait for anything */

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}

static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		return 0;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}

static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
{
	return rw->counter > 0;
}

static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
{
	return rw->counter < 0;
}

#endif /* __ASM_SPINLOCK_H */