/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
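
/* Illustrative note (not part of the original header): ATOMIC_HASH()
 * divides the address by L1_CACHE_BYTES before masking, so all atomics
 * that share a cacheline also share a lock.  Assuming L1_CACHE_BYTES
 * is 64, for example:
 *
 *	ATOMIC_HASH((void *) 0x1000)	maps to &__atomic_hash[0]
 *	ATOMIC_HASH((void *) 0x1010)	maps to &__atomic_hash[0]  (same line)
 *	ATOMIC_HASH((void *) 0x1040)	maps to &__atomic_hash[1]  (next line)
 */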

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
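
/* Illustrative usage (not part of the original header; the variable is
 * hypothetical):
 *
 *	unsigned long flags = 0;
 *	unsigned long old = xchg(&flags, 1UL);
 *
 * after which flags holds 1 and old holds the value flags had before
 * the exchange.
 */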


#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
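
/* Illustrative usage (not part of the original header): cmpxchg() stores
 * the new value only if the location still holds the expected old value,
 * which makes it the building block for lock-free read-modify-write.
 * Assuming a hypothetical "int counter":
 *
 *	int old, seen = counter;
 *	do {
 *		old = seen;
 *		seen = cmpxchg(&counter, old, old + 1);
 *	} while (seen != old);
 */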

/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;

/* It's possible to reduce all atomic operations to
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
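
/* Illustrative usage (not part of the original header): a common pattern
 * is taking a reference only while an object is still live, e.g. with a
 * hypothetical structure carrying an atomic_t refcount:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;
 *
 * so the caller bails out once the count has already dropped to zero.
 */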

#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })
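
/* Illustrative usage (not part of the original header): ATOMIC_INIT() is
 * meant for compile-time initialisation, atomic_set() for run time:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 */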

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

typedef struct { volatile s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return v->counter;
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)i),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)i),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)i),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)i),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

#endif /* CONFIG_64BIT */

#include <asm-generic/atomic.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */