/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/config.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__

#define ATOMIC_INIT(i)	{ (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)		((v)->counter)

#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
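
/*
 * Usage sketch (illustrative only, not part of this header): the classic
 * consumer of the *_and_test helpers is reference counting, where whoever
 * drops the last reference frees the object.  'struct foo' and foo_put()
 * are placeholder names made up for this example.
 *
 *	struct foo {
 *		atomic_t refcnt;
 *	};
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			kfree(f);
 *	}
 */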

/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)	{ (i) }
#define atomic24_read(v)	((v)->counter)
#define atomic24_set(v, i)	(((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is to embed the spin lock byte within the word,
 * using the low byte so signedness is easily retained via a quick
 * arithmetic shift.  It looks like this:
 *
 *	 ---------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic24_t
 *	 ---------------------------------------
 *	 31                          8 7       0
 */
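
/*
 * Purely illustrative sketch (the function name below is made up and is
 * not used anywhere): roughly what the out-of-line routines do with this
 * layout, written in plain C.  The real code in arch/sparc/lib/atomic.S
 * takes the lock byte atomically; this C version skips that step and is
 * therefore NOT atomic -- it only shows how the lock byte and the
 * shifted 24-bit counter interact.
 */
static inline int __atomic24_add_sketch(int i, atomic24_t *v)
{
	int old, new;

	/* Wait for the lock byte (bits 7..0) to read as clear. */
	do {
		old = v->counter;
	} while (old & 0xff);

	/* The counter lives in bits 31..8; the arithmetic shift keeps
	 * its sign when extracting it. */
	new = ((old >> 8) + i) << 8;

	/* Storing a value with a zero low byte also releases the lock. */
	v->counter = new;

	return new >> 8;
}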

#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* Spin until the lock byte reads as clear so we return a
	 * stable, unlocked value. */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif

static inline int __atomic24_add(int i, atomic24_t *v)
{
	/* The out-of-line helper takes its operands in fixed global
	 * registers (%g1 = pointer, %g2 = value in / new value out) and
	 * may clobber %g3, %g4 and %g7, hence the explicit bindings. */
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

static inline int __atomic24_sub(int i, atomic24_t *v)
{
	/* Same fixed-register calling convention as __atomic24_add(). */
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !(__KERNEL__) */

#endif /* !(__ARCH_SPARC_ATOMIC__) */