#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */
#include <linux/compiler.h>
#include <linux/types.h>

#define ATOMIC_INIT(i)	{ (i) }

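/*
 * __CS_LOOP expands to the canonical s390 read-modify-write sequence:
 * load the counter, apply 'op_string' to a copy, then COMPARE AND SWAP
 * (CS) the result back.  If another CPU changed the counter in the
 * meantime, CS reloads the current value and the 'jl' branch retries.
 */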
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

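/*
 * Loads and stores of an aligned int are atomic on s390; barrier() is
 * only a compiler barrier that keeps the access from being cached or
 * reordered by the compiler, it emits no code.
 */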
static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

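/*
 * Illustrative sketch, not part of this header: the usual reference
 * counting idiom built from these primitives ('struct foo' and
 * free_foo() are hypothetical):
 *
 *	static void get_foo(struct foo *p)
 *	{
 *		atomic_inc(&p->refcount);
 *	}
 *
 *	static void put_foo(struct foo *p)
 *	{
 *		if (atomic_dec_and_test(&p->refcount))
 *			free_foo(p);
 *	}
 */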
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

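/*
 * CS compares 'old' against the counter in storage: on a match it
 * stores 'new', otherwise it loads the current counter value into
 * 'old'.  Either way the function returns the value the counter held
 * before the operation, which is exactly the cmpxchg contract.
 */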
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

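/*
 * Illustrative sketch, not part of this header: atomic_inc_not_zero()
 * is the building block for "take a reference only if the object is
 * still live" lookups (lookup_foo() is hypothetical):
 *
 *	p = lookup_foo(key);
 *	if (p && !atomic_inc_not_zero(&p->refcount))
 *		p = NULL;	... object already on its way out ...
 */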
#undef __CS_LOOP

#define ATOMIC64_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

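/*
 * __CSG_LOOP is the 64-bit counterpart of __CS_LOOP: LOAD (LG) and
 * COMPARE AND SWAP (CSG) operate on the full doubleword counter.
 */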
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

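/*
 * On 31-bit there is no 64-bit CSG, so the 64-bit counter is emulated
 * with an even/odd register pair: LM/STM transfer the pair to and from
 * memory and COMPARE DOUBLE AND SWAP (CDS) provides the atomic update.
 */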
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

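/*
 * With read, set, xchg and cmpxchg in place, the remaining 64-bit
 * operations are plain compare-and-swap retry loops: read the counter,
 * compute the new value and retry until no concurrent update got in
 * between.
 */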
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

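/*
 * Hooks used by generic code to order memory accesses around atomic
 * inc/dec operations; implemented conservatively here as full memory
 * barriers.
 */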
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>

#endif /* __ARCH_S390_ATOMIC__ */