/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle
 */

/*
 * As workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
 * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
 * main big wrapper ...
 */
#include <linux/config.h>
#include <linux/spinlock.h>

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/cpu-features.h>
#include <asm/war.h>

extern spinlock_t atomic_lock;

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)    { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)          ((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)         ((v)->counter = (i))

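/*
 * Usage sketch (illustrative only, not part of this interface): a counter
 * declared with ATOMIC_INIT and accessed through atomic_read()/atomic_set().
 * The names example_count, example_reset and example_snapshot below are
 * hypothetical.
 *
 *      static atomic_t example_count = ATOMIC_INIT(0);
 *
 *      static void example_reset(void)
 *      {
 *              atomic_set(&example_count, 0);
 *      }
 *
 *      static int example_snapshot(void)
 *      {
 *              return atomic_read(&example_count);
 *      }
 */
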
/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     ll      %0, %1          # atomic_add    \n"
                "       addu    %0, %2                          \n"
                "       sc      %0, %1                          \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     ll      %0, %1          # atomic_add    \n"
                "       addu    %0, %2                          \n"
                "       sc      %0, %1                          \n"
                "       beqz    %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                v->counter += i;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }
}

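/*
 * What the ll/sc sequences above amount to, as a C-level sketch (for
 * illustration only; the real operation must keep the load-linked and
 * store-conditional in one short asm block so that any competing write
 * to v->counter between the two makes the sc fail and the loop retry):
 *
 *      do {
 *              old = v->counter;                       (ll)
 *              new = old + i;                          (addu)
 *      } while (store of new to v->counter failed);    (sc + beqz/beqzl 1b)
 *
 * The beqzl variant is used only when the workaround selected by
 * R10000_LLSC_WAR is in effect; the spinlock path covers CPUs without
 * ll/sc support.
 */
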
/*
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t * v)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     ll      %0, %1          # atomic_sub    \n"
                "       subu    %0, %2                          \n"
                "       sc      %0, %1                          \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     ll      %0, %1          # atomic_sub    \n"
                "       subu    %0, %2                          \n"
                "       sc      %0, %1                          \n"
                "       beqz    %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                v->counter -= i;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }
}

/*
 * Same as above, but return the result value
 */
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
        unsigned long result;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_add_return     \n"
                "       addu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
                "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_add_return     \n"
                "       addu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
                "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                result = v->counter;
                result += i;
                v->counter = result;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }

        return result;
}

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
        unsigned long result;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_sub_return     \n"
                "       subu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
                "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_sub_return     \n"
                "       subu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
                "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                result = v->counter;
                result -= i;
                v->counter = result;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }

        return result;
}

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
        unsigned long result;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       sc      %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       sync                                            \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       sc      %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       sync                                            \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                result = v->counter;
                result -= i;
                if (result >= 0)
                        v->counter = result;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }

        return result;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

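/*
 * Sketch of the usual atomic_cmpxchg() retry idiom (illustrative only;
 * example_inc_below_max and EXAMPLE_MAX are hypothetical names): increment
 * a counter unless it has already reached a ceiling.
 *
 *      static int example_inc_below_max(atomic_t *v)
 *      {
 *              int old, new;
 *
 *              do {
 *                      old = atomic_read(v);
 *                      if (old >= EXAMPLE_MAX)
 *                              return 0;
 *                      new = old + 1;
 *              } while (atomic_cmpxchg(v, old, new) != old);
 *
 *              return 1;
 *      }
 */
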
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)                              \
({                                                              \
        int c, old;                                             \
        c = atomic_read(v);                                     \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old;                                        \
        c != (u);                                               \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

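/*
 * Typical use of atomic_inc_not_zero(), as a hedged sketch (struct
 * example_obj and example_get are hypothetical): take a reference to an
 * object only while its reference count is still non-zero, so a racing
 * final release cannot be resurrected.
 *
 *      struct example_obj {
 *              atomic_t refcount;
 *      };
 *
 *      static int example_get(struct example_obj *obj)
 *      {
 *              return atomic_inc_not_zero(&obj->refcount);
 *      }
 */
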
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

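/*
 * The classical release side of a reference count, as a sketch (struct
 * example_obj and example_put are hypothetical): only the caller that
 * drops the count to zero frees the object.
 *
 *      static void example_put(struct example_obj *obj)
 *      {
 *              if (atomic_dec_and_test(&obj->refcount))
 *                      kfree(obj);
 *      }
 */
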
/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)       atomic_sub_if_positive(1, v)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)

#ifdef CONFIG_64BIT

typedef struct { volatile __s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i)    { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)        ((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v,i)       ((v)->counter = (i))

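/*
 * The 64-bit API below mirrors the 32-bit one.  Hypothetical sketch of a
 * wide statistics counter (example_bytes and example_account are not part
 * of this interface):
 *
 *      static atomic64_t example_bytes = ATOMIC64_INIT(0);
 *
 *      static void example_account(long nbytes)
 *      {
 *              atomic64_add(nbytes, &example_bytes);
 *      }
 */
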
/*
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic64_add(long i, atomic64_t * v)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     lld     %0, %1          # atomic64_add  \n"
                "       daddu   %0, %2                          \n"
                "       scd     %0, %1                          \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     lld     %0, %1          # atomic64_add  \n"
                "       daddu   %0, %2                          \n"
                "       scd     %0, %1                          \n"
                "       beqz    %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                v->counter += i;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }
}

/*
 * atomic64_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     lld     %0, %1          # atomic64_sub  \n"
                "       dsubu   %0, %2                          \n"
                "       scd     %0, %1                          \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     lld     %0, %1          # atomic64_sub  \n"
                "       dsubu   %0, %2                          \n"
                "       scd     %0, %1                          \n"
                "       beqz    %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                v->counter -= i;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }
}

/*
 * Same as above, but return the result value
 */
static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
        unsigned long result;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
                "       daddu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       daddu   %0, %1, %3                              \n"
                "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
                "       daddu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       daddu   %0, %1, %3                              \n"
                "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                result = v->counter;
                result += i;
                v->counter = result;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }

        return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
        unsigned long result;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
                "       dsubu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       dsubu   %0, %1, %3                              \n"
                "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
                "       dsubu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       dsubu   %0, %1, %3                              \n"
                "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                result = v->counter;
                result -= i;
                v->counter = result;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }

        return result;
}

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
        unsigned long result;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       sync                                            \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       sync                                            \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");
        } else {
                unsigned long flags;

                spin_lock_irqsave(&atomic_lock, flags);
                result = v->counter;
                result -= i;
                if (result >= 0)
                        v->counter = result;
                spin_unlock_irqrestore(&atomic_lock, flags);
        }

        return result;
}

#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

/*
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

/*
 * atomic64_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)     atomic64_sub_if_positive(1, v)

/*
 * atomic64_inc - increment atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1.
 */
#define atomic64_inc(v) atomic64_add(1,(v))

/*
 * atomic64_dec - decrement atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic64_dec(v) atomic64_sub(1,(v))

/*
 * atomic64_add_negative - add and test if negative
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)

#endif /* CONFIG_64BIT */

/*
 * atomic*_return operations are serializing but not the non-*_return
 * versions.
 */
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

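/*
 * Hedged sketch of why the smp_mb__*_atomic_* hooks exist (example_stop
 * and the fields it touches are hypothetical): when an ordinary store
 * must become visible to other CPUs before a plain atomic_dec() is
 * observed, order them explicitly, since the non-*_return operations
 * above are not serializing.
 *
 *      static void example_stop(struct example_obj *obj)
 *      {
 *              obj->shutting_down = 1;
 *              smp_mb__before_atomic_dec();
 *              atomic_dec(&obj->users);
 *      }
 */
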
#include <asm-generic/atomic.h>
#endif /* _ASM_ATOMIC_H */