/*
 *  linux/include/asm-arm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <asm/system.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

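/*
 * Atomically add 'i' to v->counter and return the new value.  The
 * ldrex/strex pair is retried until the exclusive store succeeds.
 */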
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

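/*
 * Atomically subtract 'i' from v->counter and return the new value.
 */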
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

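/*
 * Atomically compare ptr->counter with 'old' and, if they match, store
 * 'new'.  Returns the value that was observed, so the caller can tell
 * whether the swap happened (observed value == 'old').
 */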
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}

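/*
 * Atomically clear the bits in 'mask' from the word at 'addr'.
 */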
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

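/*
 * Pre-ARMv6 CPUs lack the ldrex/strex instructions, so the helpers below
 * make their read-modify-write sequences atomic by disabling interrupts
 * around them.  This is only safe on uniprocessor systems, hence the
 * #error above for SMP builds.
 */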
#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

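/*
 * atomic_xchg() is built on the architecture's xchg() helper, which is
 * provided via <asm/system.h>.
 */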
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
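
/*
 * Usage sketch for atomic_inc_not_zero() (illustrative only; 'obj' and
 * 'refcount' are hypothetical names, not defined in this header).  Only
 * take a new reference if the object is not already being torn down:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return -ENOENT;
 *
 * atomic_add_unless() returns non-zero if the addition was performed,
 * i.e. the counter did not equal 'u' when it was sampled.
 */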
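/* The void-returning ops below simply discard the *_return results. */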
#define atomic_add(i, v)	(void) atomic_add_return(i, v)
#define atomic_inc(v)		(void) atomic_add_return(1, v)
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
#define atomic_dec(v)		(void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */