#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/atomic.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which simply wraps atomic_long_t;
 * as an optimization that is rather pointless.  The whole point behind
 * local_t is that some processors can perform atomic adds and subtracts
 * in a manner which is atomic wrt IRQs running on the same CPU.  local_t
 * allows such capabilities to be exploited.
 */

/* Implement in terms of atomics. */

/* Wrapped in a struct rather than typedef'd straight to atomic_long_t,
 * so that local_t values can't be mixed with atomic_long_t ones. */
typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

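/*
 * Example (a minimal sketch, not part of the original header): declaring
 * and statically initializing a local_t counter.  The name "packet_count"
 * is hypothetical.
 *
 *	static local_t packet_count = LOCAL_INIT(0);
 */
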
#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set((&(l)->a),(i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))

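/*
 * Example (a sketch; "packet_count" is hypothetical): the basic
 * operations mirror their atomic_long_* counterparts:
 *
 *	local_inc(&packet_count);
 *	local_add(42, &packet_count);
 *	printk(KERN_DEBUG "%ld packets\n", local_read(&packet_count));
 */
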
#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)

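/*
 * Example (a sketch; "obj", its "refs" member and free_object() are
 * hypothetical): the *_and_test forms return true when the new value
 * is zero, which suits reference counting:
 *
 *	if (local_dec_and_test(&obj->refs))
 *		free_object(obj);
 */
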
#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)

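/*
 * Example (a sketch; "counter" is hypothetical): local_cmpxchg returns
 * the value that was previously stored, so the usual compare-and-swap
 * retry loop applies.  A saturating increment:
 *
 *	long old, new;
 *
 *	do {
 *		old = local_read(&counter);
 *		if (old == LONG_MAX)
 *			break;
 *		new = old + 1;
 *	} while (local_cmpxchg(&counter, old, new) != old);
 */
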
/* Non-atomic variants, i.e. for use with preemption disabled and no
 * access from interrupt context, etc.  Some archs can optimize this
 * case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))

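/*
 * Example (a sketch; "packet_count" is hypothetical): these forms are
 * only safe when nothing else can update the counter concurrently, for
 * instance with preemption disabled and no updates from interrupts:
 *
 *	preempt_disable();
 *	__local_inc(&packet_count);
 *	preempt_enable();
 */
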
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable (e.g. mystruct.foo), not an address.
 */
#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))

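/*
 * Example (a sketch; "hits" is hypothetical): note the per-cpu variable
 * is passed by name.  With this generic implementation, __get_cpu_var()
 * assumes the caller keeps preemption disabled:
 *
 *	static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);
 *
 *	preempt_disable();
 *	cpu_local_inc(hits);
 *	preempt_enable();
 */
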
/* Non-atomic increments, i.e. for use with preemption disabled and no
 * access from interrupt context, etc.  Some archs can optimize this
 * case well.
 */
#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))

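/*
 * Example (a sketch; "stats_lock", "len" and "bytes_counted" are
 * hypothetical): a caller that already runs with preemption off, e.g.
 * under a spinlock, can use the double-underscore forms and skip the
 * atomic operation entirely:
 *
 *	spin_lock(&stats_lock);
 *	__cpu_local_add(len, bytes_counted);
 *	spin_unlock(&stats_lock);
 */
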
#endif /* _ASM_GENERIC_LOCAL_H */