#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
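
/*
 * Illustrative sketch (not part of the original header): bumping a
 * hypothetical per-cpu counter with the accessors above; the variable
 * and function names are made up for the example.
 *
 *	DEFINE_PER_CPU(unsigned long, example_hits);
 *
 *	static void example_count_hit(void)
 *	{
 *		get_cpu_var(example_hits)++;
 *		put_cpu_var(example_hits);
 *	}
 *
 * Preemption is disabled between the two calls, so the increment hits
 * the CPU the task is running on.  get_cpu_ptr()/put_cpu_ptr() work
 * the same way for dynamically allocated percpu pointers.
 */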

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * a large page mapping, for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and x86_32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get to a cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif
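
/*
 * Illustrative sketch (not part of the original header): summing a
 * dynamically allocated percpu counter across all CPUs.  "counters"
 * is a hypothetical pointer obtained from alloc_percpu() below.
 *
 *	unsigned long __percpu *counters;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counters, cpu);
 */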

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
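
/*
 * Illustrative sketch (not part of the original header): allocating
 * and freeing a percpu object.  The struct and variable names are
 * hypothetical.
 *
 *	struct example_stats {
 *		unsigned long	rx;
 *		unsigned long	tx;
 *	};
 *
 *	struct example_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct example_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(stats);
 */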

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long.  percpu_read() evaluates to an lvalue and
 * all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(var) *pr_ptr__ = &(var);					\
	typeof(var) pr_ret__;						\
	pr_ret__ = get_cpu_var(*pr_ptr__);				\
	put_cpu_var(*pr_ptr__);						\
	pr_ret__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	typeof(var) *pgto_ptr__ = &(var);				\
	get_cpu_var(*pgto_ptr__) op val;				\
	put_cpu_var(*pgto_ptr__);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
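
/*
 * Illustrative sketch (not part of the original header): the
 * non-lvalue accessors on a hypothetical per-cpu scalar.  Each call
 * is atomic w.r.t. preemption on its own; the sequence as a whole is
 * not.
 *
 *	DEFINE_PER_CPU(int, example_state);
 *
 *	int v = percpu_read(example_state);
 *	percpu_write(example_state, v + 1);
 *	percpu_add(example_state, 2);
 */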

/*
 * Branching macros that split an operation into a set of functions
 * called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})
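
/*
 * Illustrative note (not part of the original header) on how the size
 * dispatch resolves: for a 4-byte percpu variable,
 *
 *	this_cpu_read(four_byte_var)
 *
 * expands to __pcpu_size_call_return(this_cpu_read_, four_byte_var),
 * whose switch selects the stem##4 case, i.e. a call to
 * this_cpu_read_4() -- either an arch-provided single-instruction
 * version or the generic fallback defined further below.  The switch
 * on a compile-time constant collapses to the one selected case.
 */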

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch (sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})
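
/*
 * Illustrative sketch (not part of the original header): a pair of
 * percpu words laid out to satisfy the alignment rule above.  The
 * names are hypothetical.
 *
 *	struct example_pair {
 *		unsigned long	word;
 *		unsigned long	gen;
 *	} __aligned(2 * sizeof(unsigned long));
 *
 *	DEFINE_PER_CPU(struct example_pair, example_pair);
 *
 * this_cpu_cmpxchg_double(example_pair.word, example_pair.gen, ...)
 * then replaces both members as one operation.
 */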

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
		case 1: stem##1(variable, __VA_ARGS__); break;		\
		case 2: stem##2(variable, __VA_ARGS__); break;		\
		case 4: stem##4(variable, __VA_ARGS__); break;		\
		case 8: stem##8(variable, __VA_ARGS__); break;		\
		default:						\
			__bad_size_call_parameter(); break;		\
	}								\
} while (0)

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely, e.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes, e.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */

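/*
 * Illustrative sketch (not part of the original header): the
 * preempt-safe group in use on a hypothetical event counter.  Safe to
 * call from preemptible context; the increment lands on whichever CPU
 * it executes on.
 *
 *	DEFINE_PER_CPU(unsigned long, example_events);
 *
 *	static void example_note_event(void)
 *	{
 *		this_cpu_inc(example_events);
 *	}
 */
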
#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	preempt_disable();						\
	__this_cpu_add(pcp, val);					\
	ret__ = __this_cpu_read(pcp);					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
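
/*
 * Illustrative sketch (not part of the original header): a
 * hypothetical round-robin index maintained with
 * this_cpu_inc_return().  NR_EXAMPLE_SLOTS is a made-up bound.
 *
 *	DEFINE_PER_CPU(unsigned int, example_rr);
 *
 *	unsigned int idx = this_cpu_inc_return(example_rr) % NR_EXAMPLE_SLOTS;
 *
 * The read-modify-write is atomic w.r.t. preemption, so two tasks on
 * the same CPU cannot observe the same return value.
 */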

#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif
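
/*
 * Illustrative sketch (not part of the original header): draining a
 * hypothetical per-cpu pending count with this_cpu_xchg(), reading
 * the old value and resetting it in one preempt-atomic step.
 *
 *	DEFINE_PER_CPU(unsigned long, example_pending);
 *
 *	unsigned long pending = this_cpu_xchg(example_pending, 0);
 */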

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
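
/*
 * Illustrative sketch (not part of the original header): a saturating
 * increment built from this_cpu_cmpxchg() in the usual retry loop.
 * The names and EXAMPLE_MAX are hypothetical.
 *
 *	DEFINE_PER_CPU(int, example_level);
 *
 *	int old, new;
 *
 *	do {
 *		old = this_cpu_read(example_level);
 *		new = min(old + 1, EXAMPLE_MAX);
 *	} while (this_cpu_cmpxchg(example_level, old, new) != old);
 */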

/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	preempt_disable();						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86, for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts, then one of these RMW operations can show
 * unexpected behavior because the execution thread was rescheduled on
 * another processor or an interrupt occurred and the same percpu
 * variable was modified from the interrupt context.
 */
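
/*
 * Illustrative sketch (not part of the original header): using a raw
 * variant inside a section that already disables preemption.  The
 * variable name is hypothetical.
 *
 *	DEFINE_PER_CPU(unsigned long, example_raw);
 *
 *	preempt_disable();
 *	__this_cpu_inc(example_raw);
 *	preempt_enable();
 *
 * The raw op skips the redundant preempt_disable()/enable() pair that
 * this_cpu_inc() would otherwise add.
 */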
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

#define __this_cpu_generic_add_return(pcp, val)				\
({									\
	__this_cpu_add(pcp, val);					\
	__this_cpu_read(pcp);						\
})

#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
#  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
#  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
#  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
#  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

#define __this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
#  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
#  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
#  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
#  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif

#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
#  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
#  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
#  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
#  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif

#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (__this_cpu_read(pcp1) == (oval1) &&				\
			__this_cpu_read(pcp2) == (oval2)) {		\
		__this_cpu_write(pcp1, (nval1));			\
		__this_cpu_write(pcp2, (nval2));			\
		__ret = 1;						\
	}								\
	(__ret);							\
})

#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations.  Note that these
 * operations are *not* safe against modification of the same variable
 * from another processor (which one gets when using regular atomic
 * operations).  They are guaranteed to be atomic vs. local interrupts
 * and preemption only.
 */
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif
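
/*
 * Illustrative sketch (not part of the original header): a
 * hypothetical statistic updated from both process and interrupt
 * context.
 *
 *	DEFINE_PER_CPU(unsigned long, example_irq_stat);
 *
 *	irqsafe_cpu_inc(example_irq_stat);
 *
 * The update is protected against local interrupts, so this CPU's
 * interrupt handlers may safely modify the same variable.
 */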

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg
# ifndef irqsafe_cpu_cmpxchg_1
#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_2
#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_4
#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_8
#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif

#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg_double
# ifndef irqsafe_cpu_cmpxchg_double_1
#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_2
#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_4
#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_8
#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

#endif /* __LINUX_PERCPU_H */