#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

#define put_cpu_var(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
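
/*
 * Usage sketch (illustrative only; the variable and function names
 * are hypothetical, DEFINE_PER_CPU comes from the percpu variable
 * definitions).  Preemption stays disabled between the two calls:
 *
 *	DEFINE_PER_CPU(int, my_counter);
 *
 *	void bump_my_counter(void)
 *	{
 *		get_cpu_var(my_counter)++;
 *		put_cpu_var(my_counter);
 *	}
 */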

#ifdef CONFIG_SMP

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch
 * is manually allocating and mapping it for faster access (as a part
 * of large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get to a cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
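
/*
 * Example (a minimal sketch; "ctrs" stands for a hypothetical pointer
 * obtained from alloc_percpu() below).  Pinning the task with
 * get_cpu() makes the non-atomic access to the local copy safe:
 *
 *	int cpu = get_cpu();
 *	(*per_cpu_ptr(ctrs, cpu))++;
 *	put_cpu();
 */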

extern void *__alloc_reserved_percpu(size_t size, size_t align);
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
	kfree(p);
}

static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	\
	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))

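/*
 * Example (a minimal sketch; the names are hypothetical).
 * alloc_percpu() hands back a zeroed copy of the object for every
 * possible CPU; per_cpu_ptr() reaches each copy and free_percpu()
 * releases them all:
 *
 *	unsigned long *hits = alloc_percpu(unsigned long);
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	if (!hits)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(hits, cpu);
 *	free_percpu(hits);
 */
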
/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of char, int or long.  percpu_read() evaluates to the
 * value read and all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(var) *pr_ptr__ = &(var);					\
	typeof(var) pr_ret__;						\
	pr_ret__ = get_cpu_var(*pr_ptr__);				\
	put_cpu_var(*pr_ptr__);						\
	pr_ret__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	typeof(var) *pgto_ptr__ = &(var);				\
	get_cpu_var(*pgto_ptr__) op val;				\
	put_cpu_var(*pgto_ptr__);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif

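/*
 * Usage sketch (illustrative; the variable is hypothetical and would
 * be declared with DEFINE_PER_CPU).  percpu_add() performs a
 * preemption safe RMW and percpu_read() a preemption safe read:
 *
 *	DEFINE_PER_CPU(long, nr_events);
 *
 *	percpu_add(nr_events, 3);
 *	printk("%ld\n", percpu_read(nr_events));
 */
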
/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable);break;			\
	case 2: pscr_ret__ = stem##2(variable);break;			\
	case 4: pscr_ret__ = stem##4(variable);break;			\
	case 8: pscr_ret__ = stem##8(variable);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__);break;			\
	case 2: stem##2(variable, __VA_ARGS__);break;			\
	case 4: stem##4(variable, __VA_ARGS__);break;			\
	case 8: stem##8(variable, __VA_ARGS__);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
} while (0)

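/*
 * Dispatch sketch (illustrative): for an "int" sized percpu variable,
 * __pcpu_size_call_return(this_cpu_read_, x) selects the "case 4"
 * branch above and expands to roughly
 *
 *	pscr_ret__ = this_cpu_read_4(x);
 *
 * so an arch only needs to provide this_cpu_read_4() (or override
 * this_cpu_read() completely) to optimize that scalar size.
 */
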
/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access w.r.t. other
 * operations on the *same* processor.  The assumption is that per cpu
 * data is only accessed by a single processor instance (the current
 * one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely.  F.e. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes.  F.e. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */

#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
# define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
# define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
# define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
# define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
# define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
# define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
# define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
# define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
# define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
# define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
# define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
# define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
# define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
# define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
# define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
# define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
# define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
# define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
# define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
# define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
# define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
# define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
# define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
# define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

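/*
 * Usage sketch (illustrative; the variable and "batch" are
 * hypothetical).  Each this_cpu op is a single preemption safe RMW,
 * so no explicit get_cpu_var()/put_cpu_var() bracketing is needed:
 *
 *	DEFINE_PER_CPU(unsigned long, nr_packets);
 *
 *	this_cpu_inc(nr_packets);
 *	this_cpu_add(nr_packets, batch);
 */
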
/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
# define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
# define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
# define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
# define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
# define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
# define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
# define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
# define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
# define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
# define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
# define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
# define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
# define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
# define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
# define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
# define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
# define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
# define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
# define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
# define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
# define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
# define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
# define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
# define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

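/*
 * Usage sketch (illustrative; the variables are hypothetical).  The
 * caller already provides protection, e.g. a preempt disabled region,
 * so the cheaper unprotected forms are safe here:
 *
 *	preempt_disable();
 *	__this_cpu_inc(nr_dirty);
 *	__this_cpu_add(nr_written, nr);
 *	preempt_enable();
 */
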
/*
 * IRQ safe versions of the per cpu RMW operations.  Note that these
 * operations are *not* safe against modification of the same variable
 * from another processor (which one gets when using regular atomic
 * operations).  They are guaranteed to be atomic vs. local interrupts
 * and preemption only.
 */
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)

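/*
 * Usage sketch (illustrative; the counter is hypothetical).  The
 * update is safe against an interrupt handler on the same CPU
 * touching the same counter, but not against other CPUs:
 *
 *	irqsafe_cpu_inc(nr_irq_events);
 */
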
#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#endif /* __LINUX_PERCPU_H */