blob: f2bab4d2fc40c8064a3844da0758f9ad90097db4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _LINUX_MEMPOLICY_H
2#define _LINUX_MEMPOLICY_H 1
3
4#include <linux/errno.h>
5
6/*
7 * NUMA memory policies for Linux.
8 * Copyright 2003,2004 Andi Kleen SuSE Labs
9 */
10
David Rientjes028fec42008-04-28 02:12:25 -070011/*
12 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
13 * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
14 * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
15 */
16
/* Policies: the MPOL_* mode requested via set_mempolicy()/mbind(). */
enum {
	MPOL_DEFAULT,	/* fall back to the process/system default policy */
	MPOL_PREFERRED,	/* prefer a single node (see v.preferred_node below) */
	MPOL_BIND,	/* restrict allocation to a set of nodes */
	MPOL_INTERLEAVE,	/* spread allocations across a set of nodes */
	MPOL_MAX,	/* always last member of enum */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
/* Flags for set_mempolicy */
/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().  (No optional mode flags exist yet,
 * hence the empty mask.)
 */
#define MPOL_MODE_FLAGS	(0)

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#ifdef __KERNEL__
45
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <linux/mmzone.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <linux/slab.h>
48#include <linux/rbtree.h>
49#include <linux/spinlock.h>
Andi Kleendfcd3c02005-10-29 18:15:48 -070050#include <linux/nodemask.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051
52struct vm_area_struct;
Ralf Baechle45b35a52006-06-08 00:43:41 -070053struct mm_struct;
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
55#ifdef CONFIG_NUMA
56
57/*
58 * Describe a memory policy.
59 *
60 * A mempolicy can be either associated with a process or with a VMA.
61 * For VMA related allocations the VMA policy is preferred, otherwise
62 * the process policy is used. Interrupts ignore the memory policy
63 * of the current process.
64 *
 65 * Locking policy for interleave:
66 * In process context there is no locking because only the process accesses
67 * its own state. All vma manipulation is somewhat protected by a down_read on
Hugh Dickinsb8072f02005-10-29 18:16:41 -070068 * mmap_sem.
Linus Torvalds1da177e2005-04-16 15:20:36 -070069 *
70 * Freeing policy:
Mel Gorman19770b32008-04-28 02:12:18 -070071 * Mempolicy objects are reference counted. A mempolicy will be freed when
72 * mpol_free() decrements the reference count to zero.
Linus Torvalds1da177e2005-04-16 15:20:36 -070073 *
74 * Copying policy objects:
Mel Gorman19770b32008-04-28 02:12:18 -070075 * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
76 * to the new storage. The reference count of the new object is initialized
77 * to 1, representing the caller of mpol_copy().
Linus Torvalds1da177e2005-04-16 15:20:36 -070078 */
struct mempolicy {
	atomic_t refcnt;		/* freed by mpol_free() when it hits zero */
	unsigned short policy;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short 		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
};
90
91/*
92 * Support for managing mempolicy data objects (clone, copy, destroy)
93 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
94 */
95
extern void __mpol_free(struct mempolicy *pol);

/* Drop one reference on @pol; NULL is a no-op (the common fast path). */
static inline void mpol_free(struct mempolicy *pol)
{
	if (!pol)
		return;
	__mpol_free(pol);
}
102
extern struct mempolicy *__mpol_copy(struct mempolicy *pol);

/*
 * Duplicate @pol into fresh storage with refcount 1.
 * A NULL policy (the default) copies to NULL without any allocation.
 */
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	return pol ? __mpol_copy(pol) : pol;
}
110
/* With CONFIG_NUMA the policy is stored directly on the vma. */
#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
113
114static inline void mpol_get(struct mempolicy *pol)
115{
116 if (pol)
117 atomic_inc(&pol->refcnt);
118}
119
extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);

/*
 * Nonzero when @a and @b describe the same policy.  Pointer identity
 * (including both NULL, i.e. both default) short-circuits the deep compare.
 */
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a != b)
		return __mpol_equal(a, b);
	return 1;
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
128
/* Could later add inheritance of the process policy here. */

/* Reset a vma to the default (NULL) policy. */
#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
132
133/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134 * Tree of shared policies for a shared memory region.
135 * Maintain the policies in a pseudo mm that contains vmas. The vmas
136 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
137 * bytes, so that we can work with shared memory segments bigger than
138 * unsigned long.
139 */
140
struct sp_node {
	struct rb_node nd;		/* links this node into shared_policy.root */
	unsigned long start, end;	/* covered range, indexed in pages (see above) */
	struct mempolicy *policy;	/* policy applied to [start, end) */
};

struct shared_policy {
	struct rb_root root;		/* rbtree of sp_nodes ordered by range */
	spinlock_t lock;		/* protects root */
};
151
/* Seed a shared-memory region's policy tree with one initial policy. */
void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
				unsigned short flags, nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
/* Look up the policy covering page index @idx, or NULL if none is set. */
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
/* Rebind hooks for cpuset changes: remap policies onto the @new nodemask. */
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173
Christoph Lameter2f6726e2006-09-25 23:31:18 -0700174extern enum zone_type policy_zone;
Christoph Lameter4be38e32006-01-06 00:11:17 -0800175
Christoph Lameter2f6726e2006-09-25 23:31:18 -0700176static inline void check_highest_zone(enum zone_type k)
Christoph Lameter4be38e32006-01-06 00:11:17 -0800177{
Mel Gormanb377fd32007-08-22 14:02:05 -0700178 if (k > policy_zone && k != ZONE_MOVABLE)
Christoph Lameter4be38e32006-01-06 00:11:17 -0800179 policy_zone = k;
180}
181
/* Move @mm's pages from @from_nodes to the corresponding @to_nodes. */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
184
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185#else
186
struct mempolicy {};

/* !CONFIG_NUMA: there is only one (trivial) policy, so all are equal. */
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)
196
/* !CONFIG_NUMA: no policy object is ever allocated, so freeing is a no-op. */
static inline void mpol_free(struct mempolicy *p)
{
}

/* !CONFIG_NUMA: no reference count to take. */
static inline void mpol_get(struct mempolicy *pol)
{
}

/* !CONFIG_NUMA: nothing to duplicate; every "copy" is the NULL default. */
static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}
209
struct shared_policy {};

/* !CONFIG_NUMA: shared policies cannot be installed. */
static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

/* !CONFIG_NUMA: nothing to initialize. */
static inline void mpol_shared_policy_init(struct shared_policy *info,
		unsigned short policy, unsigned short flags, nodemask_t *nodes)
{
}
223
/* !CONFIG_NUMA: nothing to tear down. */
static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

/* !CONFIG_NUMA: lookups never find a policy. */
static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

/* !CONFIG_NUMA: vmas carry no policy. */
#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}
244
/* !CONFIG_NUMA: cpuset rebinds have no per-task policy to update. */
static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

/*
 * !CONFIG_NUMA: no policy or nodemask applies; report none and hand back
 * node 0's zonelist as the single system-wide allocation order.
 */
static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}
266
/* !CONFIG_NUMA: there are no other nodes, so nothing can migrate. */
static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277#endif /* CONFIG_NUMA */
278#endif /* __KERNEL__ */
279
280#endif