#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mem_policy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */

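/*
 * Illustrative sketch (not part of this header): the MPOL_* modes are the
 * userspace ABI of the set_mempolicy() and mbind() system calls, the
 * MPOL_F_* flags belong to get_mempolicy(), and the MPOL_MF_* flags to
 * mbind() (all wrapped by libnuma's numaif.h). E.g., to bind a mapping
 * to node 0 and move any misplaced pages the caller owns:
 *
 *	unsigned long nodemask = 1UL << 0;
 *	mbind(addr, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *	      MPOL_MF_MOVE);
 */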
#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;
struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;	/* See MPOL_* above */
	union {
		struct zonelist  *zonelist;	/* bind */
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
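/*
 * Illustrative sketch (assumed call site, similar to the fork path): a
 * vma's policy is duplicated rather than shared when the vma itself is
 * copied. __mpol_copy() is assumed to report allocation failure via
 * ERR_PTR():
 *
 *	struct mempolicy *pol = mpol_copy(vma_policy(old_vma));
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	vma_set_policy(new_vma, pol);
 */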

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
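/*
 * Illustrative sketch: mpol_free() drops a reference and frees the policy
 * on the last one, so a temporary user pins the object first (variable
 * names here are for the example only):
 *
 *	struct mempolicy *pol = current->mempolicy;
 *	mpol_get(pol);
 *	...use pol for an allocation decision...
 *	mpol_free(pol);
 */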

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
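/*
 * Illustrative sketch (field and variable names are assumptions): a
 * filesystem backing shared memory, tmpfs for instance, embeds a
 * struct shared_policy in its per-inode info, consults it by page
 * index when allocating, and tears it down with the inode:
 *
 *	mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, NULL);
 *	...
 *	struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, idx);
 *	...allocate under pol, then drop the reference with mpol_free(pol)...
 *	...
 *	mpol_free_shared_policy(&info->policy);
 */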

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);
#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))

#ifdef CONFIG_CPUSET
#define current_cpuset_is_being_rebound() \
				(cpuset_being_rebound == current->cpuset)
#else
#define current_cpuset_is_being_rebound() 0
#endif
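/*
 * Illustrative sketch (assumed caller, cf. kernel/cpuset.c): when a
 * cpuset's memory placement changes, the rebind hooks are driven
 * roughly like this for each attached mm:
 *
 *	set_cpuset_being_rebound(cs);	-- lets mpol_copy() catch in-flight vmas
 *	mpol_rebind_mm(mm, &cs->mems_allowed);
 *	set_cpuset_being_rebound(NULL);
 */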

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr);
extern unsigned slab_node(struct mempolicy *policy);

extern int policy_zone;

static inline void check_highest_zone(int k)
{
	if (k > policy_zone)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
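/*
 * Illustrative sketch: migrate a task's pages from node 0 to node 1,
 * roughly what the migrate_pages(2) system call boils down to (the flag
 * choice depends on the caller's privileges):
 *
 *	nodemask_t from = nodemask_of_node(0);
 *	nodemask_t to = nodemask_of_node(1);
 *	do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */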

extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while (0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
					int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while (0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_policy(struct mempolicy *pol,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

#define set_cpuset_being_rebound(x) do {} while (0)

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif