/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
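/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * how the policies described above are typically requested from user space,
 * via the set_mempolicy()/mbind() system calls through the <numaif.h>
 * wrappers shipped with libnuma (link with -lnuma).  The node numbers and
 * mapping size below are made up for the example.
 *
 *      #include <numaif.h>
 *      #include <sys/mman.h>
 *
 *      static void numa_policy_example(void)
 *      {
 *              unsigned long nodes = 0x3;              // nodes 0 and 1
 *              size_t len = 1 << 20;
 *              void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *              // process policy: interleave new allocations over nodes 0-1
 *              set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *              // VMA policy: restrict this mapping to the same nodes
 *              mbind(buf, len, MPOL_BIND, &nodes, sizeof(nodes) * 8, 0);
 *      }
 */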

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        /*
         * If the read-side task has no lock to protect task->mempolicy, the
         * write-side task rebinds task->mempolicy in two steps.  The first
         * step sets all the newly allowed nodes, and the second step clears
         * all the disallowed nodes.  This way we never end up with no node
         * to allocate a page from.
         * If we have a lock to protect task->mempolicy on the read side, we
         * rebind directly.
         *
         * step:
         *      MPOL_REBIND_ONCE  - do the rebind work at once
         *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
         *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
         */
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
                        enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
        int nd, k;

        for_each_node_mask(nd, *nodemask) {
                struct zone *z;

                for (k = 0; k <= policy_zone; k++) {
                        z = &NODE_DATA(nd)->node_zones[k];
                        if (z->present_pages > 0)
                                return 1;
                }
        }

        return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
        return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                                   const nodemask_t *rel)
{
        nodemask_t tmp;
        nodes_fold(tmp, *orig, nodes_weight(*rel));
        nodes_onto(*ret, tmp, *rel);
}
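/*
 * Illustrative note (not in the original source): with MPOL_F_RELATIVE_NODES
 * the user's nodemask is interpreted relative to the currently allowed nodes.
 * For example, a user mask of {0,2} folded onto three allowed nodes {4,6,8}
 * selects the 1st and 3rd allowed nodes, so the result is {4,8}.
 */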

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!nodes)
                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
        else if (nodes_empty(*nodes))
                return -EINVAL;                 /* no allowed nodes */
        else
                pol->v.preferred_node = first_node(*nodes);
        return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!is_valid_nodemask(nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
        int ret;

        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
        if (pol == NULL)
                return 0;
        /* Check N_HIGH_MEMORY */
        nodes_and(nsc->mask1,
                  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

        VM_BUG_ON(!nodes);
        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
                nodes = NULL;   /* explicit local allocation */
        else {
                if (pol->flags & MPOL_F_RELATIVE_NODES)
                        mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
                else
                        nodes_and(nsc->mask2, *nodes, nsc->mask1);

                if (mpol_store_user_nodemask(pol))
                        pol->w.user_nodemask = *nodes;
                else
                        pol->w.cpuset_mems_allowed =
                                                cpuset_current_mems_allowed;
        }

        if (nodes)
                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
        else
                ret = mpol_ops[pol->mode].create(pol, NULL);
        return ret;
}
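/*
 * Illustrative note (not in the original source): suppose the cpuset allows
 * nodes {2,3} and the caller asked for MPOL_BIND over {0,1}.  By default the
 * requested mask is intersected with the allowed nodes, which is empty here,
 * so mpol_new_bind() fails with -EINVAL.  With MPOL_F_RELATIVE_NODES the
 * mask {0,1} is instead remapped onto the two allowed nodes, giving an
 * effective policy over {2,3}.
 */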

/*
 * This function just creates a new policy, does some checks and simple
 * initialization.  You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
{
        struct mempolicy *policy;

        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

        if (mode == MPOL_DEFAULT || mode == MPOL_NOOP) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                return NULL;
        }
        VM_BUG_ON(!nodes);

        /*
         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
         * All other modes require a valid pointer to a non-empty nodemask.
         */
        if (mode == MPOL_PREFERRED) {
                if (nodes_empty(*nodes)) {
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                }
        } else if (mode == MPOL_LOCAL) {
                if (!nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                mode = MPOL_PREFERRED;
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        policy->mode = mode;
        policy->flags = flags;

        return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
                                enum mpol_rebind_step step)
{
}

/*
 * step:
 *      MPOL_REBIND_ONCE  - do the rebind work at once
 *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
                                 enum mpol_rebind_step step)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES)
                nodes_and(tmp, pol->w.user_nodemask, *nodes);
        else if (pol->flags & MPOL_F_RELATIVE_NODES)
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        else {
                /*
                 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
                 * result
                 */
                if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
                        nodes_remap(tmp, pol->v.nodes,
                                        pol->w.cpuset_mems_allowed, *nodes);
                        pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
                } else if (step == MPOL_REBIND_STEP2) {
                        tmp = pol->w.cpuset_mems_allowed;
                        pol->w.cpuset_mems_allowed = *nodes;
                } else
                        BUG();
        }

        if (nodes_empty(tmp))
                tmp = *nodes;

        if (step == MPOL_REBIND_STEP1)
                nodes_or(pol->v.nodes, pol->v.nodes, tmp);
        else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
                pol->v.nodes = tmp;
        else
                BUG();

        if (!node_isset(current->il_next, tmp)) {
                current->il_next = next_node(current->il_next, tmp);
                if (current->il_next >= MAX_NUMNODES)
                        current->il_next = first_node(tmp);
                if (current->il_next >= MAX_NUMNODES)
                        current->il_next = numa_node_id();
        }
}
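/*
 * Illustrative note (not in the original source): consider an MPOL_INTERLEAVE
 * policy over nodes {0,1} whose cpuset is rebound to nodes {2,3}.  Without
 * user flags, MPOL_REBIND_STEP1 remaps {0,1} to {2,3} and ORs it in, leaving
 * the policy temporarily over {0,1,2,3} so concurrent allocations always find
 * a node; MPOL_REBIND_STEP2 then trims the mask down to {2,3}.
 */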

static void mpol_rebind_preferred(struct mempolicy *pol,
                                  const nodemask_t *nodes,
                                  enum mpol_rebind_step step)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES) {
                int node = first_node(pol->w.user_nodemask);

                if (node_isset(node, *nodes)) {
                        pol->v.preferred_node = node;
                        pol->flags &= ~MPOL_F_LOCAL;
                } else
                        pol->flags |= MPOL_F_LOCAL;
        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                pol->v.preferred_node = first_node(tmp);
        } else if (!(pol->flags & MPOL_F_LOCAL)) {
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                   pol->w.cpuset_mems_allowed,
                                                   *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task rebinds task->mempolicy in two steps.  The first step
 * sets all the newly allowed nodes, and the second step clears all the
 * disallowed nodes.  This way we never end up with no node to allocate
 * a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *      MPOL_REBIND_ONCE  - do the rebind work at once
 *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
                                enum mpol_rebind_step step)
{
        if (!pol)
                return;
        if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;

        if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
                return;

        if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
                BUG();

        if (step == MPOL_REBIND_STEP1)
                pol->flags |= MPOL_F_REBINDING;
        else if (step == MPOL_REBIND_STEP2)
                pol->flags &= ~MPOL_F_REBINDING;
        else if (step >= MPOL_REBIND_NSTEP)
                BUG();

        mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
                      enum mpol_rebind_step step)
{
        mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
        up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        [MPOL_DEFAULT] = {
                .rebind = mpol_rebind_default,
        },
        [MPOL_INTERLEAVE] = {
                .create = mpol_new_interleave,
                .rebind = mpol_rebind_nodemask,
        },
        [MPOL_PREFERRED] = {
                .create = mpol_new_preferred,
                .rebind = mpol_rebind_preferred,
        },
        [MPOL_BIND] = {
                .create = mpol_new_bind,
                .rebind = mpol_rebind_nodemask,
        },
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pte_t *orig_pte;
        pte_t *pte;
        spinlock_t *ptl;

        orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                struct page *page;
                int nid;

                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * vm_normal_page() filters out zero pages, but there might
                 * still be PageReserved pages to skip, perhaps in a VDSO.
                 * And we cannot move PageKsm pages sensibly or safely yet.
                 */
                if (PageReserved(page) || PageKsm(page))
                        continue;
                nid = page_to_nid(page);
                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
                        continue;

                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                        migrate_page_add(page, private, flags);
                else
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
        return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                split_huge_page_pmd(vma->vm_mm, pmd);
                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        continue;
                if (check_pte_range(vma, pmd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                if (check_pmd_range(vma, pud, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pud++, addr = next, addr != end);
        return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                if (check_pud_range(vma, pgd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pgd++, addr = next, addr != end);
        return 0;
}

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * Here we search for not shared page mappings (mapcount == 1) and we
 * set up the pmd/pte_numa on those mappings so the very next access
 * will fire a NUMA hinting page fault.
 */
static int
change_prot_numa_range(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, *_pte;
        struct page *page;
        unsigned long _address, end;
        spinlock_t *ptl;
        int ret = 0;

        VM_BUG_ON(address & ~PAGE_MASK);

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                goto out;

        if (pmd_trans_huge_lock(pmd, vma) == 1) {
                int page_nid;
                ret = HPAGE_PMD_NR;

                VM_BUG_ON(address & ~HPAGE_PMD_MASK);

                if (pmd_numa(*pmd)) {
                        spin_unlock(&mm->page_table_lock);
                        goto out;
                }

                page = pmd_page(*pmd);

                /* only check non-shared pages */
                if (page_mapcount(page) != 1) {
                        spin_unlock(&mm->page_table_lock);
                        goto out;
                }

                page_nid = page_to_nid(page);

                if (pmd_numa(*pmd)) {
                        spin_unlock(&mm->page_table_lock);
                        goto out;
                }

                set_pmd_at(mm, address, pmd, pmd_mknuma(*pmd));
                ret += HPAGE_PMD_NR;
                /* defer TLB flush to lower the overhead */
                spin_unlock(&mm->page_table_lock);
                goto out;
        }

        if (pmd_trans_unstable(pmd))
                goto out;
        VM_BUG_ON(!pmd_present(*pmd));

        end = min(vma->vm_end, (address + PMD_SIZE) & PMD_MASK);
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        for (_address = address, _pte = pte; _address < end;
             _pte++, _address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (!pte_present(pteval))
                        continue;
                if (pte_numa(pteval))
                        continue;
                page = vm_normal_page(vma, _address, pteval);
                if (unlikely(!page))
                        continue;
                /* only check non-shared pages */
                if (page_mapcount(page) != 1)
                        continue;

                set_pte_at(mm, _address, _pte, pte_mknuma(pteval));

                /* defer TLB flush to lower the overhead */
                ret++;
        }
        pte_unmap_unlock(pte, ptl);

        if (ret && !pmd_numa(*pmd)) {
                spin_lock(&mm->page_table_lock);
                set_pmd_at(mm, address, pmd, pmd_mknuma(*pmd));
                spin_unlock(&mm->page_table_lock);
                /* defer TLB flush to lower the overhead */
        }

out:
        return ret;
}

/* Assumes mmap_sem is held */
void
change_prot_numa(struct vm_area_struct *vma,
                unsigned long address, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int progress = 0;

        while (address < end) {
                VM_BUG_ON(address < vma->vm_start ||
                          address + PAGE_SIZE > vma->vm_end);

                progress += change_prot_numa_range(mm, vma, address);
                address = (address + PMD_SIZE) & PMD_MASK;
        }

        /*
         * Flush the TLB for the mm to start the NUMA hinting
         * page faults after we finish scanning this vma part
         * if there were any PTE updates
         */
        if (progress) {
                mmu_notifier_invalidate_range_start(vma->vm_mm, address, end);
                flush_tlb_range(vma, address, end);
                mmu_notifier_invalidate_range_end(vma->vm_mm, address, end);
        }
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end)
{
        return 0;
}
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                const nodemask_t *nodes, unsigned long flags, void *private)
{
        int err;
        struct vm_area_struct *first, *vma, *prev;


        first = find_vma(mm, start);
        if (!first)
                return ERR_PTR(-EFAULT);
        prev = NULL;
        for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
                unsigned long endvma = vma->vm_end;

                if (endvma > end)
                        endvma = end;
                if (vma->vm_start > start)
                        start = vma->vm_start;

                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
                                return ERR_PTR(-EFAULT);
                        if (prev && prev->vm_end < vma->vm_start)
                                return ERR_PTR(-EFAULT);
                }

                if (is_vm_hugetlb_page(vma))
                        goto next;

                if (flags & MPOL_MF_LAZY) {
                        change_prot_numa(vma, start, endvma);
                        goto next;
                }

                if ((flags & MPOL_MF_STRICT) ||
                     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
                      vma_migratable(vma))) {

                        err = check_pgd_range(vma, start, endvma, nodes,
                                                flags, private);
                        if (err) {
                                first = ERR_PTR(err);
                                break;
                        }
                }
next:
                prev = vma;
        }
        return first;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
                                                struct mempolicy *pol)
{
        int err;
        struct mempolicy *old;
        struct mempolicy *new;

        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
                 vma->vm_ops, vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

        new = mpol_dup(pol);
        if (IS_ERR(new))
                return PTR_ERR(new);

        if (vma->vm_ops && vma->vm_ops->set_policy) {
                err = vma->vm_ops->set_policy(vma, new);
                if (err)
                        goto err_out;
        }

        old = vma->vm_policy;
        vma->vm_policy = new; /* protected by mmap_sem */
        mpol_put(old);

        return 0;
 err_out:
        mpol_put(new);
        return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
{
        struct vm_area_struct *next;
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
        pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;

        vma = find_vma(mm, start);
        if (!vma || vma->vm_start > start)
                return -EFAULT;

        prev = vma->vm_prev;
        if (start > vma->vm_start)
                prev = vma;

        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend = min(end, vma->vm_end);

                if (mpol_equal(vma_policy(vma), new_pol))
                        continue;

                pgoff = vma->vm_pgoff +
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                  vma->anon_vma, vma->vm_file, pgoff,
                                  new_pol);
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
                        continue;
                }
                if (vma->vm_start != vmstart) {
                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
                        if (err)
                                goto out;
                }
                if (vma->vm_end != vmend) {
                        err = split_vma(vma->vm_mm, vma, vmend, 0);
                        if (err)
                                goto out;
                }
                err = vma_replace_policy(vma, new_pol);
                if (err)
                        goto out;
        }

 out:
        return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
        if (p->mempolicy)
                p->flags |= PF_MEMPOLICY;
        else
                p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
        mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                             nodemask_t *nodes)
{
        struct mempolicy *new, *old;
        struct mm_struct *mm = current->mm;
        NODEMASK_SCRATCH(scratch);
        int ret;

        if (!scratch)
                return -ENOMEM;

        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new)) {
                ret = PTR_ERR(new);
                goto out;
        }
        /*
         * prevent changing our mempolicy while show_numa_maps()
         * is using it.
         * Note:  do_set_mempolicy() can be called at init time
         * with no 'mm'.
         */
        if (mm)
                down_write(&mm->mmap_sem);
        task_lock(current);
        ret = mpol_set_nodemask(new, nodes, scratch);
        if (ret) {
                task_unlock(current);
                if (mm)
                        up_write(&mm->mmap_sem);
                mpol_put(new);
                goto out;
        }
        old = current->mempolicy;
        current->mempolicy = new;
        mpol_set_task_struct_flag();
        if (new && new->mode == MPOL_INTERLEAVE &&
            nodes_weight(new->v.nodes))
                current->il_next = first_node(new->v.nodes);
        task_unlock(current);
        if (mm)
                up_write(&mm->mmap_sem);

        mpol_put(old);
        ret = 0;
out:
        NODEMASK_SCRATCH_FREE(scratch);
        return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
        nodes_clear(*nodes);
        if (p == &default_policy)
                return;

        switch (p->mode) {
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
                if (!(p->flags & MPOL_F_LOCAL))
                        node_set(p->v.preferred_node, *nodes);
                /* else return empty node mask for local allocation */
                break;
        default:
                BUG();
        }
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
        struct page *p;
        int err;

        err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
        }
        return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                             unsigned long addr, unsigned long flags)
{
        int err;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy;

        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;

        if (flags & MPOL_F_MEMS_ALLOWED) {
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
                task_lock(current);
                *nmask = cpuset_current_mems_allowed;
                task_unlock(current);
                return 0;
        }

        if (flags & MPOL_F_ADDR) {
                /*
                 * Do NOT fall back to task policy if the
                 * vma/shared policy at addr is NULL.  We
                 * want to return MPOL_DEFAULT in this case.
                 */
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr);
                else
                        pol = vma->vm_policy;
        } else if (addr)
                return -EINVAL;

        if (!pol)
                pol = &default_policy;  /* indicates default behavior */

        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
                        err = lookup_node(mm, addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
                                pol->mode == MPOL_INTERLEAVE) {
                        *policy = current->il_next;
                } else {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                *policy = pol == &default_policy ? MPOL_DEFAULT :
                                                pol->mode;
                /*
                 * Internal mempolicy flags must be masked off before exposing
                 * the policy to userspace.
                 */
                *policy |= (pol->flags & MPOL_MODE_FLAGS);
        }

        if (vma) {
                up_read(&current->mm->mmap_sem);
                vma = NULL;
        }

        err = 0;
        if (nmask) {
                if (mpol_store_user_nodemask(pol)) {
                        *nmask = pol->w.user_nodemask;
                } else {
                        task_lock(current);
                        get_policy_nodemask(pol, nmask);
                        task_unlock(current);
                }
        }

 out:
        mpol_cond_put(pol);
        if (vma)
                up_read(&current->mm->mmap_sem);
        return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
        /*
         * Avoid migrating a page that is shared with others.
         */
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
                if (!isolate_lru_page(page)) {
                        list_add_tail(&page->lru, pagelist);
                        inc_zone_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));
                }
        }
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
        return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                           int flags)
{
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;

        nodes_clear(nmask);
        node_set(source, nmask);

        /*
         * This does not "check" the range but isolates all pages that
         * need migration.  Between passing in the full user address
         * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
         */
        VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
        check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);

        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_node_page, dest,
                                                        false, MIGRATE_SYNC,
                                                        MR_SYSCALL);
                if (err)
                        putback_lru_pages(&pagelist);
        }

        return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        int busy = 0;
        int err;
        nodemask_t tmp;

        err = migrate_prep();
        if (err)
                return err;

        down_read(&mm->mmap_sem);

        err = migrate_vmas(mm, from, to, flags);
        if (err)
                goto out;

        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
         * bit in 'tmp', and return that <source, dest> pair for migration.
         * The pair of nodemasks 'to' and 'from' define the map.
         *
         * If no pair of bits is found that way, fallback to picking some
         * pair of 'source' and 'dest' bits that are not the same.  If the
         * 'source' and 'dest' bits are the same, this represents a node
         * that will be migrating to itself, so no pages need move.
         *
         * If no bits are left in 'tmp', or if all remaining bits left
         * in 'tmp' correspond to the same bit in 'to', return false
         * (nothing left to migrate).
         *
         * This lets us pick a pair of nodes to migrate between, such that
         * if possible the dest node is not already occupied by some other
         * source node, minimizing the risk of overloading the memory on a
         * node that would happen if we migrated incoming memory to a node
         * before migrating outgoing memory source from that same node.
         *
         * A single scan of tmp is sufficient.  As we go, we remember the
         * most recent <s, d> pair that moved (s != d).  If we find a pair
         * that not only moved, but what's better, moved to an empty slot
         * (d is not set in tmp), then we break out then, with that pair.
         * Otherwise when we finish scanning from_tmp, we at least have the
         * most recent <s, d> pair that moved.  If we get all the way through
         * the scan of tmp without finding any node that moved, much less
         * moved to an empty node, then there is nothing left worth migrating.
         */

        tmp = *from;
        while (!nodes_empty(tmp)) {
                int s,d;
                int source = -1;
                int dest = 0;

                for_each_node_mask(s, tmp) {

                        /*
                         * do_migrate_pages() tries to maintain the relative
                         * node relationship of the pages established between
                         * threads and memory areas.
                         *
                         * However if the number of source nodes is not equal to
                         * the number of destination nodes we can not preserve
                         * this node relative relationship.  In that case, skip
                         * copying memory from a node that is in the destination
                         * mask.
                         *
                         * Example: [2,3,4] -> [3,4,5] moves everything.
                         *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
                         */

                        if ((nodes_weight(*from) != nodes_weight(*to)) &&
                                                (node_isset(s, *to)))
                                continue;

                        d = node_remap(s, *from, *to);
                        if (s == d)
                                continue;

                        source = s;     /* Node moved. Memorize */
                        dest = d;

                        /* dest not in remaining from nodes? */
                        if (!node_isset(dest, tmp))
                                break;
                }
                if (source == -1)
                        break;

                node_clear(source, tmp);
                err = migrate_to_node(mm, source, dest, flags);
                if (err > 0)
                        busy += err;
                if (err < 0)
                        break;
        }
out:
        up_read(&mm->mmap_sem);
        if (err < 0)
                return err;
        return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
        struct vm_area_struct *vma = (struct vm_area_struct *)private;
        unsigned long uninitialized_var(address);

        while (vma) {
                address = page_address_in_vma(page, vma);
                if (address != -EFAULT)
                        break;
                vma = vma->vm_next;
        }

        /*
         * if !vma, alloc_page_vma() will use task or system default policy
         */
        return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
        return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
                     unsigned short mode, unsigned short mode_flags,
                     nodemask_t *nmask, unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
        int err;
        LIST_HEAD(pagelist);

        if (flags & ~(unsigned long)MPOL_MF_VALID)
                return -EINVAL;
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        if (start & ~PAGE_MASK)
                return -EINVAL;

        if (mode == MPOL_DEFAULT || mode == MPOL_NOOP)
                flags &= ~MPOL_MF_STRICT;

        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
        end = start + len;

        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;

        new = mpol_new(mode, mode_flags, nmask);
        if (IS_ERR(new))
                return PTR_ERR(new);

        if (flags & MPOL_MF_LAZY)
                new->flags |= MPOL_F_MOF;

        /*
         * If we are using the default policy then operation
         * on discontinuous address spaces is okay after all
         */
        if (!new)
                flags |= MPOL_MF_DISCONTIG_OK;

        pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
                 start, start + len, mode, mode_flags,
                 nmask ? nodes_addr(*nmask)[0] : -1);

        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

                err = migrate_prep();
                if (err)
                        goto mpol_out;
        }
        {
                NODEMASK_SCRATCH(scratch);
                if (scratch) {
                        down_write(&mm->mmap_sem);
                        task_lock(current);
                        err = mpol_set_nodemask(new, nmask, scratch);
                        task_unlock(current);
                        if (err)
                                up_write(&mm->mmap_sem);
                } else
                        err = -ENOMEM;
                NODEMASK_SCRATCH_FREE(scratch);
        }
        if (err)
                goto mpol_out;

        vma = check_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);

        err = PTR_ERR(vma);     /* maybe ... */
        if (!IS_ERR(vma) && mode != MPOL_NOOP)
                err = mbind_range(mm, start, end, new);

        if (!err) {
                int nr_failed = 0;

                if (!list_empty(&pagelist)) {
                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                (unsigned long)vma,
                                                false, MIGRATE_SYNC,
                                                MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_lru_pages(&pagelist);
                }

                if (nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
        } else
                putback_lru_pages(&pagelist);

        up_write(&mm->mmap_sem);
 mpol_out:
        mpol_put(new);
        return err;
}

Christoph Lameter39743882006-01-08 01:00:51 -08001382/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001383 * User space interface with variable sized bitmaps for nodelists.
1384 */
1385
1386/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001387static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001388 unsigned long maxnode)
1389{
1390 unsigned long k;
1391 unsigned long nlongs;
1392 unsigned long endmask;
1393
1394 --maxnode;
1395 nodes_clear(*nodes);
1396 if (maxnode == 0 || !nmask)
1397 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001398 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001399 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001400
1401 nlongs = BITS_TO_LONGS(maxnode);
1402 if ((maxnode % BITS_PER_LONG) == 0)
1403 endmask = ~0UL;
1404 else
1405 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1406
1407 /* When the user specified more nodes than supported, just check
1408 that the unsupported part is all zero. */
1409 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1410 if (nlongs > PAGE_SIZE/sizeof(long))
1411 return -EINVAL;
1412 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1413 unsigned long t;
1414 if (get_user(t, nmask + k))
1415 return -EFAULT;
1416 if (k == nlongs - 1) {
1417 if (t & endmask)
1418 return -EINVAL;
1419 } else if (t)
1420 return -EINVAL;
1421 }
1422 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1423 endmask = ~0UL;
1424 }
1425
1426 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1427 return -EFAULT;
1428 nodes_addr(*nodes)[nlongs-1] &= endmask;
1429 return 0;
1430}
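/*
 * Worked example of the maxnode/endmask computation above, assuming a
 * 64-bit kernel (BITS_PER_LONG == 64): a caller passing maxnode = 33
 * describes node bits 0..31, so after --maxnode we have maxnode = 32,
 * nlongs = 1 and endmask = (1UL << 32) - 1, i.e. only the low 32 bits of
 * the single copied word survive. With maxnode = 65 we get maxnode = 64,
 * nlongs = 1 and endmask = ~0UL, so the whole first word is kept.
 */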
1431
1432/* Copy a kernel node mask to user space */
1433static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1434 nodemask_t *nodes)
1435{
1436 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1437 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1438
1439 if (copy > nbytes) {
1440 if (copy > PAGE_SIZE)
1441 return -EINVAL;
1442 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1443 return -EFAULT;
1444 copy = nbytes;
1445 }
1446 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1447}
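/*
 * Worked example of the copy/clear split above, assuming a 64-bit kernel
 * configured with MAX_NUMNODES = 512 (an illustrative value): the kernel
 * mask then occupies nbytes = BITS_TO_LONGS(512) * 8 = 64 bytes. A caller
 * asking for maxnode = 1025 gets copy = ALIGN(1024, 64) / 8 = 128 bytes,
 * so bytes 64..127 of the user buffer are cleared and only the first
 * 64 bytes are copied from the nodemask.
 */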
1448
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001449SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1450 unsigned long, mode, unsigned long __user *, nmask,
1451 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001452{
1453 nodemask_t nodes;
1454 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001455 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001456
David Rientjes028fec42008-04-28 02:12:25 -07001457 mode_flags = mode & MPOL_MODE_FLAGS;
1458 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001459 if (mode >= MPOL_MAX)
1460 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001461 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1462 (mode_flags & MPOL_F_RELATIVE_NODES))
1463 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001464 err = get_nodes(&nodes, nmask, maxnode);
1465 if (err)
1466 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001467 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001468}
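/*
 * Minimal user-space sketch of this syscall, assuming the mbind() wrapper
 * and MPOL_* constants from libnuma's <numaif.h>; the length and the choice
 * of nodes 0 and 1 are placeholders, not values taken from this file:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	size_t len = 1UL << 20;
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (mbind(p, len, MPOL_INTERLEAVE, &nodes,
 *		  8 * sizeof(nodes), MPOL_MF_STRICT))
 *		perror("mbind");
 */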
1469
1470/* Set the process memory policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001471SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1472 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001473{
1474 int err;
1475 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001476 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001477
David Rientjes028fec42008-04-28 02:12:25 -07001478 flags = mode & MPOL_MODE_FLAGS;
1479 mode &= ~MPOL_MODE_FLAGS;
1480 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001481 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001482 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1483 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001484 err = get_nodes(&nodes, nmask, maxnode);
1485 if (err)
1486 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001487 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001488}
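/*
 * Corresponding user-space sketch for the process-wide policy, again
 * assuming <numaif.h>; node 2 is a placeholder. Mode flags such as
 * MPOL_F_STATIC_NODES are passed OR'ed into @mode and split back out via
 * the MPOL_MODE_FLAGS mask above:
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodes = 1UL << 2;
 *	if (set_mempolicy(MPOL_BIND, &nodes, 8 * sizeof(nodes)))
 *		perror("set_mempolicy");
 */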
1489
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001490SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1491 const unsigned long __user *, old_nodes,
1492 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001493{
David Howellsc69e8d92008-11-14 10:39:19 +11001494 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001495 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001496 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001497 nodemask_t task_nodes;
1498 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001499 nodemask_t *old;
1500 nodemask_t *new;
1501 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001502
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001503 if (!scratch)
1504 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001505
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001506 old = &scratch->mask1;
1507 new = &scratch->mask2;
1508
1509 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001510 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001511 goto out;
1512
1513 err = get_nodes(new, new_nodes, maxnode);
1514 if (err)
1515 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001516
1517 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001518 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001519 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001520 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001521 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001522 err = -ESRCH;
1523 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001524 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001525 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001526
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001527 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001528
1529 /*
1530 * Check if this process has the right to modify the specified
1531 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001532 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001533 * userid as the target process.
1534 */
David Howellsc69e8d92008-11-14 10:39:19 +11001535 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001536 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1537 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001538 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001539 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001540 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001541 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001542 }
David Howellsc69e8d92008-11-14 10:39:19 +11001543 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001544
1545 task_nodes = cpuset_mems_allowed(task);
1546 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001547 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001548 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001549 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001550 }
1551
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001552 if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
Christoph Lameter3b42d282007-08-31 00:12:08 -07001553 err = -EINVAL;
Christoph Lameter3268c632012-03-21 16:34:06 -07001554 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001555 }
1556
David Quigley86c3a762006-06-23 02:04:02 -07001557 err = security_task_movememory(task);
1558 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001559 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001560
Christoph Lameter3268c632012-03-21 16:34:06 -07001561 mm = get_task_mm(task);
1562 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001563
1564 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001565 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001566 goto out;
1567 }
1568
1569 err = do_migrate_pages(mm, old, new,
1570 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001571
1572 mmput(mm);
1573out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001574 NODEMASK_SCRATCH_FREE(scratch);
1575
Christoph Lameter39743882006-01-08 01:00:51 -08001576 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001577
1578out_put:
1579 put_task_struct(task);
1580 goto out;
1581
Christoph Lameter39743882006-01-08 01:00:51 -08001582}
1583
1584
Christoph Lameter8bccd852005-10-29 18:16:59 -07001585/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001586SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1587 unsigned long __user *, nmask, unsigned long, maxnode,
1588 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001589{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001590 int err;
1591 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001592 nodemask_t nodes;
1593
1594 if (nmask != NULL && maxnode < MAX_NUMNODES)
1595 return -EINVAL;
1596
1597 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1598
1599 if (err)
1600 return err;
1601
1602 if (policy && put_user(pval, policy))
1603 return -EFAULT;
1604
1605 if (nmask)
1606 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1607
1608 return err;
1609}
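/*
 * User-space sketch of querying policy back, assuming <numaif.h>. Per the
 * check above, a supplied nodemask buffer must cover at least MAX_NUMNODES
 * bits, so the 2048 below is only a placeholder that has to be >= the
 * running kernel's MAX_NUMNODES; the queried address here is simply a
 * stack variable in the caller's own address space:
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int mode;
 *	unsigned long mask[2048 / (8 * sizeof(unsigned long))];
 *
 *	if (get_mempolicy(&mode, mask, 2048, &mode, MPOL_F_ADDR))
 *		perror("get_mempolicy");
 */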
1610
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611#ifdef CONFIG_COMPAT
1612
1613asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1614 compat_ulong_t __user *nmask,
1615 compat_ulong_t maxnode,
1616 compat_ulong_t addr, compat_ulong_t flags)
1617{
1618 long err;
1619 unsigned long __user *nm = NULL;
1620 unsigned long nr_bits, alloc_size;
1621 DECLARE_BITMAP(bm, MAX_NUMNODES);
1622
1623 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1624 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1625
1626 if (nmask)
1627 nm = compat_alloc_user_space(alloc_size);
1628
1629 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1630
1631 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001632 unsigned long copy_size;
1633 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1634 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 /* ensure entire bitmap is zeroed */
1636 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1637 err |= compat_put_bitmap(nmask, bm, nr_bits);
1638 }
1639
1640 return err;
1641}
1642
1643asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1644 compat_ulong_t maxnode)
1645{
1646 long err = 0;
1647 unsigned long __user *nm = NULL;
1648 unsigned long nr_bits, alloc_size;
1649 DECLARE_BITMAP(bm, MAX_NUMNODES);
1650
1651 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1652 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1653
1654 if (nmask) {
1655 err = compat_get_bitmap(bm, nmask, nr_bits);
1656 nm = compat_alloc_user_space(alloc_size);
1657 err |= copy_to_user(nm, bm, alloc_size);
1658 }
1659
1660 if (err)
1661 return -EFAULT;
1662
1663 return sys_set_mempolicy(mode, nm, nr_bits+1);
1664}
1665
1666asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1667 compat_ulong_t mode, compat_ulong_t __user *nmask,
1668 compat_ulong_t maxnode, compat_ulong_t flags)
1669{
1670 long err = 0;
1671 unsigned long __user *nm = NULL;
1672 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001673 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
1675 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1676 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1677
1678 if (nmask) {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001679 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 nm = compat_alloc_user_space(alloc_size);
Andi Kleendfcd3c02005-10-29 18:15:48 -07001681 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 }
1683
1684 if (err)
1685 return -EFAULT;
1686
1687 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1688}
1689
1690#endif
1691
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001692/*
1693 * get_vma_policy(@task, @vma, @addr)
1694 * @task - task for fallback if vma policy == default
1695 * @vma - virtual memory area whose policy is sought
1696 * @addr - address in @vma for shared policy lookup
1697 *
1698 * Returns effective policy for a VMA at specified address.
1699 * Falls back to @task or system default policy, as necessary.
David Rientjes32f85162012-10-16 17:31:23 -07001700 * Current or other task's task mempolicy and non-shared vma policies must be
1701 * protected by task_lock(task) by the caller.
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001702 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1703 * count--added by the get_policy() vm_op, as appropriate--to protect against
1704 * freeing by another task. It is the caller's responsibility to free the
1705 * extra reference for shared policies.
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001706 */
Stephen Wilsond98f6cb2011-05-24 17:12:41 -07001707struct mempolicy *get_vma_policy(struct task_struct *task,
Christoph Lameter48fce342006-01-08 01:01:03 -08001708 struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709{
Christoph Lameter6e21c8f2005-09-03 15:54:45 -07001710 struct mempolicy *pol = task->mempolicy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711
1712 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001713 if (vma->vm_ops && vma->vm_ops->get_policy) {
Lee Schermerhornae4d8c12008-04-28 02:13:11 -07001714 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1715 addr);
1716 if (vpol)
1717 pol = vpol;
Mel Gorman00442ad2012-10-08 16:29:20 -07001718 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001720
1721 /*
1722 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1723 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1724 * count on these policies which will be dropped by
1725 * mpol_cond_put() later
1726 */
1727 if (mpol_needs_cond_ref(pol))
1728 mpol_get(pol);
1729 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 }
1731 if (!pol)
1732 pol = &default_policy;
1733 return pol;
1734}
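/*
 * Sketch of the caller pattern this implies (modelled on alloc_pages_vma()
 * further down); the conditional put is what balances the extra reference
 * taken for MPOL_F_SHARED policies:
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *
 *	... use pol->mode, policy_nodemask(gfp, pol), etc. ...
 *
 *	mpol_cond_put(pol);
 */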
1735
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001736/*
1737 * Return a nodemask representing a mempolicy for filtering nodes for
1738 * page allocation
1739 */
1740static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001741{
1742 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001743 if (unlikely(policy->mode == MPOL_BIND) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001744 gfp_zone(gfp) >= policy_zone &&
1745 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1746 return &policy->v.nodes;
1747
1748 return NULL;
1749}
1750
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001751/* Return a zonelist indicated by gfp for node representing a mempolicy */
Andi Kleen2f5f9482011-03-04 17:36:29 -08001752static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1753 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754{
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001755 switch (policy->mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001757 if (!(policy->flags & MPOL_F_LOCAL))
1758 nd = policy->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 break;
1760 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001761 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001762 * Normally, MPOL_BIND allocations are node-local within the
1763 * allowed nodemask. However, if __GFP_THISNODE is set and the
Bob Liu6eb27e12010-05-24 14:32:00 -07001764 * current node isn't part of the mask, we use the zonelist for
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001765 * the first node in the mask instead.
Mel Gorman19770b32008-04-28 02:12:18 -07001766 */
Mel Gorman19770b32008-04-28 02:12:18 -07001767 if (unlikely(gfp & __GFP_THISNODE) &&
1768 unlikely(!node_isset(nd, policy->v.nodes)))
1769 nd = first_node(policy->v.nodes);
1770 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 BUG();
1773 }
Mel Gorman0e884602008-04-28 02:12:14 -07001774 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775}
1776
1777/* Do dynamic interleaving for a process */
1778static unsigned interleave_nodes(struct mempolicy *policy)
1779{
1780 unsigned nid, next;
1781 struct task_struct *me = current;
1782
1783 nid = me->il_next;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001784 next = next_node(nid, policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 if (next >= MAX_NUMNODES)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001786 next = first_node(policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001787 if (next < MAX_NUMNODES)
1788 me->il_next = next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 return nid;
1790}
1791
Christoph Lameterdc85da12006-01-18 17:42:36 -08001792/*
1793 * Depending on the memory policy provide a node from which to allocate the
1794 * next slab entry.
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001795 * @policy must be protected from freeing by the caller. If @policy is
1796 * the current task's mempolicy, this protection is implicit, as only the
1797 * task can change its policy. The system default policy requires no
1798 * such protection.
Christoph Lameterdc85da12006-01-18 17:42:36 -08001799 */
Andi Kleene7b691b2012-06-09 02:40:03 -07001800unsigned slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001801{
Andi Kleene7b691b2012-06-09 02:40:03 -07001802 struct mempolicy *policy;
1803
1804 if (in_interrupt())
1805 return numa_node_id();
1806
1807 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001808 if (!policy || policy->flags & MPOL_F_LOCAL)
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001809 return numa_node_id();
Christoph Lameter765c4502006-09-27 01:50:08 -07001810
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001811 switch (policy->mode) {
1812 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001813 /*
1814 * handled MPOL_F_LOCAL above
1815 */
1816 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001817
Christoph Lameterdc85da12006-01-18 17:42:36 -08001818 case MPOL_INTERLEAVE:
1819 return interleave_nodes(policy);
1820
Mel Gormandd1a2392008-04-28 02:12:17 -07001821 case MPOL_BIND: {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001822 /*
1823 * Follow bind policy behavior and start allocation at the
1824 * first node.
1825 */
Mel Gorman19770b32008-04-28 02:12:18 -07001826 struct zonelist *zonelist;
1827 struct zone *zone;
1828 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1829 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1830 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1831 &policy->v.nodes,
1832 &zone);
Eric Dumazet800416f2010-10-27 19:33:43 +02001833 return zone ? zone->node : numa_node_id();
Mel Gormandd1a2392008-04-28 02:12:17 -07001834 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001835
Christoph Lameterdc85da12006-01-18 17:42:36 -08001836 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001837 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001838 }
1839}
1840
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841/* Do static interleaving for a VMA with known offset. */
1842static unsigned offset_il_node(struct mempolicy *pol,
1843 struct vm_area_struct *vma, unsigned long off)
1844{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001845 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001846 unsigned target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 int c;
1848 int nid = -1;
1849
David Rientjesf5b087b2008-04-28 02:12:27 -07001850 if (!nnodes)
1851 return numa_node_id();
1852 target = (unsigned int)off % nnodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 c = 0;
1854 do {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001855 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 c++;
1857 } while (c <= target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 return nid;
1859}
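/*
 * Worked example (illustrative values): with pol->v.nodes = {0, 2, 5} the
 * weight nnodes is 3, so an offset of 7 gives target = 7 % 3 = 1; the loop
 * walks past node 0 (c = 1) and stops after reaching node 2 (c = 2 > target),
 * interleaving that page onto node 2.
 */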
1860
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001861/* Determine a node number for interleave */
1862static inline unsigned interleave_nid(struct mempolicy *pol,
1863 struct vm_area_struct *vma, unsigned long addr, int shift)
1864{
1865 if (vma) {
1866 unsigned long off;
1867
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001868 /*
1869 * for small pages, there is no difference between
1870 * shift and PAGE_SHIFT, so the bit-shift is safe.
1871 * for huge pages, since vm_pgoff is in units of small
1872 * pages, we need to shift off the always 0 bits to get
1873 * a useful offset.
1874 */
1875 BUG_ON(shift < PAGE_SHIFT);
1876 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001877 off += (addr - vma->vm_start) >> shift;
1878 return offset_il_node(pol, vma, off);
1879 } else
1880 return interleave_nodes(pol);
1881}
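/*
 * Worked example, assuming 4KB base pages (PAGE_SHIFT == 12) and a hugetlb
 * VMA backed by 2MB pages (shift == 21): vm_pgoff, kept in units of small
 * pages, is shifted right by 21 - 12 = 9 to count huge pages, and a fault
 * 6MB into the VMA adds (6MB >> 21) = 3 huge-page units to the offset.
 */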
1882
Michal Hocko778d3b02011-07-26 16:08:30 -07001883/*
1884 * Return the bit number of a random bit set in the nodemask.
1885 * (returns -1 if nodemask is empty)
1886 */
1887int node_random(const nodemask_t *maskp)
1888{
1889 int w, bit = -1;
1890
1891 w = nodes_weight(*maskp);
1892 if (w)
1893 bit = bitmap_ord_to_pos(maskp->bits,
1894 get_random_int() % w, MAX_NUMNODES);
1895 return bit;
1896}
1897
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001898#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001899/*
1900 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1901 * @vma = virtual memory area whose policy is sought
1902 * @addr = address in @vma for shared policy lookup and interleave policy
1903 * @gfp_flags = for requested zone
Mel Gorman19770b32008-04-28 02:12:18 -07001904 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1905 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001906 *
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001907 * Returns a zonelist suitable for a huge page allocation and a pointer
1908 * to the struct mempolicy for conditional unref after allocation.
1909 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1910 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001911 *
1912 * Must be protected by get_mems_allowed()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001913 */
Mel Gorman396faf02007-07-17 04:03:13 -07001914struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001915 gfp_t gfp_flags, struct mempolicy **mpol,
1916 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001917{
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001918 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001919
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001920 *mpol = get_vma_policy(current, vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001921 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001922
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001923 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1924 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
Andi Kleena5516432008-07-23 21:27:41 -07001925 huge_page_shift(hstate_vma(vma))), gfp_flags);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001926 } else {
Andi Kleen2f5f9482011-03-04 17:36:29 -08001927 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001928 if ((*mpol)->mode == MPOL_BIND)
1929 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001930 }
1931 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001932}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001933
1934/*
1935 * init_nodemask_of_mempolicy
1936 *
1937 * If the current task's mempolicy is "default" [NULL], return 'false'
1938 * to indicate default policy. Otherwise, extract the policy nodemask
1939 * for 'bind' or 'interleave' policy into the argument nodemask, or
1940 * initialize the argument nodemask to contain the single node for
1941 * 'preferred' or 'local' policy and return 'true' to indicate presence
1942 * of non-default mempolicy.
1943 *
1944 * We don't bother with reference counting the mempolicy [mpol_get/put]
1945 * because the current task is examining its own mempolicy and a task's
1946 * mempolicy is only ever changed by the task itself.
1947 *
1948 * N.B., it is the caller's responsibility to free a returned nodemask.
1949 */
1950bool init_nodemask_of_mempolicy(nodemask_t *mask)
1951{
1952 struct mempolicy *mempolicy;
1953 int nid;
1954
1955 if (!(mask && current->mempolicy))
1956 return false;
1957
Miao Xiec0ff7452010-05-24 14:32:08 -07001958 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001959 mempolicy = current->mempolicy;
1960 switch (mempolicy->mode) {
1961 case MPOL_PREFERRED:
1962 if (mempolicy->flags & MPOL_F_LOCAL)
1963 nid = numa_node_id();
1964 else
1965 nid = mempolicy->v.preferred_node;
1966 init_nodemask_of_node(mask, nid);
1967 break;
1968
1969 case MPOL_BIND:
1970 /* Fall through */
1971 case MPOL_INTERLEAVE:
1972 *mask = mempolicy->v.nodes;
1973 break;
1974
1975 default:
1976 BUG();
1977 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001978 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001979
1980 return true;
1981}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001982#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001983
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001984/*
1985 * mempolicy_nodemask_intersects
1986 *
1987 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1988 * policy. Otherwise, check for intersection between mask and the policy
1989 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1990 * policy, always return true since it may allocate elsewhere on fallback.
1991 *
1992 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1993 */
1994bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1995 const nodemask_t *mask)
1996{
1997 struct mempolicy *mempolicy;
1998 bool ret = true;
1999
2000 if (!mask)
2001 return ret;
2002 task_lock(tsk);
2003 mempolicy = tsk->mempolicy;
2004 if (!mempolicy)
2005 goto out;
2006
2007 switch (mempolicy->mode) {
2008 case MPOL_PREFERRED:
2009 /*
2010 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
2011 * allocate from; they may fall back to other nodes when OOM.
2012 * Thus, it's possible for tsk to have allocated memory from
2013 * nodes in mask.
2014 */
2015 break;
2016 case MPOL_BIND:
2017 case MPOL_INTERLEAVE:
2018 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2019 break;
2020 default:
2021 BUG();
2022 }
2023out:
2024 task_unlock(tsk);
2025 return ret;
2026}
2027
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028/* Allocate a page in interleaved policy.
2029 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07002030static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2031 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032{
2033 struct zonelist *zl;
2034 struct page *page;
2035
Mel Gorman0e884602008-04-28 02:12:14 -07002036 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07002038 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07002039 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 return page;
2041}
2042
2043/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002044 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 *
2046 * @gfp:
2047 * %GFP_USER user allocation.
2048 * %GFP_KERNEL kernel allocations,
2049 * %GFP_HIGHMEM highmem/user allocations,
2050 * %GFP_FS allocation should not call back into a file system.
2051 * %GFP_ATOMIC don't sleep.
2052 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002053 * @order: Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 * @vma: Pointer to VMA or NULL if not available.
2055 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2056 *
2057 * This function allocates a page from the kernel page pool and applies
2058 * a NUMA policy associated with the VMA or the current process.
2059 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2060 * mm_struct of the VMA to prevent it from going away. Should be used for
2061 * all allocations for pages that will be mapped into
2062 * user space. Returns NULL when no page can be allocated.
2063 *
2064 * Should be called with the mmap_sem of the vma's mm held.
2065 */
2066struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002067alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Andi Kleen2f5f9482011-03-04 17:36:29 -08002068 unsigned long addr, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002070 struct mempolicy *pol;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002071 struct zonelist *zl;
Miao Xiec0ff7452010-05-24 14:32:08 -07002072 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002073 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
Mel Gormancc9a6c82012-03-21 16:34:11 -07002075retry_cpuset:
2076 pol = get_vma_policy(current, vma, addr);
2077 cpuset_mems_cookie = get_mems_allowed();
2078
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002079 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002081
Andi Kleen8eac5632011-02-25 14:44:28 -08002082 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002083 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002084 page = alloc_page_interleave(gfp, order, nid);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002085 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2086 goto retry_cpuset;
2087
Miao Xiec0ff7452010-05-24 14:32:08 -07002088 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 }
Andi Kleen2f5f9482011-03-04 17:36:29 -08002090 zl = policy_zonelist(gfp, pol, node);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002091 if (unlikely(mpol_needs_cond_ref(pol))) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002092 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002093 * slow path: ref counted shared policy
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002094 */
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002095 struct page *page = __alloc_pages_nodemask(gfp, order,
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002096 zl, policy_nodemask(gfp, pol));
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07002097 __mpol_put(pol);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002098 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2099 goto retry_cpuset;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002100 return page;
2101 }
2102 /*
2103 * fast path: default or task policy
2104 */
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002105 page = __alloc_pages_nodemask(gfp, order, zl,
2106 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002107 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2108 goto retry_cpuset;
Miao Xiec0ff7452010-05-24 14:32:08 -07002109 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110}
2111
2112/**
2113 * alloc_pages_current - Allocate pages.
2114 *
2115 * @gfp:
2116 * %GFP_USER user allocation,
2117 * %GFP_KERNEL kernel allocation,
2118 * %GFP_HIGHMEM highmem allocation,
2119 * %GFP_FS don't call back into a file system.
2120 * %GFP_ATOMIC don't sleep.
2121 * @order: Power of two of allocation size in pages. 0 is a single page.
2122 *
2123 * Allocate a page from the kernel page pool and, when not in
2124 * interrupt context, apply the current process' NUMA policy.
2125 * Returns NULL when no page can be allocated.
2126 *
Paul Jacksoncf2a4732006-01-08 01:01:54 -08002127 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 * 1) it's ok to take cpuset_sem (can WAIT), and
2129 * 2) allocating for current task (not interrupt).
2130 */
Al Virodd0fc662005-10-07 07:46:04 +01002131struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132{
2133 struct mempolicy *pol = current->mempolicy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002134 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002135 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
Christoph Lameter9b819d22006-09-25 23:31:40 -07002137 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 pol = &default_policy;
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002139
Mel Gormancc9a6c82012-03-21 16:34:11 -07002140retry_cpuset:
2141 cpuset_mems_cookie = get_mems_allowed();
2142
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002143 /*
2144 * No reference counting needed for current->mempolicy
2145 * nor system default_policy
2146 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002147 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002148 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2149 else
2150 page = __alloc_pages_nodemask(gfp, order,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002151 policy_zonelist(gfp, pol, numa_node_id()),
2152 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002153
2154 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2155 goto retry_cpuset;
2156
Miao Xiec0ff7452010-05-24 14:32:08 -07002157 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158}
2159EXPORT_SYMBOL(alloc_pages_current);
2160
Paul Jackson42253992006-01-08 01:01:59 -08002161/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002162 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002163 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2164 * with the mems_allowed returned by cpuset_mems_allowed(). This
2165 * keeps mempolicies cpuset relative after its cpuset moves. See
2166 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002167 *
2168 * current's mempolicy may be rebound by another task (the task that changes
2169 * the cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002170 */
Paul Jackson42253992006-01-08 01:01:59 -08002171
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002172/* Slow path of a mempolicy duplicate */
2173struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174{
2175 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2176
2177 if (!new)
2178 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002179
2180 /* task's mempolicy is protected by alloc_lock */
2181 if (old == current->mempolicy) {
2182 task_lock(current);
2183 *new = *old;
2184 task_unlock(current);
2185 } else
2186 *new = *old;
2187
Paul E. McKenney99ee4ca2010-03-03 17:50:17 -08002188 rcu_read_lock();
Paul Jackson42253992006-01-08 01:01:59 -08002189 if (current_cpuset_is_being_rebound()) {
2190 nodemask_t mems = cpuset_mems_allowed(current);
Miao Xie708c1bb2010-05-24 14:32:07 -07002191 if (new->flags & MPOL_F_REBINDING)
2192 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2193 else
2194 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
Paul Jackson42253992006-01-08 01:01:59 -08002195 }
Paul E. McKenney99ee4ca2010-03-03 17:50:17 -08002196 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 return new;
2199}
2200
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002201/*
2202 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
2203 * eliminate the MPOL_F_* flags that require conditional ref and
2204 * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
2205 * after return. Use the returned value.
2206 *
2207 * Allows use of a mempolicy for, e.g., multiple allocations with a single
2208 * policy lookup, even if the policy needs/has extra ref on lookup.
2209 * shmem_readahead needs this.
2210 */
2211struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
2212 struct mempolicy *frompol)
2213{
2214 if (!mpol_needs_cond_ref(frompol))
2215 return frompol;
2216
2217 *tompol = *frompol;
2218 tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
2219 __mpol_put(frompol);
2220 return tompol;
2221}
2222
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002224bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225{
2226 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002227 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002228 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002229 return false;
Bob Liu19800502010-05-24 14:32:01 -07002230 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002231 return false;
Bob Liu19800502010-05-24 14:32:01 -07002232 if (mpol_store_user_nodemask(a))
2233 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002234 return false;
Bob Liu19800502010-05-24 14:32:01 -07002235
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002236 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002237 case MPOL_BIND:
2238 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002240 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 case MPOL_PREFERRED:
Namhyung Kim75719662011-03-22 16:33:02 -07002242 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 default:
2244 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002245 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 }
2247}
2248
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 * Shared memory backing store policy support.
2251 *
2252 * Remember policies even when nobody has shared memory mapped.
2253 * The policies are kept in Red-Black tree linked from the inode.
2254 * They are protected by the sp->mutex, which should be held
2255 * for any accesses to the tree.
2256 */
2257
2258/* lookup first element intersecting start-end */
Mel Gormanb22d1272012-10-08 16:29:17 -07002259/* Caller holds sp->mutex */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260static struct sp_node *
2261sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2262{
2263 struct rb_node *n = sp->root.rb_node;
2264
2265 while (n) {
2266 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2267
2268 if (start >= p->end)
2269 n = n->rb_right;
2270 else if (end <= p->start)
2271 n = n->rb_left;
2272 else
2273 break;
2274 }
2275 if (!n)
2276 return NULL;
2277 for (;;) {
2278 struct sp_node *w = NULL;
2279 struct rb_node *prev = rb_prev(n);
2280 if (!prev)
2281 break;
2282 w = rb_entry(prev, struct sp_node, nd);
2283 if (w->end <= start)
2284 break;
2285 n = prev;
2286 }
2287 return rb_entry(n, struct sp_node, nd);
2288}
2289
2290/* Insert a new shared policy into the list. */
2291/* Caller holds sp->mutex */
2292static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2293{
2294 struct rb_node **p = &sp->root.rb_node;
2295 struct rb_node *parent = NULL;
2296 struct sp_node *nd;
2297
2298 while (*p) {
2299 parent = *p;
2300 nd = rb_entry(parent, struct sp_node, nd);
2301 if (new->start < nd->start)
2302 p = &(*p)->rb_left;
2303 else if (new->end > nd->end)
2304 p = &(*p)->rb_right;
2305 else
2306 BUG();
2307 }
2308 rb_link_node(&new->nd, parent, p);
2309 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002310 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002311 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312}
2313
2314/* Find shared policy intersecting idx */
2315struct mempolicy *
2316mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2317{
2318 struct mempolicy *pol = NULL;
2319 struct sp_node *sn;
2320
2321 if (!sp->root.rb_node)
2322 return NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002323 mutex_lock(&sp->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 sn = sp_lookup(sp, idx, idx+1);
2325 if (sn) {
2326 mpol_get(sn->policy);
2327 pol = sn->policy;
2328 }
Mel Gormanb22d1272012-10-08 16:29:17 -07002329 mutex_unlock(&sp->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 return pol;
2331}
2332
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002333static void sp_free(struct sp_node *n)
2334{
2335 mpol_put(n->policy);
2336 kmem_cache_free(sn_cache, n);
2337}
2338
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002339/**
2340 * mpol_misplaced - check whether current page node is valid in policy
2341 *
2342 * @page - page to be checked
2343 * @vma - vm area where page mapped
2344 * @addr - virtual address where page mapped
2345 *
2346 * Lookup current policy node id for vma,addr and "compare to" page's
2347 * node id.
2348 *
2349 * Returns:
2350 * -1 - not misplaced, page is in the right node
2351 * node - node id where the page should be
2352 *
2353 * Policy determination "mimics" alloc_page_vma().
2354 * Called from fault path where we know the vma and faulting address.
2355 */
2356int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2357{
2358 struct mempolicy *pol;
2359 struct zone *zone;
2360 int curnid = page_to_nid(page);
2361 unsigned long pgoff;
2362 int polnid = -1;
2363 int ret = -1;
2364
2365 BUG_ON(!vma);
2366
2367 pol = get_vma_policy(current, vma, addr);
2368 if (!(pol->flags & MPOL_F_MOF))
2369 goto out;
2370
2371 switch (pol->mode) {
2372 case MPOL_INTERLEAVE:
2373 BUG_ON(addr >= vma->vm_end);
2374 BUG_ON(addr < vma->vm_start);
2375
2376 pgoff = vma->vm_pgoff;
2377 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2378 polnid = offset_il_node(pol, vma, pgoff);
2379 break;
2380
2381 case MPOL_PREFERRED:
2382 if (pol->flags & MPOL_F_LOCAL)
2383 polnid = numa_node_id();
2384 else
2385 polnid = pol->v.preferred_node;
2386 break;
2387
2388 case MPOL_BIND:
2389 /*
2390 * allows binding to multiple nodes.
2391 * use current page if in policy nodemask,
2392 * else select nearest allowed node, if any.
2393 * If no allowed nodes, use current [!misplaced].
2394 */
2395 if (node_isset(curnid, pol->v.nodes))
2396 goto out;
2397 (void)first_zones_zonelist(
2398 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2399 gfp_zone(GFP_HIGHUSER),
2400 &pol->v.nodes, &zone);
2401 polnid = zone->node;
2402 break;
2403
2404 default:
2405 BUG();
2406 }
2407 if (curnid != polnid)
2408 ret = polnid;
2409out:
2410 mpol_cond_put(pol);
2411
2412 return ret;
2413}
2414
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2416{
Paul Mundt140d5a42007-07-15 23:38:16 -07002417 pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002419 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420}
2421
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002422static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2423 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002425 struct sp_node *n;
2426 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002428 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 if (!n)
2430 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002431
2432 newpol = mpol_dup(pol);
2433 if (IS_ERR(newpol)) {
2434 kmem_cache_free(sn_cache, n);
2435 return NULL;
2436 }
2437 newpol->flags |= MPOL_F_SHARED;
2438
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 n->start = start;
2440 n->end = end;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002441 n->policy = newpol;
2442
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 return n;
2444}
2445
2446/* Replace a policy range. */
2447static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2448 unsigned long end, struct sp_node *new)
2449{
Mel Gormanb22d1272012-10-08 16:29:17 -07002450 struct sp_node *n;
2451 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452
Mel Gormanb22d1272012-10-08 16:29:17 -07002453 mutex_lock(&sp->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 n = sp_lookup(sp, start, end);
2455 /* Take care of old policies in the same range. */
2456 while (n && n->start < end) {
2457 struct rb_node *next = rb_next(&n->nd);
2458 if (n->start >= start) {
2459 if (n->end <= end)
2460 sp_delete(sp, n);
2461 else
2462 n->start = end;
2463 } else {
2464 /* Old policy spanning whole new range. */
2465 if (n->end > end) {
Mel Gormanb22d1272012-10-08 16:29:17 -07002466 struct sp_node *new2;
2467 new2 = sp_alloc(end, n->end, n->policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 if (!new2) {
Mel Gormanb22d1272012-10-08 16:29:17 -07002469 ret = -ENOMEM;
2470 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471 }
2472 n->end = start;
2473 sp_insert(sp, new2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 break;
2475 } else
2476 n->end = start;
2477 }
2478 if (!next)
2479 break;
2480 n = rb_entry(next, struct sp_node, nd);
2481 }
2482 if (new)
2483 sp_insert(sp, new);
Mel Gormanb22d1272012-10-08 16:29:17 -07002484out:
2485 mutex_unlock(&sp->mutex);
2486 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487}
2488
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002489/**
2490 * mpol_shared_policy_init - initialize shared policy for inode
2491 * @sp: pointer to inode shared policy
2492 * @mpol: struct mempolicy to install
2493 *
2494 * Install non-NULL @mpol in inode's shared policy rb-tree.
2495 * On entry, the current task has a reference on a non-NULL @mpol.
2496 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002497 * This is called from get_inode() calls, so we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002498 */
2499void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002500{
Miao Xie58568d22009-06-16 15:31:49 -07002501 int ret;
2502
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002503 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Mel Gormanb22d1272012-10-08 16:29:17 -07002504 mutex_init(&sp->mutex);
Robin Holt7339ff82006-01-14 13:20:48 -08002505
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002506 if (mpol) {
2507 struct vm_area_struct pvma;
2508 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002509 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002510
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002511 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002512 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002513 /* contextualize the tmpfs mount point mempolicy */
2514 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002515 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002516 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002517
2518 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002519 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002520 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002521 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002522 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002523
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002524 /* Create pseudo-vma that contains just the policy */
2525 memset(&pvma, 0, sizeof(struct vm_area_struct));
2526 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2527 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002528
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002529put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002530 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002531free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002532 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002533put_mpol:
2534 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002535 }
2536}
2537
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538int mpol_set_shared_policy(struct shared_policy *info,
2539 struct vm_area_struct *vma, struct mempolicy *npol)
2540{
2541 int err;
2542 struct sp_node *new = NULL;
2543 unsigned long sz = vma_pages(vma);
2544
David Rientjes028fec42008-04-28 02:12:25 -07002545 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002547 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002548 npol ? npol->flags : -1,
Paul Mundt140d5a42007-07-15 23:38:16 -07002549 npol ? nodes_addr(npol->v.nodes)[0] : -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550
2551 if (npol) {
2552 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2553 if (!new)
2554 return -ENOMEM;
2555 }
2556 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2557 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002558 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 return err;
2560}
2561
2562/* Free a backing policy store on inode delete. */
2563void mpol_free_shared_policy(struct shared_policy *p)
2564{
2565 struct sp_node *n;
2566 struct rb_node *next;
2567
2568 if (!p->root.rb_node)
2569 return;
Mel Gormanb22d1272012-10-08 16:29:17 -07002570 mutex_lock(&p->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 next = rb_first(&p->root);
2572 while (next) {
2573 n = rb_entry(next, struct sp_node, nd);
2574 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002575 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 }
Mel Gormanb22d1272012-10-08 16:29:17 -07002577 mutex_unlock(&p->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578}
2579
2580/* assumes fs == KERNEL_DS */
2581void __init numa_policy_init(void)
2582{
Paul Mundtb71636e2007-07-15 23:38:15 -07002583 nodemask_t interleave_nodes;
2584 unsigned long largest = 0;
2585 int nid, prefer = 0;
2586
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 policy_cache = kmem_cache_create("numa_policy",
2588 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002589 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590
2591 sn_cache = kmem_cache_create("shared_policy_node",
2592 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002593 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594
Paul Mundtb71636e2007-07-15 23:38:15 -07002595 /*
2596 * Set interleaving policy for system init. Interleaving is only
2597 * enabled across suitably sized nodes (default is >= 16MB), falling
2598 * back to the largest node if they're all smaller.
2599 */
2600 nodes_clear(interleave_nodes);
Christoph Lameter56bbd652007-10-16 01:25:35 -07002601 for_each_node_state(nid, N_HIGH_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002602 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603
Paul Mundtb71636e2007-07-15 23:38:15 -07002604 /* Preserve the largest node */
2605 if (largest < total_pages) {
2606 largest = total_pages;
2607 prefer = nid;
2608 }
2609
2610 /* Interleave this node? */
2611 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2612 node_set(nid, interleave_nodes);
2613 }
2614
2615 /* All too small, use the largest */
2616 if (unlikely(nodes_empty(interleave_nodes)))
2617 node_set(prefer, interleave_nodes);
2618
David Rientjes028fec42008-04-28 02:12:25 -07002619 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 printk("numa_policy_init: interleaving failed\n");
2621}
2622
Christoph Lameter8bccd852005-10-29 18:16:59 -07002623/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624void numa_default_policy(void)
2625{
David Rientjes028fec42008-04-28 02:12:25 -07002626 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627}
Paul Jackson68860ec2005-10-30 15:02:36 -08002628
Paul Jackson42253992006-01-08 01:01:59 -08002629/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002630 * Parse and format mempolicy from/to strings
2631 */
2632
2633/*
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002634 * "local" is a pseudo-policy: MPOL_PREFERRED with the MPOL_F_LOCAL flag
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002635 * Used only for mpol_parse_str() and mpol_to_str()
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002636 */
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002637static const char * const policy_modes[] =
2638{
2639 [MPOL_DEFAULT] = "default",
2640 [MPOL_PREFERRED] = "prefer",
2641 [MPOL_BIND] = "bind",
2642 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002643 [MPOL_LOCAL] = "local",
2644 [MPOL_NOOP] = "noop", /* should not actually be used */
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002645};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002646
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002647
2648#ifdef CONFIG_TMPFS
2649/**
2650 * mpol_parse_str - parse string to mempolicy
2651 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002652 * @mpol: pointer to struct mempolicy pointer, returned on success.
2653 * @no_context: flag whether to "contextualize" the mempolicy
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002654 *
2655 * Format of input:
2656 * <mode>[=<flags>][:<nodelist>]
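 *	e.g. "bind:1-3", "interleave=static:0,2" or "prefer:1"
 *	(node numbers above are illustrative, not taken from this file)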
2657 *
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002658 * if @no_context is true, save the input nodemask in w.user_nodemask in
2659 * the returned mempolicy. This will be used to "clone" the mempolicy in
2660 * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
2661 * mount option. Note that if 'static' or 'relative' mode flags were
2662 * specified, the input nodemask will already have been saved. Saving
2663 * it again is redundant, but safe.
2664 *
2665 * On success, returns 0, else 1
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002666 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002667int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002668{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002669 struct mempolicy *new = NULL;
Lee Schermerhornb4652e82010-05-24 14:32:03 -07002670 unsigned short mode;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002671 unsigned short uninitialized_var(mode_flags);
2672 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002673 char *nodelist = strchr(str, ':');
2674 char *flags = strchr(str, '=');
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002675 int err = 1;
2676
2677 if (nodelist) {
2678 /* NUL-terminate mode or flags string */
2679 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002680 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002681 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002682 if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002683 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002684 } else
2685 nodes_clear(nodes);
2686
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002687 if (flags)
2688 *flags++ = '\0'; /* terminate mode string */
2689
Peter Zijlstra479e2802012-10-25 14:16:28 +02002690 for (mode = 0; mode < MPOL_MAX; mode++) {
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002691 if (!strcmp(str, policy_modes[mode])) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002692 break;
2693 }
2694 }
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002695 if (mode >= MPOL_MAX || mode == MPOL_NOOP)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002696 goto out;
2697
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002698 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002699 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002700 /*
2701 * Insist on a nodelist of one node only
2702 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002703 if (nodelist) {
2704 char *rest = nodelist;
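			/* a nodelist that is all digits names exactly one node */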
2705 while (isdigit(*rest))
2706 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002707 if (*rest)
2708 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002709 }
2710 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002711 case MPOL_INTERLEAVE:
2712 /*
2713 * Default to online nodes with memory if no nodelist
2714 */
2715 if (!nodelist)
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002716 nodes = node_states[N_HIGH_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002717 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002718 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002719 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002720 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002721 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002722 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002723 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002724 mode = MPOL_PREFERRED;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002725 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07002726 case MPOL_DEFAULT:
2727 /*
2728		 * Insist on an empty nodelist
2729 */
2730 if (!nodelist)
2731 err = 0;
2732 goto out;
KOSAKI Motohirod69b2e62010-03-23 13:35:30 -07002733 case MPOL_BIND:
2734 /*
2735 * Insist on a nodelist
2736 */
2737 if (!nodelist)
2738 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002739 }
2740
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002741 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002742 if (flags) {
2743 /*
2744 * Currently, we only support two mutually exclusive
2745 * mode flags.
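		 * "static" (MPOL_F_STATIC_NODES) keeps the user nodemask as-is
		 * across cpuset changes; "relative" (MPOL_F_RELATIVE_NODES)
		 * interprets it relative to the task's allowed nodes.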
2746 */
2747 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002748 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002749 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002750 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002751 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002752 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002753 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002754
2755 new = mpol_new(mode, mode_flags, &nodes);
2756 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002757 goto out;
2758
Lee Schermerhorne17f74a2010-05-24 14:32:02 -07002759 if (no_context) {
2760 /* save for contextualization */
2761 new->w.user_nodemask = nodes;
2762 } else {
Miao Xie58568d22009-06-16 15:31:49 -07002763 int ret;
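			/*
			 * NODEMASK_SCRATCH may heap-allocate the temporary
			 * nodemask when nodemask_t is too big for the stack;
			 * a failed allocation falls back to -ENOMEM below.
			 */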
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002764 NODEMASK_SCRATCH(scratch);
2765 if (scratch) {
2766 task_lock(current);
2767 ret = mpol_set_nodemask(new, &nodes, scratch);
2768 task_unlock(current);
2769 } else
2770 ret = -ENOMEM;
2771 NODEMASK_SCRATCH_FREE(scratch);
2772 if (ret) {
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002773 mpol_put(new);
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002774 goto out;
Miao Xie58568d22009-06-16 15:31:49 -07002775 }
2776 }
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002777 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002778
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002779out:
2780 /* Restore string for error message */
2781 if (nodelist)
2782 *--nodelist = ':';
2783 if (flags)
2784 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002785 if (!err)
2786 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002787 return err;
2788}
2789#endif /* CONFIG_TMPFS */
2790
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002791/**
2792 * mpol_to_str - format a mempolicy structure for printing
2793 * @buffer: to contain formatted mempolicy string
2794 * @maxlen: length of @buffer
2795 * @pol: pointer to mempolicy to be formatted
2796 * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
2797 *
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002798 * Convert a mempolicy into a string.
2799 * Returns the number of characters in buffer (if positive)
2800 * or an error (negative)
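 *
 * For example (illustrative), a policy parsed from "interleave:0-3" is
 * expected to format back to "interleave:0-3" here.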
2801 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002802int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002803{
2804 char *p = buffer;
2805 int l;
2806 nodemask_t nodes;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002807 unsigned short mode;
David Rientjesf5b087b2008-04-28 02:12:27 -07002808 unsigned short flags = pol ? pol->flags : 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002809
Lee Schermerhorn22919902008-04-28 02:13:22 -07002810 /*
2811 * Sanity check: room for longest mode, flag and some nodes
2812 */
2813 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2814
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002815 if (!pol || pol == &default_policy)
2816 mode = MPOL_DEFAULT;
2817 else
2818 mode = pol->mode;
2819
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002820 switch (mode) {
2821 case MPOL_DEFAULT:
2822 nodes_clear(nodes);
2823 break;
2824
2825 case MPOL_PREFERRED:
2826 nodes_clear(nodes);
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002827 if (flags & MPOL_F_LOCAL)
Lee Schermerhorn53f25562008-04-28 02:13:20 -07002828 mode = MPOL_LOCAL; /* pseudo-policy */
2829 else
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002830 node_set(pol->v.preferred_node, nodes);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002831 break;
2832
2833 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07002834 /* Fall through */
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002835 case MPOL_INTERLEAVE:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002836 if (no_context)
2837 nodes = pol->w.user_nodemask;
2838 else
2839 nodes = pol->v.nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002840 break;
2841
2842 default:
Dave Jones80de7c32012-09-06 12:01:00 -04002843 return -EINVAL;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002844 }
2845
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002846 l = strlen(policy_modes[mode]);
Lee Schermerhorn53f25562008-04-28 02:13:20 -07002847 if (buffer + maxlen < p + l + 1)
2848 return -ENOSPC;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002849
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002850 strcpy(p, policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002851 p += l;
2852
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002853 if (flags & MPOL_MODE_FLAGS) {
David Rientjesf5b087b2008-04-28 02:12:27 -07002854 if (buffer + maxlen < p + 2)
2855 return -ENOSPC;
2856 *p++ = '=';
2857
Lee Schermerhorn22919902008-04-28 02:13:22 -07002858 /*
2859 * Currently, the only defined flags are mutually exclusive
2860 */
David Rientjesf5b087b2008-04-28 02:12:27 -07002861 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07002862 p += snprintf(p, buffer + maxlen - p, "static");
2863 else if (flags & MPOL_F_RELATIVE_NODES)
2864 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07002865 }
2866
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002867 if (!nodes_empty(nodes)) {
2868 if (buffer + maxlen < p + 2)
2869 return -ENOSPC;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002870 *p++ = ':';
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002871 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2872 }
2873 return p - buffer;
2874}