/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

/*
 * Tracks how many cpusets are currently defined in the system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;

/* See "Frequency meter" comments, below. */

struct fmeter {
        int cnt;                /* unprocessed events count */
        int val;                /* most recent output value */
        time_t time;            /* clock (secs) when val computed */
        spinlock_t lock;        /* guards read or write of above */
};

struct cpuset {
        struct cgroup_subsys_state css;

        unsigned long flags;            /* "unsigned long" so bitops work */
        cpumask_var_t cpus_allowed;     /* CPUs allowed to tasks in cpuset */
        nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */

        /*
         * These are the old Memory Nodes that tasks took on.
         *
         * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
         * - A new cpuset's old_mems_allowed is initialized when some
         *   task is moved into it.
         * - old_mems_allowed is used in cpuset_migrate_mm() when we change
         *   cpuset.mems_allowed and have tasks' nodemask updated, and
         *   then old_mems_allowed is updated to mems_allowed.
         */
        nodemask_t old_mems_allowed;

        struct fmeter fmeter;           /* memory_pressure filter */

        /*
         * Tasks are being attached to this cpuset.  Used to prevent
         * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
         */
        int attach_in_progress;

        /* partition number for rebuild_sched_domains() */
        int pn;

        /* for custom sched domain */
        int relax_domain_level;
};

/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
{
        return container_of(cgroup_css(cgrp, cpuset_subsys_id),
                            struct cpuset, css);
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
        return container_of(task_css(task, cpuset_subsys_id),
                            struct cpuset, css);
}

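/* Retrieve the parent cpuset, or NULL if @cs is the top cpuset */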
static inline struct cpuset *parent_cs(struct cpuset *cs)
{
        struct cgroup *pcgrp = cs->css.cgroup->parent;

        if (pcgrp)
                return cgroup_cs(pcgrp);
        return NULL;
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
        return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
        return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
        CS_ONLINE,
        CS_CPU_EXCLUSIVE,
        CS_MEM_EXCLUSIVE,
        CS_MEM_HARDWALL,
        CS_MEMORY_MIGRATE,
        CS_SCHED_LOAD_BALANCE,
        CS_SPREAD_PAGE,
        CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(const struct cpuset *cs)
{
        return test_bit(CS_ONLINE, &cs->flags);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
        return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
        return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
        return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
        return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
        return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
        return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
        return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
        .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
                  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_cgrp: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)           \
        cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)     \
                if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))

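/*
 * Example usage (a sketch; the whole iteration must stay under
 * rcu_read_lock(), and do_something() is purely illustrative):
 *
 *      struct cpuset *child;
 *      struct cgroup *pos;
 *
 *      rcu_read_lock();
 *      cpuset_for_each_child(child, pos, parent_cs)
 *              do_something(child);
 *      rcu_read_unlock();
 */
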
/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_cgrp: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_cgrp by calling
 * cgroup_rightmost_descendant() to skip a subtree.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)      \
        cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
                if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))

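/*
 * Sketch of skipping a whole subtree during the pre-order walk (this is
 * the pattern the users below follow):
 *
 *      rcu_read_lock();
 *      cpuset_for_each_descendant_pre(cp, pos, root_cs) {
 *              if (cpumask_empty(cp->cpus_allowed)) {
 *                      pos = cgroup_rightmost_descendant(pos);
 *                      continue;
 *              }
 *              ...
 *      }
 *      rcu_read_unlock();
 */
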
/*
 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 * and callback_mutex.  The latter may nest inside the former.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_mutex and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_mutex to query cpusets.
 * Once it is ready to make the changes, it takes callback_mutex, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator cannot be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mempolicy may be changed by
 * another task; we use alloc_lock in the task_struct to protect them.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_MUTEX(callback_mutex);

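/*
 * Typical writer sequence under the rules above (a sketch; see
 * update_cpumask() and friends below for real instances):
 *
 *      mutex_lock(&cpuset_mutex);      ... validate, allocate ...
 *      mutex_lock(&callback_mutex);    ... publish the new masks ...
 *      mutex_unlock(&callback_mutex);
 *      mutex_unlock(&cpuset_mutex);
 */
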
/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users.  If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead.
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
                        int flags, const char *unused_dev_name, void *data)
{
        struct file_system_type *cgroup_fs = get_fs_type("cgroup");
        struct dentry *ret = ERR_PTR(-ENODEV);
        if (cgroup_fs) {
                char mountopts[] =
                        "cpuset,noprefix,"
                        "release_agent=/sbin/cpuset_release_agent";
                ret = cgroup_fs->mount(cgroup_fs, flags,
                                       unused_dev_name, mountopts);
                put_filesystem(cgroup_fs);
        }
        return ret;
}

static struct file_system_type cpuset_fs_type = {
        .name = "cpuset",
        .mount = cpuset_mount,
};

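/*
 * In effect, "mount -t cpuset none /mnt" then behaves roughly like
 * (illustrative mount point):
 *
 *      mount -t cgroup -o cpuset,noprefix,\
 *              release_agent=/sbin/cpuset_release_agent none /mnt
 */
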
/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  The top
 * cpuset always has some cpus online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
        while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
                cs = parent_cs(cs);
        cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
        while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
                cs = parent_cs(cs);
        nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Called with callback_mutex/cpuset_mutex held
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
                                           struct task_struct *tsk)
{
        if (is_spread_page(cs))
                tsk->flags |= PF_SPREAD_PAGE;
        else
                tsk->flags &= ~PF_SPREAD_PAGE;
        if (is_spread_slab(cs))
                tsk->flags |= PF_SPREAD_SLAB;
        else
                tsk->flags &= ~PF_SPREAD_SLAB;
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
        return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
                nodes_subset(p->mems_allowed, q->mems_allowed) &&
                is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
                is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
        struct cpuset *trial;

        trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
        if (!trial)
                return NULL;

        if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
                kfree(trial);
                return NULL;
        }
        cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);

        return trial;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
        free_cpumask_var(trial->cpus_allowed);
        kfree(trial);
}

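/*
 * Typical use of a trial cpuset (a sketch; cf. update_cpumask() and
 * update_nodemask() below):
 *
 *      trialcs = alloc_trial_cpuset(cs);
 *      ...modify trialcs->cpus_allowed or trialcs->mems_allowed...
 *      retval = validate_change(cs, trialcs);
 *      if (!retval)
 *              ...commit the trial values to cs under callback_mutex...
 *      free_trial_cpuset(trialcs);
 */
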
/*
 * validate_change() - Used to validate that any proposed cpuset change
 *                     follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
        struct cgroup *cgrp;
        struct cpuset *c, *par;
        int ret;

        rcu_read_lock();

        /* Each of our child cpusets must be a subset of us */
        ret = -EBUSY;
        cpuset_for_each_child(c, cgrp, cur)
                if (!is_cpuset_subset(c, trial))
                        goto out;

        /* Remaining checks don't apply to root cpuset */
        ret = 0;
        if (cur == &top_cpuset)
                goto out;

        par = parent_cs(cur);

        /* We must be a subset of our parent cpuset */
        ret = -EACCES;
        if (!is_cpuset_subset(trial, par))
                goto out;

        /*
         * If either I or some sibling (!= me) is exclusive, we can't
         * overlap
         */
        ret = -EINVAL;
        cpuset_for_each_child(c, cgrp, par) {
                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                    c != cur &&
                    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
                        goto out;
                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
                    c != cur &&
                    nodes_intersects(trial->mems_allowed, c->mems_allowed))
                        goto out;
        }

        /*
         * Cpusets with tasks - existing or newly being attached - can't
         * have empty cpus_allowed or mems_allowed.
         */
        ret = -ENOSPC;
        if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
            (cpumask_empty(trial->cpus_allowed) &&
             nodes_empty(trial->mems_allowed)))
                goto out;

        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
        return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
        if (dattr->relax_domain_level < c->relax_domain_level)
                dattr->relax_domain_level = c->relax_domain_level;
        return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
                                    struct cpuset *root_cs)
{
        struct cpuset *cp;
        struct cgroup *pos_cgrp;

        rcu_read_lock();
        cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
                /* skip the whole subtree if @cp doesn't have any CPU */
                if (cpumask_empty(cp->cpus_allowed)) {
                        pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
                        continue;
                }

                if (is_sched_load_balance(cp))
                        update_domain_attr(dattr, cp);
        }
        rcu_read_unlock();
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to the kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    q  - a linked-list queue of cpuset pointers, used to implement a
 *         top-down scan of all cpusets.  This scan loads a pointer
 *         to each cpuset marked is_sched_load_balance into the
 *         array 'csa'.  For our purposes, rebuilding the scheduler's
 *         sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *         that need to be load balanced, for convenient iterative
 *         access by the subsequent code that finds the best partition,
 *         i.e. the set of domains (subsets) of CPUs such that the
 *         cpus_allowed of every cpuset marked is_sched_load_balance
 *         is a subset of one of these domains, while there are as
 *         many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *         the kernel/sched/core.c routine partition_sched_domains() in a
 *         convenient format, that can be easily compared to the prior
 *         value to determine what partition elements (sched domains)
 *         were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *      The triple nested loops below over i, j, k scan over the
 *      load balanced cpusets (using the array of cpuset pointers in
 *      csa[]) looking for pairs of cpusets that have overlapping
 *      cpus_allowed but different 'pn' partition numbers, and merging
 *      them into the same partition number.  It keeps looping on the
 *      'restart' label until it can no longer find any such pairs.
 *
 *      The union of the cpus_allowed masks from the set of all
 *      cpusets having the same 'pn' value then forms the one element
 *      of the partition (one sched domain) to be passed to
 *      partition_sched_domains().
 */
static int generate_sched_domains(cpumask_var_t **domains,
                        struct sched_domain_attr **attributes)
{
        struct cpuset *cp;      /* scans q */
        struct cpuset **csa;    /* array of all cpuset ptrs */
        int csn;                /* how many cpuset ptrs in csa so far */
        int i, j, k;            /* indices for partition finding loops */
        cpumask_var_t *doms;    /* resulting partition; i.e. sched domains */
        struct sched_domain_attr *dattr;  /* attributes for custom domains */
        int ndoms = 0;          /* number of sched domains in result */
        int nslot;              /* next empty doms[] struct cpumask slot */
        struct cgroup *pos_cgrp;

        doms = NULL;
        dattr = NULL;
        csa = NULL;

        /* Special case for the 99% of systems with one, full, sched domain */
        if (is_sched_load_balance(&top_cpuset)) {
                ndoms = 1;
                doms = alloc_sched_domains(ndoms);
                if (!doms)
                        goto done;

                dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
                if (dattr) {
                        *dattr = SD_ATTR_INIT;
                        update_domain_attr_tree(dattr, &top_cpuset);
                }
                cpumask_copy(doms[0], top_cpuset.cpus_allowed);

                goto done;
        }

        csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
        if (!csa)
                goto done;
        csn = 0;

        rcu_read_lock();
        cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
                /*
                 * Continue traversing beyond @cp iff @cp has some CPUs and
                 * isn't load balancing.  The former is obvious.  The
                 * latter: All child cpusets contain a subset of the
                 * parent's cpus, so just skip them, and then we call
                 * update_domain_attr_tree() to calc relax_domain_level of
                 * the corresponding sched domain.
                 */
                if (!cpumask_empty(cp->cpus_allowed) &&
                    !is_sched_load_balance(cp))
                        continue;

                if (is_sched_load_balance(cp))
                        csa[csn++] = cp;

                /* skip @cp's subtree */
                pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
        }
        rcu_read_unlock();

        for (i = 0; i < csn; i++)
                csa[i]->pn = i;
        ndoms = csn;

restart:
        /* Find the best partition (set of sched domains) */
        for (i = 0; i < csn; i++) {
                struct cpuset *a = csa[i];
                int apn = a->pn;

                for (j = 0; j < csn; j++) {
                        struct cpuset *b = csa[j];
                        int bpn = b->pn;

                        if (apn != bpn && cpusets_overlap(a, b)) {
                                for (k = 0; k < csn; k++) {
                                        struct cpuset *c = csa[k];

                                        if (c->pn == bpn)
                                                c->pn = apn;
                                }
                                ndoms--;        /* one less element */
                                goto restart;
                        }
                }
        }

        /*
         * Now we know how many domains to create.
         * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
         */
        doms = alloc_sched_domains(ndoms);
        if (!doms)
                goto done;

        /*
         * The rest of the code, including the scheduler, can deal with
         * dattr==NULL case. No need to abort if alloc fails.
         */
        dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

        for (nslot = 0, i = 0; i < csn; i++) {
                struct cpuset *a = csa[i];
                struct cpumask *dp;
                int apn = a->pn;

                if (apn < 0) {
                        /* Skip completed partitions */
                        continue;
                }

                dp = doms[nslot];

                if (nslot == ndoms) {
                        static int warnings = 10;
                        if (warnings) {
                                printk(KERN_WARNING
                                       "rebuild_sched_domains confused:"
                                       " nslot %d, ndoms %d, csn %d, i %d,"
                                       " apn %d\n",
                                       nslot, ndoms, csn, i, apn);
                                warnings--;
                        }
                        continue;
                }

                cpumask_clear(dp);
                if (dattr)
                        *(dattr + nslot) = SD_ATTR_INIT;
                for (j = i; j < csn; j++) {
                        struct cpuset *b = csa[j];

                        if (apn == b->pn) {
                                cpumask_or(dp, dp, b->cpus_allowed);
                                if (dattr)
                                        update_domain_attr_tree(dattr + nslot, b);

                                /* Done with this partition */
                                b->pn = -1;
                        }
                }
                nslot++;
        }
        BUG_ON(nslot != ndoms);

done:
        kfree(csa);

        /*
         * Fallback to the default domain if kmalloc() failed.
         * See comments in partition_sched_domains().
         */
        if (doms == NULL)
                ndoms = 1;

        *domains    = doms;
        *attributes = dattr;
        return ndoms;
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
        struct sched_domain_attr *attr;
        cpumask_var_t *doms;
        int ndoms;

        lockdep_assert_held(&cpuset_mutex);
        get_online_cpus();

        /*
         * We have raced with CPU hotplug.  Don't do anything to avoid
         * passing doms with an offlined cpu to partition_sched_domains().
         * Anyway, the hotplug work item will rebuild the sched domains.
         */
        if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
                goto out;

        /* Generate domain masks and attrs */
        ndoms = generate_sched_domains(&doms, &attr);

        /* Have scheduler rebuild the domains */
        partition_sched_domains(ndoms, doms, attr);
out:
        put_online_cpus();
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
        mutex_lock(&cpuset_mutex);
        rebuild_sched_domains_locked();
        mutex_unlock(&cpuset_mutex);
}

/*
 * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
 * @cs: the cpuset in interest
 *
 * A cpuset's effective cpumask is the cpumask of the nearest ancestor
 * with non-empty cpus.  We use the effective cpumask whenever:
 * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
 *   if the cpuset they reside in has no cpus)
 * - we want to retrieve task_cs(tsk)'s cpus_allowed.
 *
 * Called with cpuset_mutex held.  cpuset_cpus_allowed_fallback() is an
 * exception.  See comments there.
 */
static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
{
        while (cpumask_empty(cs->cpus_allowed))
                cs = parent_cs(cs);
        return cs;
}

/*
 * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
 * @cs: the cpuset in interest
 *
 * A cpuset's effective nodemask is the nodemask of the nearest ancestor
 * with non-empty mems.  We use the effective nodemask whenever:
 * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
 *   if the cpuset they reside in has no mems)
 * - we want to retrieve task_cs(tsk)'s mems_allowed.
 *
 * Called with cpuset_mutex held.
 */
static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
{
        while (nodes_empty(cs->mems_allowed))
                cs = parent_cs(cs);
        return cs;
}

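/*
 * Example (illustrative): in a hierarchy A (cpus 0-3) -> B (cpus empty),
 * effective_cpumask_cpuset(B) == A, so tasks sitting in B run on A's cpus.
 */
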
/**
 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 * @tsk: task to test
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 * cpus_allowed mask needs to be changed.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cpuset_mutex at this point.
 */
static void cpuset_change_cpumask(struct task_struct *tsk,
                                  struct cgroup_scanner *scan)
{
        struct cpuset *cpus_cs;

        cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cgrp));
        set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value.  It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
        struct cgroup_scanner scan;

        scan.cgrp = cs->css.cgroup;
        scan.test_task = NULL;
        scan.process_task = cpuset_change_cpumask;
        scan.heap = heap;
        cgroup_scan_tasks(&scan);
}

/*
 * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
 * @root_cs: the root cpuset of the hierarchy
 * @update_root: update root cpuset or not?
 * @heap: the heap used by cgroup_scan_tasks()
 *
 * This will update cpumasks of tasks in @root_cs and all other empty cpusets
 * which take on the cpumask of @root_cs.
 *
 * Called with cpuset_mutex held
 */
static void update_tasks_cpumask_hier(struct cpuset *root_cs,
                                      bool update_root, struct ptr_heap *heap)
{
        struct cpuset *cp;
        struct cgroup *pos_cgrp;

        if (update_root)
                update_tasks_cpumask(root_cs, heap);

        rcu_read_lock();
        cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
                /* skip the whole subtree if @cp has some CPUs */
                if (!cpumask_empty(cp->cpus_allowed)) {
                        pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
                        continue;
                }
                if (!css_tryget(&cp->css))
                        continue;
                rcu_read_unlock();

                update_tasks_cpumask(cp, heap);

                rcu_read_lock();
                css_put(&cp->css);
        }
        rcu_read_unlock();
}

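/*
 * Writes to a cpuset's "cpus" control file, e.g. (with the hierarchy
 * mounted as above, path purely illustrative)
 * "echo 0-3,6 > /dev/cpuset/mygroup/cpus", reach update_cpumask() below
 * via the cgroup file handlers.
 */
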
/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset carrying the proposed new cpus_allowed
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
                          const char *buf)
{
        struct ptr_heap heap;
        int retval;
        int is_load_balanced;

        /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
        if (cs == &top_cpuset)
                return -EACCES;

        /*
         * An empty cpus_allowed is ok only if the cpuset has no tasks.
         * Since cpulist_parse() fails on an empty mask, we special case
         * that parsing.  The validate_change() call ensures that cpusets
         * with tasks have cpus.
         */
        if (!*buf) {
                cpumask_clear(trialcs->cpus_allowed);
        } else {
                retval = cpulist_parse(buf, trialcs->cpus_allowed);
                if (retval < 0)
                        return retval;

                if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
                        return -EINVAL;
        }

        /* Nothing to do if the cpus didn't change */
        if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
                return 0;

        retval = validate_change(cs, trialcs);
        if (retval < 0)
                return retval;

        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
        if (retval)
                return retval;

        is_load_balanced = is_sched_load_balance(trialcs);

        mutex_lock(&callback_mutex);
        cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
        mutex_unlock(&callback_mutex);

        update_tasks_cpumask_hier(cs, true, &heap);

        heap_free(&heap);

        if (is_load_balanced)
                rebuild_sched_domains_locked();
        return 0;
}

/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set the task's mems_allowed to target nodes of migration,
 *    so that the migration code can allocate pages on these nodes.
 *
 *    Call holding cpuset_mutex, so current's cpuset won't change
 *    during this call, as cpuset_mutex holds off any cpuset_attach()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our task's cpuset.
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
                              const nodemask_t *to)
{
        struct task_struct *tsk = current;
        struct cpuset *mems_cs;

        tsk->mems_allowed = *to;

        do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

        mems_cs = effective_nodemask_cpuset(task_cs(tsk));
        guarantee_online_mems(mems_cs, &tsk->mems_allowed);
}

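/*
 * Illustration of the update ordering documented below: rebinding a task
 * from mems_allowed = {0,1} to {1,2} first widens the mask to {0,1,2}
 * (nodes_or), then stores {1,2}; the surviving node 1 stays allowed at
 * every intermediate point, so the task never sees an empty nodemask.
 */
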
/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: the new nodes that the task will be allowed
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
                                        nodemask_t *newmems)
{
        bool need_loop;

        /*
         * Allow tasks that have access to memory reserves because they have
         * been OOM killed to get memory anywhere.
         */
        if (unlikely(test_thread_flag(TIF_MEMDIE)))
                return;
        if (current->flags & PF_EXITING) /* Let dying task have memory */
                return;

        task_lock(tsk);
        /*
         * Determine if a loop is necessary if another thread is doing
         * get_mems_allowed().  If at least one node remains unchanged and
         * tsk does not have a mempolicy, then an empty nodemask will not be
         * possible when mems_allowed is larger than a word.
         */
        need_loop = task_has_mempolicy(tsk) ||
                        !nodes_intersects(*newmems, tsk->mems_allowed);

        if (need_loop)
                write_seqcount_begin(&tsk->mems_allowed_seq);

        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
        tsk->mems_allowed = *newmems;

        if (need_loop)
                write_seqcount_end(&tsk->mems_allowed_seq);

        task_unlock(tsk);
}

/*
 * Update a task's mems_allowed, rebind its mempolicy and the mempolicies
 * of its vmas to the cpuset's new mems_allowed, and migrate its pages to
 * the new nodes if the memory_migrate flag is set.  Called with
 * cpuset_mutex held.
 */
static void cpuset_change_nodemask(struct task_struct *p,
                                   struct cgroup_scanner *scan)
{
        struct cpuset *cs = cgroup_cs(scan->cgrp);
        struct mm_struct *mm;
        int migrate;
        nodemask_t *newmems = scan->data;

        cpuset_change_task_nodemask(p, newmems);

        mm = get_task_mm(p);
        if (!mm)
                return;

        migrate = is_memory_migrate(cs);

        mpol_rebind_mm(mm, &cs->mems_allowed);
        if (migrate)
                cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
        mmput(mm);
}

static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 * No return value.  It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
{
        static nodemask_t newmems;      /* protected by cpuset_mutex */
        struct cgroup_scanner scan;
        struct cpuset *mems_cs = effective_nodemask_cpuset(cs);

        cpuset_being_rebound = cs;      /* causes mpol_dup() rebind */

        guarantee_online_mems(mems_cs, &newmems);

        scan.cgrp = cs->css.cgroup;
        scan.test_task = NULL;
        scan.process_task = cpuset_change_nodemask;
        scan.heap = heap;
        scan.data = &newmems;

        /*
         * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
         * take while holding tasklist_lock.  Forks can happen - the
         * mpol_dup() cpuset_being_rebound check will catch such forks,
         * and rebind their vma mempolicies too.  Because we still hold
         * the global cpuset_mutex, we know that no other rebind effort
         * will be contending for the global variable cpuset_being_rebound.
         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
         * is idempotent.  Also migrate pages in each mm to new nodes.
         */
        cgroup_scan_tasks(&scan);

        /*
         * All the tasks' nodemasks have been updated, update
         * cs->old_mems_allowed.
         */
        cs->old_mems_allowed = newmems;

        /* We're done rebinding vmas to this cpuset's new mems_allowed. */
        cpuset_being_rebound = NULL;
}

Miao Xie0b2f6302008-07-25 01:47:21 -07001132/*
Li Zefan5c5cc622013-06-09 17:16:29 +08001133 * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
1134 * @cs: the root cpuset of the hierarchy
1135 * @update_root: update the root cpuset or not?
1136 * @heap: the heap used by cgroup_scan_tasks()
1137 *
1138 * This will update nodemasks of tasks in @root_cs and all other empty cpusets
1139 * which take on nodemask of @root_cs.
1140 *
1141 * Called with cpuset_mutex held
1142 */
1143static void update_tasks_nodemask_hier(struct cpuset *root_cs,
1144 bool update_root, struct ptr_heap *heap)
1145{
1146 struct cpuset *cp;
1147 struct cgroup *pos_cgrp;
1148
1149 if (update_root)
1150 update_tasks_nodemask(root_cs, heap);
1151
1152 rcu_read_lock();
1153 cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
 1154 /* skip the whole subtree if @cp has some memory */
1155 if (!nodes_empty(cp->mems_allowed)) {
1156 pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
1157 continue;
1158 }
1159 if (!css_tryget(&cp->css))
1160 continue;
1161 rcu_read_unlock();
1162
1163 update_tasks_nodemask(cp, heap);
1164
1165 rcu_read_lock();
1166 css_put(&cp->css);
1167 }
1168 rcu_read_unlock();
1169}
1170
1171/*
Miao Xie0b2f6302008-07-25 01:47:21 -07001172 * Handle user request to change the 'mems' memory placement
1173 * of a cpuset. Needs to validate the request, update the
Miao Xie58568d22009-06-16 15:31:49 -07001174 * cpuset's mems_allowed, and for each task in the cpuset,
 1175 * update mems_allowed and rebind task's mempolicy and any vma
 1176 * mempolicies and, if the cpuset is marked 'memory_migrate',
 1177 * migrate the task's pages to the new memory.
Miao Xie0b2f6302008-07-25 01:47:21 -07001178 *
Tejun Heo5d21cc22013-01-07 08:51:08 -08001179 * Call with cpuset_mutex held. May take callback_mutex during call.
Miao Xie0b2f6302008-07-25 01:47:21 -07001180 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 1181 * lock each such task's mm->mmap_sem, scan its vmas and rebind
 1182 * their mempolicies to the cpuset's new mems_allowed.
1183 */
Li Zefan645fcc92009-01-07 18:08:43 -08001184static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1185 const char *buf)
Miao Xie0b2f6302008-07-25 01:47:21 -07001186{
Miao Xie0b2f6302008-07-25 01:47:21 -07001187 int retval;
Li Zefan010cfac2009-04-02 16:57:52 -07001188 struct ptr_heap heap;
Miao Xie0b2f6302008-07-25 01:47:21 -07001189
1190 /*
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08001191 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
Miao Xie0b2f6302008-07-25 01:47:21 -07001192 * it's read-only
1193 */
Miao Xie53feb292010-03-23 13:35:35 -07001194 if (cs == &top_cpuset) {
1195 retval = -EACCES;
1196 goto done;
1197 }
Miao Xie0b2f6302008-07-25 01:47:21 -07001198
Miao Xie0b2f6302008-07-25 01:47:21 -07001199 /*
1200 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1201 * Since nodelist_parse() fails on an empty mask, we special case
1202 * that parsing. The validate_change() call ensures that cpusets
1203 * with tasks have memory.
1204 */
1205 if (!*buf) {
Li Zefan645fcc92009-01-07 18:08:43 -08001206 nodes_clear(trialcs->mems_allowed);
Miao Xie0b2f6302008-07-25 01:47:21 -07001207 } else {
Li Zefan645fcc92009-01-07 18:08:43 -08001208 retval = nodelist_parse(buf, trialcs->mems_allowed);
Miao Xie0b2f6302008-07-25 01:47:21 -07001209 if (retval < 0)
1210 goto done;
1211
Li Zefan645fcc92009-01-07 18:08:43 -08001212 if (!nodes_subset(trialcs->mems_allowed,
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08001213 node_states[N_MEMORY])) {
Miao Xie53feb292010-03-23 13:35:35 -07001214 retval = -EINVAL;
1215 goto done;
1216 }
Miao Xie0b2f6302008-07-25 01:47:21 -07001217 }
Li Zefan33ad8012013-06-09 17:15:08 +08001218
1219 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
Miao Xie0b2f6302008-07-25 01:47:21 -07001220 retval = 0; /* Too easy - nothing to do */
1221 goto done;
1222 }
Li Zefan645fcc92009-01-07 18:08:43 -08001223 retval = validate_change(cs, trialcs);
Miao Xie0b2f6302008-07-25 01:47:21 -07001224 if (retval < 0)
1225 goto done;
1226
Li Zefan010cfac2009-04-02 16:57:52 -07001227 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1228 if (retval < 0)
1229 goto done;
1230
Miao Xie0b2f6302008-07-25 01:47:21 -07001231 mutex_lock(&callback_mutex);
Li Zefan645fcc92009-01-07 18:08:43 -08001232 cs->mems_allowed = trialcs->mems_allowed;
Miao Xie0b2f6302008-07-25 01:47:21 -07001233 mutex_unlock(&callback_mutex);
1234
Li Zefan5c5cc622013-06-09 17:16:29 +08001235 update_tasks_nodemask_hier(cs, true, &heap);
Li Zefan010cfac2009-04-02 16:57:52 -07001236
1237 heap_free(&heap);
Miao Xie0b2f6302008-07-25 01:47:21 -07001238done:
1239 return retval;
1240}
1241
Paul Menage8793d852007-10-18 23:39:39 -07001242int current_cpuset_is_being_rebound(void)
1243{
1244 return task_cs(current) == cpuset_being_rebound;
1245}
1246
Paul Menage5be7a472008-05-06 20:42:41 -07001247static int update_relax_domain_level(struct cpuset *cs, s64 val)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001248{
Paul Menagedb7f47c2009-04-02 16:57:55 -07001249#ifdef CONFIG_SMP
Peter Zijlstra60495e72011-04-07 14:10:04 +02001250 if (val < -1 || val >= sched_domain_level_max)
Li Zefan30e0e172008-05-13 10:27:17 +08001251 return -EINVAL;
Paul Menagedb7f47c2009-04-02 16:57:55 -07001252#endif
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001253
1254 if (val != cs->relax_domain_level) {
1255 cs->relax_domain_level = val;
Li Zefan300ed6c2009-01-07 18:08:44 -08001256 if (!cpumask_empty(cs->cpus_allowed) &&
1257 is_sched_load_balance(cs))
Tejun Heo699140b2013-01-07 08:51:07 -08001258 rebuild_sched_domains_locked();
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001259 }
1260
1261 return 0;
1262}
1263
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001264/*
Miao Xie950592f2009-06-16 15:31:47 -07001265 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1266 * @tsk: task to be updated
1267 * @scan: struct cgroup_scanner containing the cgroup of the task
1268 *
1269 * Called by cgroup_scan_tasks() for each task in a cgroup.
1270 *
1271 * We don't need to re-check for the cgroup/cpuset membership, since we're
Tejun Heo5d21cc22013-01-07 08:51:08 -08001272 * holding cpuset_mutex at this point.
Miao Xie950592f2009-06-16 15:31:47 -07001273 */
1274static void cpuset_change_flag(struct task_struct *tsk,
1275 struct cgroup_scanner *scan)
1276{
Li Zefan6f4b7e62013-07-31 16:18:36 +08001277 cpuset_update_task_spread_flag(cgroup_cs(scan->cgrp), tsk);
Miao Xie950592f2009-06-16 15:31:47 -07001278}
1279
1280/*
1281 * update_tasks_flags - update the spread flags of tasks in the cpuset.
 1282 * @cs: the cpuset in which each task's spread flags need to be changed
1283 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1284 *
Tejun Heo5d21cc22013-01-07 08:51:08 -08001285 * Called with cpuset_mutex held
Miao Xie950592f2009-06-16 15:31:47 -07001286 *
1287 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1288 * calling callback functions for each.
1289 *
1290 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1291 * if @heap != NULL.
1292 */
1293static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1294{
1295 struct cgroup_scanner scan;
1296
Li Zefan6f4b7e62013-07-31 16:18:36 +08001297 scan.cgrp = cs->css.cgroup;
Miao Xie950592f2009-06-16 15:31:47 -07001298 scan.test_task = NULL;
1299 scan.process_task = cpuset_change_flag;
1300 scan.heap = heap;
1301 cgroup_scan_tasks(&scan);
1302}
1303
1304/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 * update_flag - read a 0 or a 1 in a file and update associated flag
Paul Menage78608362008-04-29 01:00:26 -07001306 * bit: the bit to update (see cpuset_flagbits_t)
1307 * cs: the cpuset to update
1308 * turning_on: whether the flag is being set or cleared
Paul Jackson053199e2005-10-30 15:02:30 -08001309 *
Tejun Heo5d21cc22013-01-07 08:51:08 -08001310 * Call with cpuset_mutex held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 */
1312
Paul Menage700fe1a2008-04-29 01:00:00 -07001313static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1314 int turning_on)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315{
Li Zefan645fcc92009-01-07 18:08:43 -08001316 struct cpuset *trialcs;
Rakib Mullick40b6a762008-10-18 20:28:18 -07001317 int balance_flag_changed;
Miao Xie950592f2009-06-16 15:31:47 -07001318 int spread_flag_changed;
1319 struct ptr_heap heap;
1320 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321
Li Zefan645fcc92009-01-07 18:08:43 -08001322 trialcs = alloc_trial_cpuset(cs);
1323 if (!trialcs)
1324 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
Li Zefan645fcc92009-01-07 18:08:43 -08001326 if (turning_on)
1327 set_bit(bit, &trialcs->flags);
1328 else
1329 clear_bit(bit, &trialcs->flags);
1330
1331 err = validate_change(cs, trialcs);
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -07001332 if (err < 0)
Li Zefan645fcc92009-01-07 18:08:43 -08001333 goto out;
Paul Jackson029190c2007-10-18 23:40:20 -07001334
Miao Xie950592f2009-06-16 15:31:47 -07001335 err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1336 if (err < 0)
1337 goto out;
1338
Paul Jackson029190c2007-10-18 23:40:20 -07001339 balance_flag_changed = (is_sched_load_balance(cs) !=
Li Zefan645fcc92009-01-07 18:08:43 -08001340 is_sched_load_balance(trialcs));
Paul Jackson029190c2007-10-18 23:40:20 -07001341
Miao Xie950592f2009-06-16 15:31:47 -07001342 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1343 || (is_spread_page(cs) != is_spread_page(trialcs)));
1344
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001345 mutex_lock(&callback_mutex);
Li Zefan645fcc92009-01-07 18:08:43 -08001346 cs->flags = trialcs->flags;
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001347 mutex_unlock(&callback_mutex);
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -07001348
Li Zefan300ed6c2009-01-07 18:08:44 -08001349 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
Tejun Heo699140b2013-01-07 08:51:07 -08001350 rebuild_sched_domains_locked();
Paul Jackson029190c2007-10-18 23:40:20 -07001351
Miao Xie950592f2009-06-16 15:31:47 -07001352 if (spread_flag_changed)
1353 update_tasks_flags(cs, &heap);
1354 heap_free(&heap);
Li Zefan645fcc92009-01-07 18:08:43 -08001355out:
1356 free_trial_cpuset(trialcs);
1357 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358}
1359
Paul Jackson053199e2005-10-30 15:02:30 -08001360/*
Adrian Bunk80f72282006-06-30 18:27:16 +02001361 * Frequency meter - How fast is some event occurring?
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001362 *
1363 * These routines manage a digitally filtered, constant time based,
1364 * event frequency meter. There are four routines:
1365 * fmeter_init() - initialize a frequency meter.
1366 * fmeter_markevent() - called each time the event happens.
1367 * fmeter_getrate() - returns the recent rate of such events.
1368 * fmeter_update() - internal routine used to update fmeter.
1369 *
1370 * A common data structure is passed to each of these routines,
1371 * which is used to keep track of the state required to manage the
1372 * frequency meter and its digital filter.
1373 *
1374 * The filter works on the number of events marked per unit time.
1375 * The filter is single-pole low-pass recursive (IIR). The time unit
1376 * is 1 second. Arithmetic is done using 32-bit integers scaled to
1377 * simulate 3 decimal digits of precision (multiplied by 1000).
1378 *
1379 * With an FM_COEF of 933, and a time base of 1 second, the filter
1380 * has a half-life of 10 seconds, meaning that if the events quit
1381 * happening, then the rate returned from the fmeter_getrate()
1382 * will be cut in half each 10 seconds, until it converges to zero.
1383 *
1384 * It is not worth doing a real infinitely recursive filter. If more
1385 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1386 * just compute FM_MAXTICKS ticks worth, by which point the level
1387 * will be stable.
1388 *
1389 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1390 * arithmetic overflow in the fmeter_update() routine.
1391 *
1392 * Given the simple 32 bit integer arithmetic used, this meter works
1393 * best for reporting rates between one per millisecond (msec) and
1394 * one per 32 (approx) seconds. At constant rates faster than one
1395 * per msec it maxes out at values just under 1,000,000. At constant
 1396 * rates between one per msec and one per second, it will stabilize
1397 * to a value N*1000, where N is the rate of events per second.
1398 * At constant rates between one per second and one per 32 seconds,
1399 * it will be choppy, moving up on the seconds that have an event,
1400 * and then decaying until the next event. At rates slower than
1401 * about one in 32 seconds, it decays all the way back to zero between
1402 * each event.
1403 */
1404
1405#define FM_COEF 933 /* coefficient for half-life of 10 secs */
1406#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1407#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
1408#define FM_SCALE 1000 /* faux fixed point scale */
1409
1410/* Initialize a frequency meter */
1411static void fmeter_init(struct fmeter *fmp)
1412{
1413 fmp->cnt = 0;
1414 fmp->val = 0;
1415 fmp->time = 0;
1416 spin_lock_init(&fmp->lock);
1417}
1418
1419/* Internal meter update - process cnt events and update value */
1420static void fmeter_update(struct fmeter *fmp)
1421{
1422 time_t now = get_seconds();
1423 time_t ticks = now - fmp->time;
1424
1425 if (ticks == 0)
1426 return;
1427
1428 ticks = min(FM_MAXTICKS, ticks);
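		/* apply one second's worth of exponential decay per elapsed tick */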
1429 while (ticks-- > 0)
1430 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1431 fmp->time = now;
1432
1433 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1434 fmp->cnt = 0;
1435}
1436
1437/* Process any previous ticks, then bump cnt by one (times scale). */
1438static void fmeter_markevent(struct fmeter *fmp)
1439{
1440 spin_lock(&fmp->lock);
1441 fmeter_update(fmp);
1442 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1443 spin_unlock(&fmp->lock);
1444}
1445
1446/* Process any previous ticks, then return current value. */
1447static int fmeter_getrate(struct fmeter *fmp)
1448{
1449 int val;
1450
1451 spin_lock(&fmp->lock);
1452 fmeter_update(fmp);
1453 val = fmp->val;
1454 spin_unlock(&fmp->lock);
1455 return val;
1456}
1457
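/*
 * Editor's illustration, not part of kernel/cpuset.c: a minimal user-space
 * sketch checking the claimed 10 second half-life of FM_COEF == 933. The
 * DEMO_* names and the program itself are hypothetical; the arithmetic
 * mirrors the decay loop in fmeter_update() above.
 */
#include <stdio.h>

#define DEMO_FM_COEF	933	/* same coefficient as FM_COEF */
#define DEMO_FM_SCALE	1000	/* same scale as FM_SCALE */

int main(void)
{
	int val = 1000000;	/* a saturated meter, then events stop */
	int t;

	for (t = 1; t <= 30; t++) {
		/* one second of decay, as in fmeter_update()'s while loop */
		val = (DEMO_FM_COEF * val) / DEMO_FM_SCALE;
		if (t % 10 == 0)
			printf("after %2ds: val = %d\n", t, val);
	}
	/* prints values near 500000, 250000 and 125000: halving each 10s */
	return 0;
}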
Tejun Heo5d21cc22013-01-07 08:51:08 -08001458/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
Li Zefan761b3ef2012-01-31 13:47:36 +08001459static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
Ben Blumf780bdb2011-05-26 16:25:19 -07001460{
Tejun Heo2f7ee562011-12-12 18:12:21 -08001461 struct cpuset *cs = cgroup_cs(cgrp);
Tejun Heobb9d97b2011-12-12 18:12:21 -08001462 struct task_struct *task;
1463 int ret;
Ben Blumf780bdb2011-05-26 16:25:19 -07001464
Tejun Heo5d21cc22013-01-07 08:51:08 -08001465 mutex_lock(&cpuset_mutex);
1466
Li Zefan88fa5232013-06-09 17:16:46 +08001467 /*
1468 * We allow to move tasks into an empty cpuset if sane_behavior
1469 * flag is set.
1470 */
Tejun Heo5d21cc22013-01-07 08:51:08 -08001471 ret = -ENOSPC;
Li Zefan88fa5232013-06-09 17:16:46 +08001472 if (!cgroup_sane_behavior(cgrp) &&
1473 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
Tejun Heo5d21cc22013-01-07 08:51:08 -08001474 goto out_unlock;
Ben Blumbe367d02009-09-23 15:56:31 -07001475
Tejun Heobb9d97b2011-12-12 18:12:21 -08001476 cgroup_taskset_for_each(task, cgrp, tset) {
1477 /*
Tejun Heo14a40ff2013-03-19 13:45:20 -07001478 * Kthreads which disallow setaffinity shouldn't be moved
1479 * to a new cpuset; we don't want to change their cpu
1480 * affinity and isolating such threads by their set of
1481 * allowed nodes is unnecessary. Thus, cpusets are not
1482 * applicable for such threads. This prevents checking for
1483 * success of set_cpus_allowed_ptr() on all attached tasks
1484 * before cpus_allowed may be changed.
Tejun Heobb9d97b2011-12-12 18:12:21 -08001485 */
Tejun Heo5d21cc22013-01-07 08:51:08 -08001486 ret = -EINVAL;
Tejun Heo14a40ff2013-03-19 13:45:20 -07001487 if (task->flags & PF_NO_SETAFFINITY)
Tejun Heo5d21cc22013-01-07 08:51:08 -08001488 goto out_unlock;
1489 ret = security_task_setscheduler(task);
1490 if (ret)
1491 goto out_unlock;
Tejun Heobb9d97b2011-12-12 18:12:21 -08001492 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493
Tejun Heo452477f2013-01-07 08:51:07 -08001494 /*
 1495 * Mark that an attach is in progress. This makes validate_change() fail
1496 * changes which zero cpus/mems_allowed.
1497 */
1498 cs->attach_in_progress++;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001499 ret = 0;
1500out_unlock:
1501 mutex_unlock(&cpuset_mutex);
1502 return ret;
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001503}
1504
Tejun Heo452477f2013-01-07 08:51:07 -08001505static void cpuset_cancel_attach(struct cgroup *cgrp,
1506 struct cgroup_taskset *tset)
1507{
Tejun Heo5d21cc22013-01-07 08:51:08 -08001508 mutex_lock(&cpuset_mutex);
Tejun Heo452477f2013-01-07 08:51:07 -08001509 cgroup_cs(cgrp)->attach_in_progress--;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001510 mutex_unlock(&cpuset_mutex);
Tejun Heo452477f2013-01-07 08:51:07 -08001511}
1512
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001513/*
Tejun Heo5d21cc22013-01-07 08:51:08 -08001514 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001515 * but we can't allocate it dynamically there. Define it global and
1516 * allocate from cpuset_init().
1517 */
1518static cpumask_var_t cpus_attach;
1519
1520static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1521{
Li Zefan67bd2c52013-06-05 17:15:35 +08001522 /* static buf protected by cpuset_mutex */
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001523 static nodemask_t cpuset_attach_nodemask_to;
1524 struct mm_struct *mm;
1525 struct task_struct *task;
1526 struct task_struct *leader = cgroup_taskset_first(tset);
1527 struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
1528 struct cpuset *cs = cgroup_cs(cgrp);
1529 struct cpuset *oldcs = cgroup_cs(oldcgrp);
Li Zefan070b57f2013-06-09 17:15:22 +08001530 struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
1531 struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001532
Tejun Heo5d21cc22013-01-07 08:51:08 -08001533 mutex_lock(&cpuset_mutex);
1534
Tejun Heo94196f52011-12-12 18:12:22 -08001535 /* prepare for attach */
Ben Blumf780bdb2011-05-26 16:25:19 -07001536 if (cs == &top_cpuset)
1537 cpumask_copy(cpus_attach, cpu_possible_mask);
1538 else
Li Zefan070b57f2013-06-09 17:15:22 +08001539 guarantee_online_cpus(cpus_cs, cpus_attach);
Ben Blumf780bdb2011-05-26 16:25:19 -07001540
Li Zefan070b57f2013-06-09 17:15:22 +08001541 guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
Tejun Heo94196f52011-12-12 18:12:22 -08001542
Tejun Heobb9d97b2011-12-12 18:12:21 -08001543 cgroup_taskset_for_each(task, cgrp, tset) {
1544 /*
1545 * can_attach beforehand should guarantee that this doesn't
1546 * fail. TODO: have a better way to handle failure here
1547 */
1548 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1549
1550 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1551 cpuset_update_task_spread_flag(cs, task);
1552 }
David Quigley22fb52d2006-06-23 02:04:00 -07001553
Ben Blumf780bdb2011-05-26 16:25:19 -07001554 /*
1555 * Change mm, possibly for multiple threads in a threadgroup. This is
1556 * expensive and may sleep.
1557 */
Ben Blumf780bdb2011-05-26 16:25:19 -07001558 cpuset_attach_nodemask_to = cs->mems_allowed;
Tejun Heobb9d97b2011-12-12 18:12:21 -08001559 mm = get_task_mm(leader);
Paul Jackson42253992006-01-08 01:01:59 -08001560 if (mm) {
Li Zefan070b57f2013-06-09 17:15:22 +08001561 struct cpuset *mems_oldcs = effective_nodemask_cpuset(oldcs);
1562
Ben Blumf780bdb2011-05-26 16:25:19 -07001563 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
Li Zefanf047cec2013-06-13 15:11:44 +08001564
1565 /*
 1566 * old_mems_allowed is the same as mems_allowed here, except
 1567 * if this task is being moved automatically due to hotplug.
 1568 * In that case @mems_allowed has been updated and is empty,
 1569 * so @old_mems_allowed is the right nodeset to migrate the
 1570 * mm from.
1571 */
1572 if (is_memory_migrate(cs)) {
1573 cpuset_migrate_mm(mm, &mems_oldcs->old_mems_allowed,
Ben Blumf780bdb2011-05-26 16:25:19 -07001574 &cpuset_attach_nodemask_to);
Li Zefanf047cec2013-06-13 15:11:44 +08001575 }
Paul Jackson42253992006-01-08 01:01:59 -08001576 mmput(mm);
1577 }
Tejun Heo452477f2013-01-07 08:51:07 -08001578
Li Zefan33ad8012013-06-09 17:15:08 +08001579 cs->old_mems_allowed = cpuset_attach_nodemask_to;
Tejun Heo02bb5862013-01-07 08:51:08 -08001580
Tejun Heo452477f2013-01-07 08:51:07 -08001581 cs->attach_in_progress--;
Li Zefane44193d2013-06-09 17:14:22 +08001582 if (!cs->attach_in_progress)
1583 wake_up(&cpuset_attach_wq);
Tejun Heo5d21cc22013-01-07 08:51:08 -08001584
1585 mutex_unlock(&cpuset_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586}
1587
1588/* The various types of files and directories in a cpuset file system */
1589
1590typedef enum {
Paul Jackson45b07ef2006-01-08 01:00:56 -08001591 FILE_MEMORY_MIGRATE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 FILE_CPULIST,
1593 FILE_MEMLIST,
1594 FILE_CPU_EXCLUSIVE,
1595 FILE_MEM_EXCLUSIVE,
Paul Menage78608362008-04-29 01:00:26 -07001596 FILE_MEM_HARDWALL,
Paul Jackson029190c2007-10-18 23:40:20 -07001597 FILE_SCHED_LOAD_BALANCE,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001598 FILE_SCHED_RELAX_DOMAIN_LEVEL,
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001599 FILE_MEMORY_PRESSURE_ENABLED,
1600 FILE_MEMORY_PRESSURE,
Paul Jackson825a46a2006-03-24 03:16:03 -08001601 FILE_SPREAD_PAGE,
1602 FILE_SPREAD_SLAB,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603} cpuset_filetype_t;
1604
Paul Menage700fe1a2008-04-29 01:00:00 -07001605static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1606{
Paul Menage700fe1a2008-04-29 01:00:00 -07001607 struct cpuset *cs = cgroup_cs(cgrp);
1608 cpuset_filetype_t type = cft->private;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001609 int retval = -ENODEV;
Paul Menage700fe1a2008-04-29 01:00:00 -07001610
Tejun Heo5d21cc22013-01-07 08:51:08 -08001611 mutex_lock(&cpuset_mutex);
1612 if (!is_cpuset_online(cs))
1613 goto out_unlock;
Paul Menage700fe1a2008-04-29 01:00:00 -07001614
1615 switch (type) {
1616 case FILE_CPU_EXCLUSIVE:
1617 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1618 break;
1619 case FILE_MEM_EXCLUSIVE:
1620 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1621 break;
Paul Menage78608362008-04-29 01:00:26 -07001622 case FILE_MEM_HARDWALL:
1623 retval = update_flag(CS_MEM_HARDWALL, cs, val);
1624 break;
Paul Menage700fe1a2008-04-29 01:00:00 -07001625 case FILE_SCHED_LOAD_BALANCE:
1626 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1627 break;
1628 case FILE_MEMORY_MIGRATE:
1629 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1630 break;
1631 case FILE_MEMORY_PRESSURE_ENABLED:
1632 cpuset_memory_pressure_enabled = !!val;
1633 break;
1634 case FILE_MEMORY_PRESSURE:
1635 retval = -EACCES;
1636 break;
1637 case FILE_SPREAD_PAGE:
1638 retval = update_flag(CS_SPREAD_PAGE, cs, val);
Paul Menage700fe1a2008-04-29 01:00:00 -07001639 break;
1640 case FILE_SPREAD_SLAB:
1641 retval = update_flag(CS_SPREAD_SLAB, cs, val);
Paul Menage700fe1a2008-04-29 01:00:00 -07001642 break;
1643 default:
1644 retval = -EINVAL;
1645 break;
1646 }
Tejun Heo5d21cc22013-01-07 08:51:08 -08001647out_unlock:
1648 mutex_unlock(&cpuset_mutex);
Paul Menage700fe1a2008-04-29 01:00:00 -07001649 return retval;
1650}
1651
Paul Menage5be7a472008-05-06 20:42:41 -07001652static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1653{
Paul Menage5be7a472008-05-06 20:42:41 -07001654 struct cpuset *cs = cgroup_cs(cgrp);
1655 cpuset_filetype_t type = cft->private;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001656 int retval = -ENODEV;
Paul Menage5be7a472008-05-06 20:42:41 -07001657
Tejun Heo5d21cc22013-01-07 08:51:08 -08001658 mutex_lock(&cpuset_mutex);
1659 if (!is_cpuset_online(cs))
1660 goto out_unlock;
Paul Menagee3712392008-07-25 01:47:02 -07001661
Paul Menage5be7a472008-05-06 20:42:41 -07001662 switch (type) {
1663 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1664 retval = update_relax_domain_level(cs, val);
1665 break;
1666 default:
1667 retval = -EINVAL;
1668 break;
1669 }
Tejun Heo5d21cc22013-01-07 08:51:08 -08001670out_unlock:
1671 mutex_unlock(&cpuset_mutex);
Paul Menage5be7a472008-05-06 20:42:41 -07001672 return retval;
1673}
1674
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675/*
Paul Menagee3712392008-07-25 01:47:02 -07001676 * Common handling for a write to a "cpus" or "mems" file.
1677 */
1678static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1679 const char *buf)
1680{
Li Zefan645fcc92009-01-07 18:08:43 -08001681 struct cpuset *cs = cgroup_cs(cgrp);
1682 struct cpuset *trialcs;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001683 int retval = -ENODEV;
Paul Menagee3712392008-07-25 01:47:02 -07001684
Tejun Heo3a5a6d02013-01-07 08:51:07 -08001685 /*
1686 * CPU or memory hotunplug may leave @cs w/o any execution
1687 * resources, in which case the hotplug code asynchronously updates
1688 * configuration and transfers all tasks to the nearest ancestor
1689 * which can execute.
1690 *
1691 * As writes to "cpus" or "mems" may restore @cs's execution
1692 * resources, wait for the previously scheduled operations before
 1693 * proceeding, so that we don't keep removing tasks added
1694 * after execution capability is restored.
1695 */
1696 flush_work(&cpuset_hotplug_work);
1697
Tejun Heo5d21cc22013-01-07 08:51:08 -08001698 mutex_lock(&cpuset_mutex);
1699 if (!is_cpuset_online(cs))
1700 goto out_unlock;
Paul Menagee3712392008-07-25 01:47:02 -07001701
Li Zefan645fcc92009-01-07 18:08:43 -08001702 trialcs = alloc_trial_cpuset(cs);
Li Zefanb75f38d2011-03-04 17:36:21 -08001703 if (!trialcs) {
1704 retval = -ENOMEM;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001705 goto out_unlock;
Li Zefanb75f38d2011-03-04 17:36:21 -08001706 }
Li Zefan645fcc92009-01-07 18:08:43 -08001707
Paul Menagee3712392008-07-25 01:47:02 -07001708 switch (cft->private) {
1709 case FILE_CPULIST:
Li Zefan645fcc92009-01-07 18:08:43 -08001710 retval = update_cpumask(cs, trialcs, buf);
Paul Menagee3712392008-07-25 01:47:02 -07001711 break;
1712 case FILE_MEMLIST:
Li Zefan645fcc92009-01-07 18:08:43 -08001713 retval = update_nodemask(cs, trialcs, buf);
Paul Menagee3712392008-07-25 01:47:02 -07001714 break;
1715 default:
1716 retval = -EINVAL;
1717 break;
1718 }
Li Zefan645fcc92009-01-07 18:08:43 -08001719
1720 free_trial_cpuset(trialcs);
Tejun Heo5d21cc22013-01-07 08:51:08 -08001721out_unlock:
1722 mutex_unlock(&cpuset_mutex);
Paul Menagee3712392008-07-25 01:47:02 -07001723 return retval;
1724}
1725
1726/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 * These ascii lists should be read in a single call, by using a user
1728 * buffer large enough to hold the entire map. If read in smaller
1729 * chunks, there is no guarantee of atomicity. Since the display format
 1730 * used, a list of ranges of sequential numbers, is variable length,
1731 * and since these maps can change value dynamically, one could read
1732 * gibberish by doing partial reads while a list was changing.
1733 * A single large read to a buffer that crosses a page boundary is
1734 * ok, because the result being copied to user land is not recomputed
1735 * across a page fault.
1736 */
1737
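/*
 * Editor's illustration, not part of kernel/cpuset.c: how user space can
 * honor the single-read requirement described above. The mount point (a
 * legacy cpuset hierarchy at /sys/fs/cgroup/cpuset) is an assumption;
 * adjust the path for the system at hand.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* one page, enough for the whole list */
	ssize_t n;
	int fd = open("/sys/fs/cgroup/cpuset/cpuset.cpus", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* one large read; partial reads could interleave with updates */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n >= 0) {
		buf[n] = '\0';
		printf("cpus: %s", buf);
	}
	close(fd);
	return 0;
}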
Li Zefan9303e0c2011-03-23 16:42:45 -07001738static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739{
Li Zefan9303e0c2011-03-23 16:42:45 -07001740 size_t count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001742 mutex_lock(&callback_mutex);
Li Zefan9303e0c2011-03-23 16:42:45 -07001743 count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001744 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
Li Zefan9303e0c2011-03-23 16:42:45 -07001746 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747}
1748
Li Zefan9303e0c2011-03-23 16:42:45 -07001749static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750{
Li Zefan9303e0c2011-03-23 16:42:45 -07001751 size_t count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001753 mutex_lock(&callback_mutex);
Li Zefan9303e0c2011-03-23 16:42:45 -07001754 count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001755 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756
Li Zefan9303e0c2011-03-23 16:42:45 -07001757 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758}
1759
Li Zefanc9e5fe62013-06-14 11:18:27 +08001760static ssize_t cpuset_common_file_read(struct cgroup *cgrp,
Paul Menage8793d852007-10-18 23:39:39 -07001761 struct cftype *cft,
1762 struct file *file,
1763 char __user *buf,
1764 size_t nbytes, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765{
Li Zefanc9e5fe62013-06-14 11:18:27 +08001766 struct cpuset *cs = cgroup_cs(cgrp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 cpuset_filetype_t type = cft->private;
1768 char *page;
1769 ssize_t retval = 0;
1770 char *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771
Mel Gormane12ba742007-10-16 01:25:52 -07001772 if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 return -ENOMEM;
1774
1775 s = page;
1776
1777 switch (type) {
1778 case FILE_CPULIST:
1779 s += cpuset_sprintf_cpulist(s, cs);
1780 break;
1781 case FILE_MEMLIST:
1782 s += cpuset_sprintf_memlist(s, cs);
1783 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 default:
1785 retval = -EINVAL;
1786 goto out;
1787 }
1788 *s++ = '\n';
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789
Al Viroeacaa1f2005-09-30 03:26:43 +01001790 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791out:
1792 free_page((unsigned long)page);
1793 return retval;
1794}
1795
Li Zefanc9e5fe62013-06-14 11:18:27 +08001796static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
Paul Menage700fe1a2008-04-29 01:00:00 -07001797{
Li Zefanc9e5fe62013-06-14 11:18:27 +08001798 struct cpuset *cs = cgroup_cs(cgrp);
Paul Menage700fe1a2008-04-29 01:00:00 -07001799 cpuset_filetype_t type = cft->private;
1800 switch (type) {
1801 case FILE_CPU_EXCLUSIVE:
1802 return is_cpu_exclusive(cs);
1803 case FILE_MEM_EXCLUSIVE:
1804 return is_mem_exclusive(cs);
Paul Menage78608362008-04-29 01:00:26 -07001805 case FILE_MEM_HARDWALL:
1806 return is_mem_hardwall(cs);
Paul Menage700fe1a2008-04-29 01:00:00 -07001807 case FILE_SCHED_LOAD_BALANCE:
1808 return is_sched_load_balance(cs);
1809 case FILE_MEMORY_MIGRATE:
1810 return is_memory_migrate(cs);
1811 case FILE_MEMORY_PRESSURE_ENABLED:
1812 return cpuset_memory_pressure_enabled;
1813 case FILE_MEMORY_PRESSURE:
1814 return fmeter_getrate(&cs->fmeter);
1815 case FILE_SPREAD_PAGE:
1816 return is_spread_page(cs);
1817 case FILE_SPREAD_SLAB:
1818 return is_spread_slab(cs);
1819 default:
1820 BUG();
1821 }
Max Krasnyanskycf417142008-08-11 14:33:53 -07001822
1823 /* Unreachable but makes gcc happy */
1824 return 0;
Paul Menage700fe1a2008-04-29 01:00:00 -07001825}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
Li Zefanc9e5fe62013-06-14 11:18:27 +08001827static s64 cpuset_read_s64(struct cgroup *cgrp, struct cftype *cft)
Paul Menage5be7a472008-05-06 20:42:41 -07001828{
Li Zefanc9e5fe62013-06-14 11:18:27 +08001829 struct cpuset *cs = cgroup_cs(cgrp);
Paul Menage5be7a472008-05-06 20:42:41 -07001830 cpuset_filetype_t type = cft->private;
1831 switch (type) {
1832 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1833 return cs->relax_domain_level;
1834 default:
1835 BUG();
1836 }
Max Krasnyanskycf417142008-08-11 14:33:53 -07001837
 1838 /* Unreachable but makes gcc happy */
1839 return 0;
Paul Menage5be7a472008-05-06 20:42:41 -07001840}
1841
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
1843/*
1844 * for the common functions, 'private' gives the type of file
1845 */
1846
Paul Menageaddf2c72008-04-29 01:00:26 -07001847static struct cftype files[] = {
1848 {
1849 .name = "cpus",
1850 .read = cpuset_common_file_read,
Paul Menagee3712392008-07-25 01:47:02 -07001851 .write_string = cpuset_write_resmask,
1852 .max_write_len = (100U + 6 * NR_CPUS),
Paul Menageaddf2c72008-04-29 01:00:26 -07001853 .private = FILE_CPULIST,
1854 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855
Paul Menageaddf2c72008-04-29 01:00:26 -07001856 {
1857 .name = "mems",
1858 .read = cpuset_common_file_read,
Paul Menagee3712392008-07-25 01:47:02 -07001859 .write_string = cpuset_write_resmask,
1860 .max_write_len = (100U + 6 * MAX_NUMNODES),
Paul Menageaddf2c72008-04-29 01:00:26 -07001861 .private = FILE_MEMLIST,
1862 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Paul Menageaddf2c72008-04-29 01:00:26 -07001864 {
1865 .name = "cpu_exclusive",
1866 .read_u64 = cpuset_read_u64,
1867 .write_u64 = cpuset_write_u64,
1868 .private = FILE_CPU_EXCLUSIVE,
1869 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870
Paul Menageaddf2c72008-04-29 01:00:26 -07001871 {
1872 .name = "mem_exclusive",
1873 .read_u64 = cpuset_read_u64,
1874 .write_u64 = cpuset_write_u64,
1875 .private = FILE_MEM_EXCLUSIVE,
1876 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877
Paul Menageaddf2c72008-04-29 01:00:26 -07001878 {
Paul Menage78608362008-04-29 01:00:26 -07001879 .name = "mem_hardwall",
1880 .read_u64 = cpuset_read_u64,
1881 .write_u64 = cpuset_write_u64,
1882 .private = FILE_MEM_HARDWALL,
1883 },
1884
1885 {
Paul Menageaddf2c72008-04-29 01:00:26 -07001886 .name = "sched_load_balance",
1887 .read_u64 = cpuset_read_u64,
1888 .write_u64 = cpuset_write_u64,
1889 .private = FILE_SCHED_LOAD_BALANCE,
1890 },
Paul Jackson029190c2007-10-18 23:40:20 -07001891
Paul Menageaddf2c72008-04-29 01:00:26 -07001892 {
1893 .name = "sched_relax_domain_level",
Paul Menage5be7a472008-05-06 20:42:41 -07001894 .read_s64 = cpuset_read_s64,
1895 .write_s64 = cpuset_write_s64,
Paul Menageaddf2c72008-04-29 01:00:26 -07001896 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1897 },
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001898
Paul Menageaddf2c72008-04-29 01:00:26 -07001899 {
1900 .name = "memory_migrate",
1901 .read_u64 = cpuset_read_u64,
1902 .write_u64 = cpuset_write_u64,
1903 .private = FILE_MEMORY_MIGRATE,
1904 },
1905
1906 {
1907 .name = "memory_pressure",
1908 .read_u64 = cpuset_read_u64,
1909 .write_u64 = cpuset_write_u64,
1910 .private = FILE_MEMORY_PRESSURE,
Li Zefan099fca32009-04-02 16:57:29 -07001911 .mode = S_IRUGO,
Paul Menageaddf2c72008-04-29 01:00:26 -07001912 },
1913
1914 {
1915 .name = "memory_spread_page",
1916 .read_u64 = cpuset_read_u64,
1917 .write_u64 = cpuset_write_u64,
1918 .private = FILE_SPREAD_PAGE,
1919 },
1920
1921 {
1922 .name = "memory_spread_slab",
1923 .read_u64 = cpuset_read_u64,
1924 .write_u64 = cpuset_write_u64,
1925 .private = FILE_SPREAD_SLAB,
1926 },
Tejun Heo4baf6e32012-04-01 12:09:55 -07001927
1928 {
1929 .name = "memory_pressure_enabled",
1930 .flags = CFTYPE_ONLY_ON_ROOT,
1931 .read_u64 = cpuset_read_u64,
1932 .write_u64 = cpuset_write_u64,
1933 .private = FILE_MEMORY_PRESSURE_ENABLED,
1934 },
1935
1936 { } /* terminate */
Paul Jackson45b07ef2006-01-08 01:00:56 -08001937};
1938
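/*
 * Editor's illustration, not part of kernel/cpuset.c: driving the control
 * files declared above from user space. The cgroup path and the "demo"
 * child cpuset are hypothetical; each write must carry the complete new
 * value (a cpulist for "cpus", a nodelist for "mems", 0/1 for the flags).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_cpuset_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* handled by cpuset_write_resmask() or cpuset_write_u64() above */
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* assumes a child cpuset was created beforehand with mkdir */
	if (write_cpuset_file("/sys/fs/cgroup/cpuset/demo/cpuset.cpus", "0-3"))
		perror("cpus");
	if (write_cpuset_file("/sys/fs/cgroup/cpuset/demo/cpuset.mems", "0"))
		perror("mems");
	if (write_cpuset_file("/sys/fs/cgroup/cpuset/demo/cpuset.memory_migrate", "1"))
		perror("memory_migrate");
	return 0;
}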
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939/*
Tejun Heo92fb9742012-11-19 08:13:38 -08001940 * cpuset_css_alloc - allocate a cpuset css
Li Zefanc9e5fe62013-06-14 11:18:27 +08001941 * cgrp: control group that the new cpuset will be part of
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 */
1943
Li Zefanc9e5fe62013-06-14 11:18:27 +08001944static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945{
Tejun Heoc8f699b2013-01-07 08:51:07 -08001946 struct cpuset *cs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947
Li Zefanc9e5fe62013-06-14 11:18:27 +08001948 if (!cgrp->parent)
Paul Menage8793d852007-10-18 23:39:39 -07001949 return &top_cpuset.css;
Tejun Heo033fa1c2012-11-19 08:13:39 -08001950
Tejun Heoc8f699b2013-01-07 08:51:07 -08001951 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 if (!cs)
Paul Menage8793d852007-10-18 23:39:39 -07001953 return ERR_PTR(-ENOMEM);
Li Zefan300ed6c2009-01-07 18:08:44 -08001954 if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1955 kfree(cs);
1956 return ERR_PTR(-ENOMEM);
1957 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
Paul Jackson029190c2007-10-18 23:40:20 -07001959 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
Li Zefan300ed6c2009-01-07 18:08:44 -08001960 cpumask_clear(cs->cpus_allowed);
Mike Travisf9a86fc2008-04-04 18:11:07 -07001961 nodes_clear(cs->mems_allowed);
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001962 fmeter_init(&cs->fmeter);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001963 cs->relax_domain_level = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
Tejun Heoc8f699b2013-01-07 08:51:07 -08001965 return &cs->css;
1966}
1967
1968static int cpuset_css_online(struct cgroup *cgrp)
1969{
1970 struct cpuset *cs = cgroup_cs(cgrp);
Tejun Heoc4310692013-01-07 08:51:08 -08001971 struct cpuset *parent = parent_cs(cs);
Tejun Heoae8086c2013-01-07 08:51:07 -08001972 struct cpuset *tmp_cs;
Li Zefan6f4b7e62013-07-31 16:18:36 +08001973 struct cgroup *pos_cgrp;
Tejun Heoc8f699b2013-01-07 08:51:07 -08001974
1975 if (!parent)
1976 return 0;
1977
Tejun Heo5d21cc22013-01-07 08:51:08 -08001978 mutex_lock(&cpuset_mutex);
1979
Tejun Heoefeb77b2013-01-07 08:51:07 -08001980 set_bit(CS_ONLINE, &cs->flags);
Tejun Heoc8f699b2013-01-07 08:51:07 -08001981 if (is_spread_page(parent))
1982 set_bit(CS_SPREAD_PAGE, &cs->flags);
1983 if (is_spread_slab(parent))
1984 set_bit(CS_SPREAD_SLAB, &cs->flags);
1985
Paul Jackson202f72d2006-01-08 01:01:57 -08001986 number_of_cpusets++;
Tejun Heo033fa1c2012-11-19 08:13:39 -08001987
Tejun Heoc8f699b2013-01-07 08:51:07 -08001988 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
Tejun Heo5d21cc22013-01-07 08:51:08 -08001989 goto out_unlock;
Tejun Heo033fa1c2012-11-19 08:13:39 -08001990
1991 /*
1992 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
1993 * set. This flag handling is implemented in cgroup core for
 1994 * historical reasons - the flag may be specified during mount.
1995 *
1996 * Currently, if any sibling cpusets have exclusive cpus or mem, we
 1997 * refuse to clone the configuration - thereby refusing to let the
 1998 * task enter, and as a result failing the sys_unshare() or
1999 * clone() which initiated it. If this becomes a problem for some
2000 * users who wish to allow that scenario, then this could be
2001 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2002 * (and likewise for mems) to the new cgroup.
2003 */
Tejun Heoae8086c2013-01-07 08:51:07 -08002004 rcu_read_lock();
Li Zefan6f4b7e62013-07-31 16:18:36 +08002005 cpuset_for_each_child(tmp_cs, pos_cgrp, parent) {
Tejun Heoae8086c2013-01-07 08:51:07 -08002006 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2007 rcu_read_unlock();
Tejun Heo5d21cc22013-01-07 08:51:08 -08002008 goto out_unlock;
Tejun Heoae8086c2013-01-07 08:51:07 -08002009 }
Tejun Heo033fa1c2012-11-19 08:13:39 -08002010 }
Tejun Heoae8086c2013-01-07 08:51:07 -08002011 rcu_read_unlock();
Tejun Heo033fa1c2012-11-19 08:13:39 -08002012
2013 mutex_lock(&callback_mutex);
2014 cs->mems_allowed = parent->mems_allowed;
2015 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2016 mutex_unlock(&callback_mutex);
Tejun Heo5d21cc22013-01-07 08:51:08 -08002017out_unlock:
2018 mutex_unlock(&cpuset_mutex);
Tejun Heoc8f699b2013-01-07 08:51:07 -08002019 return 0;
2020}
2021
Zhao Hongjiang0b9e6962013-07-27 11:56:53 +08002022/*
2023 * If the cpuset being removed has its flag 'sched_load_balance'
2024 * enabled, then simulate turning sched_load_balance off, which
2025 * will call rebuild_sched_domains_locked().
2026 */
2027
Tejun Heoc8f699b2013-01-07 08:51:07 -08002028static void cpuset_css_offline(struct cgroup *cgrp)
2029{
2030 struct cpuset *cs = cgroup_cs(cgrp);
2031
Tejun Heo5d21cc22013-01-07 08:51:08 -08002032 mutex_lock(&cpuset_mutex);
Tejun Heoc8f699b2013-01-07 08:51:07 -08002033
2034 if (is_sched_load_balance(cs))
2035 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2036
2037 number_of_cpusets--;
Tejun Heoefeb77b2013-01-07 08:51:07 -08002038 clear_bit(CS_ONLINE, &cs->flags);
Tejun Heoc8f699b2013-01-07 08:51:07 -08002039
Tejun Heo5d21cc22013-01-07 08:51:08 -08002040 mutex_unlock(&cpuset_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041}
2042
Li Zefanc9e5fe62013-06-14 11:18:27 +08002043static void cpuset_css_free(struct cgroup *cgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044{
Li Zefanc9e5fe62013-06-14 11:18:27 +08002045 struct cpuset *cs = cgroup_cs(cgrp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
Li Zefan300ed6c2009-01-07 18:08:44 -08002047 free_cpumask_var(cs->cpus_allowed);
Paul Menage8793d852007-10-18 23:39:39 -07002048 kfree(cs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049}
2050
Paul Menage8793d852007-10-18 23:39:39 -07002051struct cgroup_subsys cpuset_subsys = {
2052 .name = "cpuset",
Tejun Heo92fb9742012-11-19 08:13:38 -08002053 .css_alloc = cpuset_css_alloc,
Tejun Heoc8f699b2013-01-07 08:51:07 -08002054 .css_online = cpuset_css_online,
2055 .css_offline = cpuset_css_offline,
Tejun Heo92fb9742012-11-19 08:13:38 -08002056 .css_free = cpuset_css_free,
Paul Menage8793d852007-10-18 23:39:39 -07002057 .can_attach = cpuset_can_attach,
Tejun Heo452477f2013-01-07 08:51:07 -08002058 .cancel_attach = cpuset_cancel_attach,
Paul Menage8793d852007-10-18 23:39:39 -07002059 .attach = cpuset_attach,
Paul Menage8793d852007-10-18 23:39:39 -07002060 .subsys_id = cpuset_subsys_id,
Tejun Heo4baf6e32012-04-01 12:09:55 -07002061 .base_cftypes = files,
Paul Menage8793d852007-10-18 23:39:39 -07002062 .early_init = 1,
2063};
2064
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065/**
2066 * cpuset_init - initialize cpusets at system boot
2067 *
2068 * Description: Initialize top_cpuset and the cpuset internal file system,
2069 **/
2070
2071int __init cpuset_init(void)
2072{
Paul Menage8793d852007-10-18 23:39:39 -07002073 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
Miao Xie58568d22009-06-16 15:31:49 -07002075 if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
2076 BUG();
2077
Li Zefan300ed6c2009-01-07 18:08:44 -08002078 cpumask_setall(top_cpuset.cpus_allowed);
Mike Travisf9a86fc2008-04-04 18:11:07 -07002079 nodes_setall(top_cpuset.mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002081 fmeter_init(&top_cpuset.fmeter);
Paul Jackson029190c2007-10-18 23:40:20 -07002082 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09002083 top_cpuset.relax_domain_level = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 err = register_filesystem(&cpuset_fs_type);
2086 if (err < 0)
Paul Menage8793d852007-10-18 23:39:39 -07002087 return err;
2088
Li Zefan2341d1b2009-01-07 18:08:42 -08002089 if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
2090 BUG();
2091
Paul Jackson202f72d2006-01-08 01:01:57 -08002092 number_of_cpusets = 1;
Paul Menage8793d852007-10-18 23:39:39 -07002093 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094}
2095
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002096/*
Max Krasnyanskycf417142008-08-11 14:33:53 -07002097 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002098 * or memory nodes, we need to walk over the cpuset hierarchy,
2099 * removing that CPU or node from all cpusets. If this removes the
Cliff Wickman956db3c2008-02-07 00:14:43 -08002100 * last CPU or node from a cpuset, then move the tasks in the empty
2101 * cpuset to its next-highest non-empty parent.
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002102 */
Cliff Wickman956db3c2008-02-07 00:14:43 -08002103static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002104{
Cliff Wickman956db3c2008-02-07 00:14:43 -08002105 struct cpuset *parent;
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002106
Paul Jacksonc8d9c902008-02-07 00:14:46 -08002107 /*
Cliff Wickman956db3c2008-02-07 00:14:43 -08002108 * Find its next-highest non-empty parent (top cpuset
 2109 * has online cpus, so it can't be empty).
2110 */
Tejun Heoc4310692013-01-07 08:51:08 -08002111 parent = parent_cs(cs);
Li Zefan300ed6c2009-01-07 18:08:44 -08002112 while (cpumask_empty(parent->cpus_allowed) ||
Paul Jacksonb4501292008-02-07 00:14:47 -08002113 nodes_empty(parent->mems_allowed))
Tejun Heoc4310692013-01-07 08:51:08 -08002114 parent = parent_cs(parent);
Cliff Wickman956db3c2008-02-07 00:14:43 -08002115
Tejun Heo8cc99342013-04-07 09:29:50 -07002116 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2117 rcu_read_lock();
2118 printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n",
2119 cgroup_name(cs->css.cgroup));
2120 rcu_read_unlock();
2121 }
Cliff Wickman956db3c2008-02-07 00:14:43 -08002122}
2123
Tejun Heodeb7aa32013-01-07 08:51:07 -08002124/**
Li Zefan388afd82013-06-09 17:14:47 +08002125 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
Tejun Heodeb7aa32013-01-07 08:51:07 -08002126 * @cs: cpuset in interest
Cliff Wickman956db3c2008-02-07 00:14:43 -08002127 *
Tejun Heodeb7aa32013-01-07 08:51:07 -08002128 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2129 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
2130 * all its tasks are moved to the nearest ancestor with both resources.
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002131 */
Li Zefan388afd82013-06-09 17:14:47 +08002132static void cpuset_hotplug_update_tasks(struct cpuset *cs)
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002133{
Tejun Heodeb7aa32013-01-07 08:51:07 -08002134 static cpumask_t off_cpus;
Li Zefan33ad8012013-06-09 17:15:08 +08002135 static nodemask_t off_mems;
Tejun Heo5d21cc22013-01-07 08:51:08 -08002136 bool is_empty;
Li Zefan5c5cc622013-06-09 17:16:29 +08002137 bool sane = cgroup_sane_behavior(cs->css.cgroup);
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002138
Li Zefane44193d2013-06-09 17:14:22 +08002139retry:
2140 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002141
Tejun Heo5d21cc22013-01-07 08:51:08 -08002142 mutex_lock(&cpuset_mutex);
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002143
Li Zefane44193d2013-06-09 17:14:22 +08002144 /*
2145 * We have raced with task attaching. We wait until attaching
2146 * is finished, so we won't attach a task to an empty cpuset.
2147 */
2148 if (cs->attach_in_progress) {
2149 mutex_unlock(&cpuset_mutex);
2150 goto retry;
2151 }
2152
Tejun Heodeb7aa32013-01-07 08:51:07 -08002153 cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
2154 nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
Paul Jacksonb4501292008-02-07 00:14:47 -08002155
Li Zefan5c5cc622013-06-09 17:16:29 +08002156 mutex_lock(&callback_mutex);
2157 cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
2158 mutex_unlock(&callback_mutex);
Cliff Wickman956db3c2008-02-07 00:14:43 -08002159
Li Zefan5c5cc622013-06-09 17:16:29 +08002160 /*
 2161 * If the sane_behavior flag is set, we need to update tasks' cpumask
Li Zefanf047cec2013-06-13 15:11:44 +08002162 * for an empty cpuset to take on the ancestor's cpumask. Otherwise, don't
2163 * call update_tasks_cpumask() if the cpuset becomes empty, as
2164 * the tasks in it will be migrated to an ancestor.
Li Zefan5c5cc622013-06-09 17:16:29 +08002165 */
2166 if ((sane && cpumask_empty(cs->cpus_allowed)) ||
Li Zefanf047cec2013-06-13 15:11:44 +08002167 (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
Li Zefan5c5cc622013-06-09 17:16:29 +08002168 update_tasks_cpumask(cs, NULL);
2169
2170 mutex_lock(&callback_mutex);
2171 nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
2172 mutex_unlock(&callback_mutex);
2173
2174 /*
 2175 * If the sane_behavior flag is set, we need to update tasks' nodemask
Li Zefanf047cec2013-06-13 15:11:44 +08002176 * for an empty cpuset to take on the ancestor's nodemask. Otherwise, don't
 2177 * call update_tasks_nodemask() if the cpuset becomes empty, as
 2178 * the tasks in it will be migrated to an ancestor.
Li Zefan5c5cc622013-06-09 17:16:29 +08002179 */
2180 if ((sane && nodes_empty(cs->mems_allowed)) ||
Li Zefanf047cec2013-06-13 15:11:44 +08002181 (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
Li Zefan33ad8012013-06-09 17:15:08 +08002182 update_tasks_nodemask(cs, NULL);
Miao Xief9b4fb82008-07-25 01:47:22 -07002183
Tejun Heo5d21cc22013-01-07 08:51:08 -08002184 is_empty = cpumask_empty(cs->cpus_allowed) ||
2185 nodes_empty(cs->mems_allowed);
Tejun Heo8d033942013-01-07 08:51:07 -08002186
Tejun Heo5d21cc22013-01-07 08:51:08 -08002187 mutex_unlock(&cpuset_mutex);
2188
2189 /*
Li Zefan5c5cc622013-06-09 17:16:29 +08002190 * If the sane_behavior flag is set, we'll keep tasks in empty cpusets.
2191 *
2192 * Otherwise move tasks to the nearest ancestor with execution
 2193 * resources. This is a full cgroup operation which will
Tejun Heo5d21cc22013-01-07 08:51:08 -08002194 * also call back into cpuset. Should be done outside any lock.
2195 */
Li Zefan5c5cc622013-06-09 17:16:29 +08002196 if (!sane && is_empty)
Tejun Heo5d21cc22013-01-07 08:51:08 -08002197 remove_tasks_in_empty_cpuset(cs);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002198}
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01002199
Tejun Heodeb7aa32013-01-07 08:51:07 -08002200/**
Tejun Heo3a5a6d02013-01-07 08:51:07 -08002201 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
Tejun Heodeb7aa32013-01-07 08:51:07 -08002202 *
2203 * This function is called after either CPU or memory configuration has
2204 * changed and updates cpuset accordingly. The top_cpuset is always
2205 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
 2206 * order to make cpusets transparent (of no effect) on systems that are
2207 * actively using CPU hotplug but making no active use of cpusets.
2208 *
2209 * Non-root cpusets are only affected by offlining. If any CPUs or memory
Li Zefan388afd82013-06-09 17:14:47 +08002210 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
2211 * all descendants.
Tejun Heodeb7aa32013-01-07 08:51:07 -08002212 *
2213 * Note that CPU offlining during suspend is ignored. We don't modify
2214 * cpusets across suspend/resume cycles at all.
2215 */
Tejun Heo3a5a6d02013-01-07 08:51:07 -08002216static void cpuset_hotplug_workfn(struct work_struct *work)
Tejun Heodeb7aa32013-01-07 08:51:07 -08002217{
Li Zefan5c5cc622013-06-09 17:16:29 +08002218 static cpumask_t new_cpus;
2219 static nodemask_t new_mems;
Tejun Heodeb7aa32013-01-07 08:51:07 -08002220 bool cpus_updated, mems_updated;
Paul Jacksonb4501292008-02-07 00:14:47 -08002221
Tejun Heo5d21cc22013-01-07 08:51:08 -08002222 mutex_lock(&cpuset_mutex);
Cliff Wickman956db3c2008-02-07 00:14:43 -08002223
Tejun Heodeb7aa32013-01-07 08:51:07 -08002224 /* fetch the available cpus/mems and find out which changed how */
2225 cpumask_copy(&new_cpus, cpu_active_mask);
2226 new_mems = node_states[N_MEMORY];
Cliff Wickman956db3c2008-02-07 00:14:43 -08002227
Tejun Heodeb7aa32013-01-07 08:51:07 -08002228 cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002229 mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302230
Tejun Heodeb7aa32013-01-07 08:51:07 -08002231 /* synchronize cpus_allowed to cpu_active_mask */
2232 if (cpus_updated) {
2233 mutex_lock(&callback_mutex);
2234 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2235 mutex_unlock(&callback_mutex);
2236 /* we don't mess with cpumasks of tasks in top_cpuset */
2237 }
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302238
Tejun Heodeb7aa32013-01-07 08:51:07 -08002239 /* synchronize mems_allowed to N_MEMORY */
2240 if (mems_updated) {
Tejun Heodeb7aa32013-01-07 08:51:07 -08002241 mutex_lock(&callback_mutex);
2242 top_cpuset.mems_allowed = new_mems;
2243 mutex_unlock(&callback_mutex);
Li Zefan33ad8012013-06-09 17:15:08 +08002244 update_tasks_nodemask(&top_cpuset, NULL);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002245 }
2246
Tejun Heo5d21cc22013-01-07 08:51:08 -08002247 mutex_unlock(&cpuset_mutex);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002248
Li Zefan5c5cc622013-06-09 17:16:29 +08002249 /* if cpus or mems changed, we need to propagate to descendants */
2250 if (cpus_updated || mems_updated) {
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08002251 struct cpuset *cs;
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302252 struct cgroup *pos_cgrp;
Paul Jacksonb4501292008-02-07 00:14:47 -08002253
Paul Jacksonb4501292008-02-07 00:14:47 -08002254 rcu_read_lock();
Li Zefan388afd82013-06-09 17:14:47 +08002255 cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) {
2256 if (!css_tryget(&cs->css))
2257 continue;
2258 rcu_read_unlock();
2259
2260 cpuset_hotplug_update_tasks(cs);
2261
2262 rcu_read_lock();
2263 css_put(&cs->css);
2264 }
Tejun Heodeb7aa32013-01-07 08:51:07 -08002265 rcu_read_unlock();
2266 }
Tejun Heo8d033942013-01-07 08:51:07 -08002267
Tejun Heodeb7aa32013-01-07 08:51:07 -08002268 /* rebuild sched domains if cpus_allowed has changed */
Li Zhonge0e80a02013-04-27 06:52:43 -07002269 if (cpus_updated)
2270 rebuild_sched_domains();
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002271}
2272
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302273void cpuset_update_active_cpus(bool cpu_online)
Paul Jackson4c4d50f2006-08-27 01:23:51 -07002274{
Tejun Heo3a5a6d02013-01-07 08:51:07 -08002275 /*
2276 * We're inside cpu hotplug critical region which usually nests
2277 * inside cgroup synchronization. Bounce actual hotplug processing
2278 * to a work item to avoid reverse locking order.
2279 *
2280 * We still need to do partition_sched_domains() synchronously;
2281 * otherwise, the scheduler will get confused and put tasks to the
2282 * dead CPU. Fall back to the default single domain.
2283 * cpuset_hotplug_workfn() will rebuild it as necessary.
2284 */
2285 partition_sched_domains(1, NULL, NULL);
2286 schedule_work(&cpuset_hotplug_work);
Paul Jackson4c4d50f2006-08-27 01:23:51 -07002287}
Paul Jackson4c4d50f2006-08-27 01:23:51 -07002288
Paul Jackson38837fc2006-09-29 02:01:16 -07002289/*
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08002290 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2291 * Call this routine anytime after node_states[N_MEMORY] changes.
Srivatsa S. Bhata1cd2b12012-05-24 19:47:03 +05302292 * See cpuset_update_active_cpus() for CPU hotplug handling.
Paul Jackson38837fc2006-09-29 02:01:16 -07002293 */
Miao Xief4818912008-11-19 15:36:30 -08002294static int cpuset_track_online_nodes(struct notifier_block *self,
2295 unsigned long action, void *arg)
Paul Jackson38837fc2006-09-29 02:01:16 -07002296{
Tejun Heo3a5a6d02013-01-07 08:51:07 -08002297 schedule_work(&cpuset_hotplug_work);
Miao Xief4818912008-11-19 15:36:30 -08002298 return NOTIFY_OK;
Paul Jackson38837fc2006-09-29 02:01:16 -07002299}
Andrew Mortond8f10cb2013-04-29 15:08:08 -07002300
2301static struct notifier_block cpuset_track_online_nodes_nb = {
2302 .notifier_call = cpuset_track_online_nodes,
2303 .priority = 10, /* ??! */
2304};
Paul Jackson38837fc2006-09-29 02:01:16 -07002305
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306/**
2307 * cpuset_init_smp - initialize cpus_allowed
2308 *
 2309 * Description: Finish top cpuset after the cpu and node maps are initialized
Andrew Mortond8f10cb2013-04-29 15:08:08 -07002310 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311void __init cpuset_init_smp(void)
2312{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01002313 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08002314 top_cpuset.mems_allowed = node_states[N_MEMORY];
Li Zefan33ad8012013-06-09 17:15:08 +08002315 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
Paul Jackson4c4d50f2006-08-27 01:23:51 -07002316
Andrew Mortond8f10cb2013-04-29 15:08:08 -07002317 register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318}
2319
2320/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2322 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
Li Zefan6af866a2009-01-07 18:08:45 -08002323 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 *
Li Zefan300ed6c2009-01-07 18:08:44 -08002325 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 * attached to the specified @tsk. Guaranteed to return some non-empty
Rusty Russell5f054e32012-03-29 15:38:31 +10302327 * subset of cpu_online_mask, even if this means going outside the
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 * task's cpuset.
2329 **/
2330
Li Zefan6af866a2009-01-07 18:08:45 -08002331void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332{
Li Zefan070b57f2013-06-09 17:15:22 +08002333 struct cpuset *cpus_cs;
2334
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002335 mutex_lock(&callback_mutex);
Paul Jackson909d75a2006-01-08 01:01:55 -08002336 task_lock(tsk);
Li Zefan070b57f2013-06-09 17:15:22 +08002337 cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2338 guarantee_online_cpus(cpus_cs, pmask);
Paul Jackson909d75a2006-01-08 01:01:55 -08002339 task_unlock(tsk);
Oleg Nesterov897f0b32010-03-15 10:10:03 +01002340 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341}
2342
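/*
 * Illustrative usage sketch (not part of this file); error handling
 * is trimmed:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpuset_cpus_allowed(tsk, mask);
 *	... consult or apply mask ...
 *	free_cpumask_var(mask);
 *
 * sched_setaffinity() in kernel/sched/core.c follows this pattern to
 * clamp a requested affinity mask.
 */
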
Peter Zijlstra2baab4e2012-03-20 15:57:01 +01002343void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002344{
Tejun Heoc9710d82013-08-08 20:11:22 -04002345 struct cpuset *cpus_cs;
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002346
2347 rcu_read_lock();
Li Zefan070b57f2013-06-09 17:15:22 +08002348 cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2349 do_set_cpus_allowed(tsk, cpus_cs->cpus_allowed);
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002350 rcu_read_unlock();
2351
2352 /*
2353 * We own tsk->cpus_allowed, nobody can change it under us.
2354 *
2355	 * But we read cs and cs->cpus_allowed locklessly and thus can
2356	 * race with cgroup_attach_task() or update_cpumask() and get
2357	 * the wrong tsk->cpus_allowed. However, both cases imply a
2358	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2359	 * which takes task_rq_lock().
2360	 *
2361	 * If we are called after it dropped the lock we must see all
2362	 * changes in tsk_cs()->cpus_allowed. Otherwise we may temporarily
2363	 * set a mask that is not right from the task_cs() point of view;
2364	 * the pending set_cpus_allowed_ptr() will fix things.
Peter Zijlstra2baab4e2012-03-20 15:57:01 +01002365	 *
2366	 * select_fallback_rq() will fix things up and set cpu_possible_mask
2367	 * if required.
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002368 */
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002369}
2370
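/*
 * Illustrative sketch (not part of this file): select_fallback_rq()
 * in kernel/sched/core.c is the intended caller. Roughly, once no
 * allowed CPU is online it widens the task's mask via the cpuset:
 *
 *	case cpuset:
 *		cpuset_cpus_allowed_fallback(p);
 *		state = possible;
 *		break;
 *
 * The surrounding fallback state machine is omitted here.
 */
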
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371void cpuset_init_current_mems_allowed(void)
2372{
Mike Travisf9a86fc2008-04-04 18:11:07 -07002373 nodes_setall(current->mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374}
2375
Randy Dunlapd9fd8a62005-07-27 11:45:11 -07002376/**
Paul Jackson909d75a2006-01-08 01:01:55 -08002377 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2378 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2379 *
2380 * Description: Returns the nodemask_t mems_allowed of the cpuset
2381 * attached to the specified @tsk. Guaranteed to return some non-empty
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08002382 * subset of node_states[N_MEMORY], even if this means going outside the
Paul Jackson909d75a2006-01-08 01:01:55 -08002383 * task's cpuset.
2384 **/
2385
2386nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2387{
Li Zefan070b57f2013-06-09 17:15:22 +08002388 struct cpuset *mems_cs;
Paul Jackson909d75a2006-01-08 01:01:55 -08002389 nodemask_t mask;
2390
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002391 mutex_lock(&callback_mutex);
Paul Jackson909d75a2006-01-08 01:01:55 -08002392 task_lock(tsk);
Li Zefan070b57f2013-06-09 17:15:22 +08002393 mems_cs = effective_nodemask_cpuset(task_cs(tsk));
2394 guarantee_online_mems(mems_cs, &mask);
Paul Jackson909d75a2006-01-08 01:01:55 -08002395 task_unlock(tsk);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002396 mutex_unlock(&callback_mutex);
Paul Jackson909d75a2006-01-08 01:01:55 -08002397
2398 return mask;
2399}
2400
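/*
 * Illustrative usage sketch (not part of this file); the variable
 * names here are hypothetical:
 *
 *	nodemask_t allowed = cpuset_mems_allowed(tsk);
 *
 *	if (!node_isset(target_nid, allowed))
 *		target_nid = first_node(allowed);
 *
 * Real callers include the mempolicy code in mm/mempolicy.c, which
 * uses the returned mask to constrain policy node lists.
 */
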
2401/**
Mel Gorman19770b32008-04-28 02:12:18 -07002402 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2403 * @nodemask: the nodemask to be checked
Randy Dunlapd9fd8a62005-07-27 11:45:11 -07002404 *
Mel Gorman19770b32008-04-28 02:12:18 -07002405 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 */
Mel Gorman19770b32008-04-28 02:12:18 -07002407int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408{
Mel Gorman19770b32008-04-28 02:12:18 -07002409 return nodes_intersects(*nodemask, current->mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410}
2411
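/*
 * Illustrative sketch (not part of this file); pol_nodes and
 * constraint are hypothetical names, CONSTRAINT_MEMORY_POLICY is the
 * OOM constraint classification:
 *
 *	if (pol_nodes && !cpuset_nodemask_valid_mems_allowed(pol_nodes))
 *		constraint = CONSTRAINT_MEMORY_POLICY;
 *
 * i.e. if a mempolicy's nodes do not intersect current->mems_allowed,
 * an allocation failure is attributed to the policy rather than to
 * the cpuset. The OOM code performs a check of this general shape
 * when classifying a failing allocation.
 */
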
Paul Jackson9bf22292005-09-06 15:18:12 -07002412/*
Paul Menage78608362008-04-29 01:00:26 -07002413 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2414 * mem_hardwall ancestor to the specified cpuset. Call holding
2415 * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall
2416 * (an unusual configuration), then returns the root cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 */
Tejun Heoc9710d82013-08-08 20:11:22 -04002418static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419{
Tejun Heoc4310692013-01-07 08:51:08 -08002420 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
2421 cs = parent_cs(cs);
Paul Jackson9bf22292005-09-06 15:18:12 -07002422 return cs;
2423}
2424
2425/**
David Rientjesa1bc5a42009-04-02 16:57:54 -07002426 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2427 * @node: is this an allowed node?
Paul Jackson02a0e532006-12-13 00:34:25 -08002428 * @gfp_mask: memory allocation flags
Paul Jackson9bf22292005-09-06 15:18:12 -07002429 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002430 * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
2431 * set, yes, we can always allocate. If node is in our task's mems_allowed,
2432 * yes. If it's not a __GFP_HARDWALL request and this node is in the nearest
2433 * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been
2434 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2435 * flag, yes.
Paul Jackson9bf22292005-09-06 15:18:12 -07002436 * Otherwise, no.
2437 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002438 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2439 * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall()
2440 * might sleep, and might allow a node from an enclosing cpuset.
Paul Jackson02a0e532006-12-13 00:34:25 -08002441 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002442 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2443 * cpusets, and never sleeps.
Paul Jackson02a0e532006-12-13 00:34:25 -08002444 *
2445 * The __GFP_THISNODE placement logic is really handled elsewhere,
2446 * by forcibly using a zonelist starting at a specified node, and by
2447 * (in get_page_from_freelist()) refusing to consider the zones for
2448 * any node on the zonelist except the first. By the time any such
2449 * calls get to this routine, we should just shut up and say 'yes'.
2450 *
Paul Jackson9bf22292005-09-06 15:18:12 -07002451 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
David Rientjesc596d9f2007-05-06 14:49:32 -07002452 * and do not allow allocations outside the current task's cpuset
2453 * unless the task has been OOM killed and is marked TIF_MEMDIE.
Paul Jackson9bf22292005-09-06 15:18:12 -07002454 * GFP_KERNEL allocations are not so marked, so can escape to the
Paul Menage78608362008-04-29 01:00:26 -07002455 * nearest enclosing hardwalled ancestor cpuset.
Paul Jackson9bf22292005-09-06 15:18:12 -07002456 *
Paul Jackson02a0e532006-12-13 00:34:25 -08002457 * Scanning up parent cpusets requires callback_mutex. The
2458 * __alloc_pages() routine only calls here with the __GFP_HARDWALL
2459 * bit _not_ set if it's a GFP_KERNEL allocation and all nodes in the
2460 * current task's mems_allowed came up empty on the first pass over
2461 * the zonelist. So only GFP_KERNEL allocations, when all nodes in
2462 * the cpuset are short of memory, might require taking
2463 * callback_mutex.
Paul Jackson9bf22292005-09-06 15:18:12 -07002464 *
Paul Jackson36be57f2006-05-20 15:00:10 -07002465 * The first call here from mm/page_alloc:get_page_from_freelist()
Paul Jackson02a0e532006-12-13 00:34:25 -08002466 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2467 * so no allocation on a node outside the cpuset is allowed (unless
2468 * in interrupt, of course).
Paul Jackson9bf22292005-09-06 15:18:12 -07002469 *
Paul Jackson36be57f2006-05-20 15:00:10 -07002470 * The second pass through get_page_from_freelist() doesn't even call
2471 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
2472 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2473 * in alloc_flags. That logic and the checks below have the combined
2474 * effect that:
Paul Jackson9bf22292005-09-06 15:18:12 -07002475 * in_interrupt - any node ok (current task context irrelevant)
2476 * GFP_ATOMIC - any node ok
David Rientjesc596d9f2007-05-06 14:49:32 -07002477 * TIF_MEMDIE - any node ok
Paul Menage78608362008-04-29 01:00:26 -07002478 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
Paul Jackson9bf22292005-09-06 15:18:12 -07002479 * GFP_USER - only nodes in the current task's mems_allowed ok.
Paul Jackson36be57f2006-05-20 15:00:10 -07002480 *
2481 * Rule:
David Rientjesa1bc5a42009-04-02 16:57:54 -07002482 * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
Paul Jackson36be57f2006-05-20 15:00:10 -07002483 * pass in gfp_mask with the __GFP_HARDWALL flag set, which disables
2484 * the code that might scan up ancestor cpusets and sleep.
Paul Jackson02a0e532006-12-13 00:34:25 -08002485 */
David Rientjesa1bc5a42009-04-02 16:57:54 -07002486int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
Paul Jackson9bf22292005-09-06 15:18:12 -07002487{
Tejun Heoc9710d82013-08-08 20:11:22 -04002488 struct cpuset *cs; /* current cpuset ancestors */
Paul Jackson29afd492006-03-24 03:16:12 -08002489	int allowed;		/* is allocation on node allowed? */
Paul Jackson9bf22292005-09-06 15:18:12 -07002490
Christoph Lameter9b819d22006-09-25 23:31:40 -07002491 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
Paul Jackson9bf22292005-09-06 15:18:12 -07002492 return 1;
Paul Jackson92d1dbd2006-05-20 15:00:11 -07002493 might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
Paul Jackson9bf22292005-09-06 15:18:12 -07002494 if (node_isset(node, current->mems_allowed))
2495 return 1;
David Rientjesc596d9f2007-05-06 14:49:32 -07002496 /*
2497 * Allow tasks that have access to memory reserves because they have
2498 * been OOM killed to get memory anywhere.
2499 */
2500 if (unlikely(test_thread_flag(TIF_MEMDIE)))
2501 return 1;
Paul Jackson9bf22292005-09-06 15:18:12 -07002502 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
2503 return 0;
2504
Bob Picco5563e772005-11-13 16:06:35 -08002505 if (current->flags & PF_EXITING) /* Let dying task have memory */
2506 return 1;
2507
Paul Jackson9bf22292005-09-06 15:18:12 -07002508 /* Not hardwall and node outside mems_allowed: scan up cpusets */
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002509 mutex_lock(&callback_mutex);
Paul Jackson053199e2005-10-30 15:02:30 -08002510
Paul Jackson053199e2005-10-30 15:02:30 -08002511 task_lock(current);
Paul Menage78608362008-04-29 01:00:26 -07002512 cs = nearest_hardwall_ancestor(task_cs(current));
Paul Jackson053199e2005-10-30 15:02:30 -08002513 task_unlock(current);
2514
Paul Jackson9bf22292005-09-06 15:18:12 -07002515 allowed = node_isset(node, cs->mems_allowed);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002516 mutex_unlock(&callback_mutex);
Paul Jackson9bf22292005-09-06 15:18:12 -07002517 return allowed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518}
2519
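/*
 * For reference, a sketch of the wrapper in include/linux/cpuset.h
 * that short-circuits this call when only the root cpuset exists
 * (paraphrased from that header, not verbatim):
 *
 *	static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 *	{
 *		return number_of_cpusets <= 1 ||
 *			__cpuset_node_allowed_softwall(node, gfp_mask);
 *	}
 */
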
Paul Jackson02a0e532006-12-13 00:34:25 -08002520/*
David Rientjesa1bc5a42009-04-02 16:57:54 -07002521 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2522 * @node: is this an allowed node?
Paul Jackson02a0e532006-12-13 00:34:25 -08002523 * @gfp_mask: memory allocation flags
2524 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002525 * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
2526 * set, yes, we can always allocate. If node is in our task's mems_allowed,
2527 * yes. If the task has been OOM killed and has access to memory reserves as
2528 * specified by the TIF_MEMDIE flag, yes.
2529 * Otherwise, no.
Paul Jackson02a0e532006-12-13 00:34:25 -08002530 *
2531 * The __GFP_THISNODE placement logic is really handled elsewhere,
2532 * by forcibly using a zonelist starting at a specified node, and by
2533 * (in get_page_from_freelist()) refusing to consider the zones for
2534 * any node on the zonelist except the first. By the time any such
2535 * calls get to this routine, we should just shut up and say 'yes'.
2536 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002537 * Unlike the cpuset_node_allowed_softwall() variant, above,
2538 * this variant requires that the node be in the current task's
Paul Jackson02a0e532006-12-13 00:34:25 -08002539 * mems_allowed or that we're in interrupt. It does not scan up the
2540 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2541 * It never sleeps.
2542 */
David Rientjesa1bc5a42009-04-02 16:57:54 -07002543int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
Paul Jackson02a0e532006-12-13 00:34:25 -08002544{
Paul Jackson02a0e532006-12-13 00:34:25 -08002545 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2546 return 1;
Paul Jackson02a0e532006-12-13 00:34:25 -08002547 if (node_isset(node, current->mems_allowed))
2548 return 1;
Daniel Walkerdedf8b72007-10-18 03:06:04 -07002549 /*
2550 * Allow tasks that have access to memory reserves because they have
2551 * been OOM killed to get memory anywhere.
2552 */
2553 if (unlikely(test_thread_flag(TIF_MEMDIE)))
2554 return 1;
Paul Jackson02a0e532006-12-13 00:34:25 -08002555 return 0;
2556}
2557
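/*
 * For reference, include/linux/cpuset.h builds the zone-based variant
 * on top of this one; a paraphrased sketch:
 *
 *	static inline int cpuset_zone_allowed_hardwall(struct zone *z,
 *						       gfp_t gfp_mask)
 *	{
 *		return number_of_cpusets <= 1 ||
 *			__cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
 *	}
 *
 * The slab allocators use the hardwall variants because their hot
 * paths must not sleep.
 */
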
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002558/**
Jack Steiner6adef3e2010-05-26 14:42:49 -07002559 * cpuset_mem_spread_node() - On which node to begin search for a file page
2560 * cpuset_slab_spread_node() - On which node to begin search for a slab page
Paul Jackson825a46a2006-03-24 03:16:03 -08002561 *
2562 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2563 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2564 * and if the memory allocation used cpuset_mem_spread_node()
2565 * to determine on which node to start looking (as it will for
2566 * certain page cache or slab cache pages, such as those used for
2567 * file system buffers and inode caches), then instead of starting
2568 * the search for a free page on the local node, spread the starting
2569 * node around the task's mems_allowed nodes.
2570 *
2571 * We don't have to worry about the returned node being offline
2572 * because "it can't happen", and even if it did, it would be ok.
2573 *
2574 * The routines calling guarantee_online_mems() are careful to
2575 * only set nodes in task->mems_allowed that are online. So it
2576 * should not be possible for the following code to return an
2577 * offline node. But if it did, that would be ok, as this routine
2578 * is not returning the node where the allocation must be, only
2579 * the node where the search should start. The zonelist passed to
2580 * __alloc_pages() will include all nodes. If the slab allocator
2581 * is passed an offline node, it will fall back to the local node.
2582 * See kmem_cache_alloc_node().
2583 */
2584
Jack Steiner6adef3e2010-05-26 14:42:49 -07002585static int cpuset_spread_node(int *rotor)
Paul Jackson825a46a2006-03-24 03:16:03 -08002586{
2587 int node;
2588
Jack Steiner6adef3e2010-05-26 14:42:49 -07002589 node = next_node(*rotor, current->mems_allowed);
Paul Jackson825a46a2006-03-24 03:16:03 -08002590 if (node == MAX_NUMNODES)
2591 node = first_node(current->mems_allowed);
Jack Steiner6adef3e2010-05-26 14:42:49 -07002592 *rotor = node;
Paul Jackson825a46a2006-03-24 03:16:03 -08002593 return node;
2594}
Jack Steiner6adef3e2010-05-26 14:42:49 -07002595
2596int cpuset_mem_spread_node(void)
2597{
Michal Hocko778d3b02011-07-26 16:08:30 -07002598 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2599 current->cpuset_mem_spread_rotor =
2600 node_random(&current->mems_allowed);
2601
Jack Steiner6adef3e2010-05-26 14:42:49 -07002602 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2603}
2604
2605int cpuset_slab_spread_node(void)
2606{
Michal Hocko778d3b02011-07-26 16:08:30 -07002607 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2608 current->cpuset_slab_spread_rotor =
2609 node_random(&current->mems_allowed);
2610
Jack Steiner6adef3e2010-05-26 14:42:49 -07002611 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2612}
2613
Paul Jackson825a46a2006-03-24 03:16:03 -08002614EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2615
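/*
 * Illustrative sketch (not part of this file): __page_cache_alloc()
 * in mm/filemap.c uses the page rotor roughly like this (the
 * mems_allowed retry loop is omitted):
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return alloc_pages_exact_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */
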
2616/**
David Rientjesbbe373f2007-10-16 23:25:58 -07002617 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2618 * @tsk1: pointer to task_struct of some task.
2619 * @tsk2: pointer to task_struct of some other task.
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002620 *
David Rientjesbbe373f2007-10-16 23:25:58 -07002621 * Description: Return true if @tsk1's mems_allowed intersects the
2622 * mems_allowed of @tsk2. Used by the OOM killer to determine if
2623 * one of the task's memory usage might impact the memory available
2624 * to the other.
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002625 **/
2626
David Rientjesbbe373f2007-10-16 23:25:58 -07002627int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2628 const struct task_struct *tsk2)
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002629{
David Rientjesbbe373f2007-10-16 23:25:58 -07002630 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002631}
2632
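/*
 * Illustrative sketch (not part of this file): the OOM killer skips
 * candidate victims whose memory could not help the allocating task,
 * with a check of roughly this shape:
 *
 *	if (!cpuset_mems_allowed_intersects(current, victim))
 *		continue;	(killing victim frees no useful memory)
 *
 * See has_intersects_mems_allowed() in mm/oom_kill.c for the real,
 * per-thread version of this test.
 */
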
Li Zefanf440d982013-03-01 15:02:15 +08002633#define CPUSET_NODELIST_LEN (256)
2634
David Rientjes75aa1992009-01-06 14:39:01 -08002635/**
2636 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2637 * @tsk: pointer to task_struct of some task.
2638 *
2639 * Description: Prints @tsk's name, cpuset name, and cached copy of its
2640 * mems_allowed to the kernel log. Must hold task_lock(tsk) to allow
2641 * dereferencing task_cs(tsk).
2642 */
2643void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2644{
Li Zefanf440d982013-03-01 15:02:15 +08002645 /* Statically allocated to prevent using excess stack. */
2646 static char cpuset_nodelist[CPUSET_NODELIST_LEN];
2647 static DEFINE_SPINLOCK(cpuset_buffer_lock);
David Rientjes75aa1992009-01-06 14:39:01 -08002648
Li Zefanf440d982013-03-01 15:02:15 +08002649 struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
2650
Li Zefancfb59662013-03-12 10:28:39 +08002651 rcu_read_lock();
David Rientjes75aa1992009-01-06 14:39:01 -08002652 spin_lock(&cpuset_buffer_lock);
Li Zefan63f43f52013-01-25 16:08:01 +08002653
David Rientjes75aa1992009-01-06 14:39:01 -08002654 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2655 tsk->mems_allowed);
2656 printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
Li Zefanf440d982013-03-01 15:02:15 +08002657 tsk->comm, cgroup_name(cgrp), cpuset_nodelist);
2658
David Rientjes75aa1992009-01-06 14:39:01 -08002659 spin_unlock(&cpuset_buffer_lock);
Li Zefancfb59662013-03-12 10:28:39 +08002660 rcu_read_unlock();
David Rientjes75aa1992009-01-06 14:39:01 -08002661}
2662
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663/*
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002664 * Collection of memory_pressure is suppressed unless
2665 * this flag is enabled by writing "1" to the special
2666 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2667 */
2668
Paul Jacksonc5b2aff2006-01-08 01:01:51 -08002669int cpuset_memory_pressure_enabled __read_mostly;
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002670
2671/**
2672 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2673 *
2674 * Keep a running average of the rate of synchronous (direct)
2675 * page reclaim efforts initiated by tasks in each cpuset.
2676 *
2677 * This represents the rate at which some task in the cpuset
2678 * ran low on memory on all nodes it was allowed to use, and
2679 * had to enter the kernel's page reclaim code in an effort to
2680 * create more free memory by tossing clean pages or swapping
2681 * or writing dirty pages.
2682 *
2683 * Display to user space in the per-cpuset read-only file
2684 * "memory_pressure". Value displayed is an integer
2685 * representing the recent rate of entry into the synchronous
2686 * (direct) page reclaim by any task attached to the cpuset.
2687 **/
2688
2689void __cpuset_memory_pressure_bump(void)
2690{
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002691 task_lock(current);
Paul Menage8793d852007-10-18 23:39:39 -07002692 fmeter_markevent(&task_cs(current)->fmeter);
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002693 task_unlock(current);
2694}
2695
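/*
 * For reference, the wrapper in include/linux/cpuset.h makes this a
 * no-op unless collection is enabled; a paraphrased sketch:
 *
 *	#define cpuset_memory_pressure_bump()				\
 *		do {							\
 *			if (cpuset_memory_pressure_enabled)		\
 *				__cpuset_memory_pressure_bump();	\
 *		} while (0)
 */
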
Paul Menage8793d852007-10-18 23:39:39 -07002696#ifdef CONFIG_PROC_PID_CPUSET
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002697/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 * proc_cpuset_show()
2699 * - Print tasks cpuset path into seq_file.
2700 * - Used for /proc/<pid>/cpuset.
Paul Jackson053199e2005-10-30 15:02:30 -08002701 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2702 * doesn't really matter if tsk->cpuset changes after we read it,
Tejun Heo5d21cc22013-01-07 08:51:08 -08002703 * and we take cpuset_mutex, keeping cpuset_attach() from changing it
Paul Menage2df167a2008-02-07 00:14:45 -08002704 * anyway.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 */
Al Viro8d8b97b2013-04-19 23:11:24 -04002706int proc_cpuset_show(struct seq_file *m, void *unused_v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707{
Eric W. Biederman13b41b02006-06-26 00:25:56 -07002708 struct pid *pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 struct task_struct *tsk;
2710 char *buf;
Paul Menage8793d852007-10-18 23:39:39 -07002711 struct cgroup_subsys_state *css;
Eric W. Biederman99f89552006-06-26 00:25:55 -07002712 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713
Eric W. Biederman99f89552006-06-26 00:25:55 -07002714 retval = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2716 if (!buf)
Eric W. Biederman99f89552006-06-26 00:25:55 -07002717 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718
Eric W. Biederman99f89552006-06-26 00:25:55 -07002719 retval = -ESRCH;
Eric W. Biederman13b41b02006-06-26 00:25:56 -07002720 pid = m->private;
2721 tsk = get_pid_task(pid, PIDTYPE_PID);
Eric W. Biederman99f89552006-06-26 00:25:55 -07002722 if (!tsk)
2723 goto out_free;
2724
Li Zefan27e89ae2013-01-15 14:10:57 +08002725 rcu_read_lock();
Tejun Heo8af01f52013-08-08 20:11:22 -04002726 css = task_css(tsk, cpuset_subsys_id);
Paul Menage8793d852007-10-18 23:39:39 -07002727 retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
Li Zefan27e89ae2013-01-15 14:10:57 +08002728 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 if (retval < 0)
Li Zefan27e89ae2013-01-15 14:10:57 +08002730 goto out_put_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 seq_puts(m, buf);
2732 seq_putc(m, '\n');
Li Zefan27e89ae2013-01-15 14:10:57 +08002733out_put_task:
Eric W. Biederman99f89552006-06-26 00:25:55 -07002734 put_task_struct(tsk);
2735out_free:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 kfree(buf);
Eric W. Biederman99f89552006-06-26 00:25:55 -07002737out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 return retval;
2739}
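
/*
 * Example output (illustrative): a task in the root cpuset sees "/"
 * from "cat /proc/self/cpuset"; a task attached to a child cpuset
 * named "foo" sees "/foo".
 */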
Paul Menage8793d852007-10-18 23:39:39 -07002740#endif /* CONFIG_PROC_PID_CPUSET */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741
Heiko Carstensd01d4822009-09-21 11:06:27 +02002742/* Display task mems_allowed in /proc/<pid>/status file. */
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002743void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744{
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002745 seq_printf(m, "Mems_allowed:\t");
Lai Jiangshan30e8e132008-10-18 20:28:20 -07002746 seq_nodemask(m, &task->mems_allowed);
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002747 seq_printf(m, "\n");
Mike Travis39106dc2008-04-08 11:43:03 -07002748 seq_printf(m, "Mems_allowed_list:\t");
Lai Jiangshan30e8e132008-10-18 20:28:20 -07002749 seq_nodemask_list(m, &task->mems_allowed);
Mike Travis39106dc2008-04-08 11:43:03 -07002750 seq_printf(m, "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751}
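
/*
 * Example output (illustrative): on a machine where the task may use
 * memory nodes 0 and 1, /proc/<pid>/status would contain lines like:
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 *
 * The exact width of the hex mask depends on MAX_NUMNODES.
 */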