/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

/*
 * Tracks how many cpusets are currently defined in the system.
 * When there is only one cpuset (the root cpuset) we can
 * short-circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
struct cpuset;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	struct cpuset *parent;		/* my parent */

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* used for walking a cpuset hierarchy */
	struct list_head stack_list;

	struct work_struct hotplug_work;
};

/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
			    struct cpuset, css);
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return container_of(task_subsys_state(task, cpuset_subsys_id),
			    struct cpuset, css);
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(const struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_cgrp: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)		\
	cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)	\
		if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))

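/*
 * Example usage (a sketch; visit() is a placeholder, not a function in
 * this file):
 *
 *	struct cpuset *child;
 *	struct cgroup *pos;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos, parent_cs)
 *		visit(child);
 *	rcu_read_unlock();
 */
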
/*
 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 * and callback_mutex.  The latter may nest inside the former.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_mutex and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_mutex to query cpusets.
 * Once it is ready to make the changes, it takes callback_mutex, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mempolicy may be changed
 * by other tasks, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c.
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_MUTEX(callback_mutex);

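/*
 * A minimal sketch of the write-side pattern implied by the rules above,
 * where new_mems stands for a hypothetical, already-validated nodemask:
 *
 *	mutex_lock(&cpuset_mutex);
 *	(check invariants, allocate any needed memory)
 *	mutex_lock(&callback_mutex);
 *	cs->mems_allowed = new_mems;
 *	mutex_unlock(&callback_mutex);
 *	mutex_unlock(&cpuset_mutex);
 */
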
/*
 * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
 * buffers.  They are statically allocated to prevent using excess stack
 * when calling cpuset_print_task_mems_allowed().
 */
#define CPUSET_NAME_LEN		(128)
#define CPUSET_NODELIST_LEN	(256)
static char cpuset_name[CPUSET_NAME_LEN];
static char cpuset_nodelist[CPUSET_NODELIST_LEN];
static DEFINE_SPINLOCK(cpuset_buffer_lock);

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static struct workqueue_struct *cpuset_propagate_hotplug_wq;

static void cpuset_hotplug_workfn(struct work_struct *work);
static void cpuset_propagate_hotplug_workfn(struct work_struct *work);
static void schedule_cpuset_propagate_hotplug(struct cpuset *cs);

static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users.  If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead.
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};
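
/*
 * So, for example, "mount -t cpuset cpuset /dev/cpuset" should behave as
 * if the cgroup filesystem had been mounted with the mountopts shown
 * above (a sketch of the intended userspace-visible effect).
 */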
296
/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_mask.  Or if passed a NULL cs from an exiting
 * task, return cpu_online_mask.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_cpus(const struct cpuset *cs,
				  struct cpumask *pmask)
{
	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
		cs = cs->parent;
	if (cs)
		cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
	else
		cpumask_copy(pmask, cpu_online_mask);
	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  If we get all the way to the top and still haven't
 * found any online mems, return node_states[N_MEMORY].
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (cs && !nodes_intersects(cs->mems_allowed,
					node_states[N_MEMORY]))
		cs = cs->parent;
	if (cs)
		nodes_and(*pmask, cs->mems_allowed,
			  node_states[N_MEMORY]);
	else
		*pmask = node_states[N_MEMORY];
	BUG_ON(!nodes_intersects(*pmask, node_states[N_MEMORY]));
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Called with callback_mutex/cpuset_mutex held
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		tsk->flags |= PF_SPREAD_PAGE;
	else
		tsk->flags &= ~PF_SPREAD_PAGE;
	if (is_spread_slab(cs))
		tsk->flags |= PF_SPREAD_SLAB;
	else
		tsk->flags &= ~PF_SPREAD_SLAB;
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
		kfree(trial);
		return NULL;
	}
	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);

	return trial;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}

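/*
 * Together these support the copy-validate-commit pattern used by the
 * update_*() handlers below (a sketch):
 *
 *	trialcs = alloc_trial_cpuset(cs);
 *	(apply the requested change to trialcs)
 *	retval = validate_change(cs, trialcs);
 *	if (!retval)
 *		(commit trialcs's values to cs under callback_mutex)
 *	free_trial_cpuset(trialcs);
 */
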
/*
 * validate_change() - Used to validate that any proposed cpuset change
 * follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cgroup *cont;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, cont, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = cur->parent;

	/* We must be a subset of our parent cpuset */
	ret = -EACCES;
	if (!is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, cont, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
	    (cpumask_empty(trial->cpus_allowed) ||
	     nodes_empty(trial->mems_allowed)))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void
update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
{
	LIST_HEAD(q);

	list_add(&c->stack_list, &q);
	while (!list_empty(&q)) {
		struct cpuset *cp;
		struct cgroup *cont;
		struct cpuset *child;

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);

		if (cpumask_empty(cp->cpus_allowed))
			continue;

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);

		rcu_read_lock();
		cpuset_for_each_child(child, cont, cp)
			list_add_tail(&child->stack_list, &q);
		rcu_read_unlock();
	}
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    q  - a linked-list queue of cpuset pointers, used to implement a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the scheduler's
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number, and gives them the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
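 *
 *	A small worked example with hypothetical masks: balanced cpusets
 *	A on cpus 0-1, B on cpus 1-2 and C on cpus 4-5.  A and B overlap,
 *	so they converge on one 'pn' and their union {0,1,2} becomes one
 *	sched domain; C overlaps neither, so {4,5} becomes a second
 *	domain, and ndoms == 2.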
 */
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	LIST_HEAD(q);		/* queue of cpusets to be scanned */
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_copy(doms[0], top_cpuset.cpus_allowed);

		goto done;
	}

	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	list_add(&top_cpuset.stack_list, &q);
	while (!list_empty(&q)) {
		struct cgroup *cont;
		struct cpuset *child;   /* scans child cpusets of cp */

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);

		if (cpumask_empty(cp->cpus_allowed))
			continue;

		/*
		 * All child cpusets contain a subset of the parent's cpus, so
		 * just skip them, and then we call update_domain_attr_tree()
		 * to calc relax_domain_level of the corresponding sched
		 * domain.
		 */
		if (is_sched_load_balance(cp)) {
			csa[csn++] = cp;
			continue;
		}

		rcu_read_lock();
		cpuset_for_each_child(child, cont, cp)
			list_add_tail(&child->stack_list, &q);
		rcu_read_unlock();
	}

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				printk(KERN_WARNING
				 "rebuild_sched_domains confused:"
				  " nslot %d, ndoms %d, csn %d, i %d,"
				  " apn %d\n",
				  nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->cpus_allowed);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_held(&cpuset_mutex);
	get_online_cpus();

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);

	put_online_cpus();
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}

static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	*domains = NULL;
	return 1;
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	mutex_lock(&cpuset_mutex);
	rebuild_sched_domains_locked();
	mutex_unlock(&cpuset_mutex);
}

/**
 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
 * @tsk: task to test
 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
 *
 * Call with cpuset_mutex held.  May take callback_mutex during call.
 * Called for each task in a cgroup by cgroup_scan_tasks().
 * Return nonzero if this task's cpus_allowed mask should be changed (in other
 * words, if its mask is not equal to its cpuset's mask).
 */
static int cpuset_test_cpumask(struct task_struct *tsk,
			       struct cgroup_scanner *scan)
{
	return !cpumask_equal(&tsk->cpus_allowed,
			(cgroup_cs(scan->cg))->cpus_allowed);
}

/**
 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 * @tsk: task to test
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 * cpus_allowed mask needs to be changed.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cpuset_mutex at this point.
 */
static void cpuset_change_cpumask(struct task_struct *tsk,
				  struct cgroup_scanner *scan)
{
	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	scan.cg = cs->css.cgroup;
	scan.test_task = cpuset_test_cpumask;
	scan.process_task = cpuset_change_cpumask;
	scan.heap = heap;
	cgroup_scan_tasks(&scan);
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	struct ptr_heap heap;
	int retval;
	int is_load_balanced;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
			return -EINVAL;
	}
	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval)
		return retval;

	is_load_balanced = is_sched_load_balance(trialcs);

	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	/*
	 * Scan tasks in the cpuset, and update the cpumasks of any
	 * that need an update.
	 */
	update_tasks_cpumask(cs, &heap);

	heap_free(&heap);

	if (is_load_balanced)
		rebuild_sched_domains_locked();
	return 0;
}

/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set the current task's mems_allowed to the target nodes
 *    of the migration, so that the migration code can allocate pages on
 *    these nodes.
 *
 *    Call holding cpuset_mutex, so current's cpuset won't change
 *    during this call, as cpuset_mutex holds off any cpuset_attach()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our task's cpuset.
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct task_struct *tsk = current;

	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be allowed to use
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
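 *
 * For example, going from mems_allowed = {0} to *newmems = {1}: the
 * nodes_or() below first widens mems_allowed to {0,1}, and only then
 * does the final assignment store {1}, so a concurrent reader never
 * sees an empty nodemask.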
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	bool need_loop;

	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return;

	task_lock(tsk);
	/*
	 * Determine if a loop is necessary if another thread is doing
	 * get_mems_allowed().  If at least one node remains unchanged and
	 * tsk does not have a mempolicy, then an empty nodemask will not be
	 * possible when mems_allowed is larger than a word.
	 */
	need_loop = task_has_mempolicy(tsk) ||
			!nodes_intersects(*newmems, tsk->mems_allowed);

	if (need_loop)
		write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;

	if (need_loop)
		write_seqcount_end(&tsk->mems_allowed_seq);

	task_unlock(tsk);
}
1007/*
1008 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
1009 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
Tejun Heo5d21cc22013-01-07 08:51:08 -08001010 * memory_migrate flag is set. Called with cpuset_mutex held.
Li Zefan3b6766f2009-04-02 16:57:51 -07001011 */
1012static void cpuset_change_nodemask(struct task_struct *p,
1013 struct cgroup_scanner *scan)
1014{
1015 struct mm_struct *mm;
1016 struct cpuset *cs;
1017 int migrate;
1018 const nodemask_t *oldmem = scan->data;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001019 static nodemask_t newmems; /* protected by cpuset_mutex */
Miao Xie58568d22009-06-16 15:31:49 -07001020
1021 cs = cgroup_cs(scan->cg);
Li Zefanee24d372011-03-23 16:42:47 -07001022 guarantee_online_mems(cs, &newmems);
Miao Xie58568d22009-06-16 15:31:49 -07001023
Li Zefanee24d372011-03-23 16:42:47 -07001024 cpuset_change_task_nodemask(p, &newmems);
Miao Xie53feb292010-03-23 13:35:35 -07001025
Li Zefan3b6766f2009-04-02 16:57:51 -07001026 mm = get_task_mm(p);
1027 if (!mm)
1028 return;
1029
Li Zefan3b6766f2009-04-02 16:57:51 -07001030 migrate = is_memory_migrate(cs);
1031
1032 mpol_rebind_mm(mm, &cs->mems_allowed);
1033 if (migrate)
1034 cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1035 mmput(mm);
1036}
1037
Paul Menage8793d852007-10-18 23:39:39 -07001038static void *cpuset_being_rebound;
1039
Miao Xie0b2f6302008-07-25 01:47:21 -07001040/**
1041 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1042 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1043 * @oldmem: old mems_allowed of cpuset cs
Li Zefan010cfac2009-04-02 16:57:52 -07001044 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
Miao Xie0b2f6302008-07-25 01:47:21 -07001045 *
Tejun Heo5d21cc22013-01-07 08:51:08 -08001046 * Called with cpuset_mutex held
Li Zefan010cfac2009-04-02 16:57:52 -07001047 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1048 * if @heap != NULL.
Miao Xie0b2f6302008-07-25 01:47:21 -07001049 */
Li Zefan010cfac2009-04-02 16:57:52 -07001050static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1051 struct ptr_heap *heap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052{
Li Zefan3b6766f2009-04-02 16:57:51 -07001053 struct cgroup_scanner scan;
Paul Jackson59dac162006-01-08 01:01:52 -08001054
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07001055 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
Paul Jackson42253992006-01-08 01:01:59 -08001056
Li Zefan3b6766f2009-04-02 16:57:51 -07001057 scan.cg = cs->css.cgroup;
1058 scan.test_task = NULL;
1059 scan.process_task = cpuset_change_nodemask;
Li Zefan010cfac2009-04-02 16:57:52 -07001060 scan.heap = heap;
Li Zefan3b6766f2009-04-02 16:57:51 -07001061 scan.data = (nodemask_t *)oldmem;
Paul Jackson42253992006-01-08 01:01:59 -08001062
1063 /*
Li Zefan3b6766f2009-04-02 16:57:51 -07001064 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1065 * take while holding tasklist_lock. Forks can happen - the
1066 * mpol_dup() cpuset_being_rebound check will catch such forks,
1067 * and rebind their vma mempolicies too. Because we still hold
Tejun Heo5d21cc22013-01-07 08:51:08 -08001068 * the global cpuset_mutex, we know that no other rebind effort
Li Zefan3b6766f2009-04-02 16:57:51 -07001069 * will be contending for the global variable cpuset_being_rebound.
Paul Jackson42253992006-01-08 01:01:59 -08001070 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
Paul Jackson04c19fa2006-01-08 01:02:00 -08001071 * is idempotent. Also migrate pages in each mm to new nodes.
Paul Jackson42253992006-01-08 01:01:59 -08001072 */
Li Zefan010cfac2009-04-02 16:57:52 -07001073 cgroup_scan_tasks(&scan);
Paul Jackson42253992006-01-08 01:01:59 -08001074
Paul Menage2df167a2008-02-07 00:14:45 -08001075 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
Paul Menage8793d852007-10-18 23:39:39 -07001076 cpuset_being_rebound = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpuset's mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind the task's mempolicy and any vma
 * mempolicies, and if the cpuset is marked 'memory_migrate',
 * migrate the tasks' pages to the new memory.
 *
 * Call with cpuset_mutex held.  May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpuset's new mems_allowed.
 */
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
Miao Xie0b2f6302008-07-25 01:47:21 -07001094{
Miao Xie53feb292010-03-23 13:35:35 -07001095 NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
Miao Xie0b2f6302008-07-25 01:47:21 -07001096 int retval;
Li Zefan010cfac2009-04-02 16:57:52 -07001097 struct ptr_heap heap;
Miao Xie0b2f6302008-07-25 01:47:21 -07001098
Miao Xie53feb292010-03-23 13:35:35 -07001099 if (!oldmem)
1100 return -ENOMEM;
1101
Miao Xie0b2f6302008-07-25 01:47:21 -07001102 /*
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08001103 * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];
Miao Xie0b2f6302008-07-25 01:47:21 -07001104 * it's read-only
1105 */
Miao Xie53feb292010-03-23 13:35:35 -07001106 if (cs == &top_cpuset) {
1107 retval = -EACCES;
1108 goto done;
1109 }
Miao Xie0b2f6302008-07-25 01:47:21 -07001110
Miao Xie0b2f6302008-07-25 01:47:21 -07001111 /*
1112 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1113 * Since nodelist_parse() fails on an empty mask, we special case
1114 * that parsing. The validate_change() call ensures that cpusets
1115 * with tasks have memory.
1116 */
1117 if (!*buf) {
Li Zefan645fcc92009-01-07 18:08:43 -08001118 nodes_clear(trialcs->mems_allowed);
Miao Xie0b2f6302008-07-25 01:47:21 -07001119 } else {
Li Zefan645fcc92009-01-07 18:08:43 -08001120 retval = nodelist_parse(buf, trialcs->mems_allowed);
Miao Xie0b2f6302008-07-25 01:47:21 -07001121 if (retval < 0)
1122 goto done;
1123
Li Zefan645fcc92009-01-07 18:08:43 -08001124 if (!nodes_subset(trialcs->mems_allowed,
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08001125 node_states[N_MEMORY])) {
Miao Xie53feb292010-03-23 13:35:35 -07001126 retval = -EINVAL;
1127 goto done;
1128 }
Miao Xie0b2f6302008-07-25 01:47:21 -07001129 }
Miao Xie53feb292010-03-23 13:35:35 -07001130 *oldmem = cs->mems_allowed;
1131 if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
Miao Xie0b2f6302008-07-25 01:47:21 -07001132 retval = 0; /* Too easy - nothing to do */
1133 goto done;
1134 }
Li Zefan645fcc92009-01-07 18:08:43 -08001135 retval = validate_change(cs, trialcs);
Miao Xie0b2f6302008-07-25 01:47:21 -07001136 if (retval < 0)
1137 goto done;
1138
Li Zefan010cfac2009-04-02 16:57:52 -07001139 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1140 if (retval < 0)
1141 goto done;
1142
Miao Xie0b2f6302008-07-25 01:47:21 -07001143 mutex_lock(&callback_mutex);
Li Zefan645fcc92009-01-07 18:08:43 -08001144 cs->mems_allowed = trialcs->mems_allowed;
Miao Xie0b2f6302008-07-25 01:47:21 -07001145 mutex_unlock(&callback_mutex);
1146
Miao Xie53feb292010-03-23 13:35:35 -07001147 update_tasks_nodemask(cs, oldmem, &heap);
Li Zefan010cfac2009-04-02 16:57:52 -07001148
1149 heap_free(&heap);
Miao Xie0b2f6302008-07-25 01:47:21 -07001150done:
Miao Xie53feb292010-03-23 13:35:35 -07001151 NODEMASK_FREE(oldmem);
Miao Xie0b2f6302008-07-25 01:47:21 -07001152 return retval;
1153}
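/*
 * Illustrative only: from userspace, update_nodemask() is reached by
 * writing a nodelist to a cpuset's "mems" file ("mygroup" below is a
 * made-up cpuset, and /dev/cpuset the conventional mount point):
 *
 *	echo 0-1,3 > /dev/cpuset/mygroup/mems
 *
 * nodelist_parse() accepts the usual comma/range syntax; an empty
 * write clears mems_allowed, which validate_change() permits only if
 * the cpuset has no tasks.
 */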
1154
Paul Menage8793d852007-10-18 23:39:39 -07001155int current_cpuset_is_being_rebound(void)
1156{
1157 return task_cs(current) == cpuset_being_rebound;
1158}
1159
Paul Menage5be7a472008-05-06 20:42:41 -07001160static int update_relax_domain_level(struct cpuset *cs, s64 val)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001161{
Paul Menagedb7f47c2009-04-02 16:57:55 -07001162#ifdef CONFIG_SMP
Peter Zijlstra60495e72011-04-07 14:10:04 +02001163 if (val < -1 || val >= sched_domain_level_max)
Li Zefan30e0e172008-05-13 10:27:17 +08001164 return -EINVAL;
Paul Menagedb7f47c2009-04-02 16:57:55 -07001165#endif
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001166
1167 if (val != cs->relax_domain_level) {
1168 cs->relax_domain_level = val;
Li Zefan300ed6c2009-01-07 18:08:44 -08001169 if (!cpumask_empty(cs->cpus_allowed) &&
1170 is_sched_load_balance(cs))
Tejun Heo699140b2013-01-07 08:51:07 -08001171 rebuild_sched_domains_locked();
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001172 }
1173
1174 return 0;
1175}
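/*
 * Userspace sketch (level semantics are described in
 * Documentation/cgroups/cpusets.txt): the value arrives through the
 * "sched_relax_domain_level" file, where -1 requests the system
 * default and larger values widen the range searched for an idle CPU:
 *
 *	echo -1 > /dev/cpuset/mygroup/sched_relax_domain_level
 *
 * ("mygroup" is again a made-up cpuset name.)
 */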
1176
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001177/*
Miao Xie950592f2009-06-16 15:31:47 -07001178 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1179 * @tsk: task to be updated
1180 * @scan: struct cgroup_scanner containing the cgroup of the task
1181 *
1182 * Called by cgroup_scan_tasks() for each task in a cgroup.
1183 *
1184 * We don't need to re-check for the cgroup/cpuset membership, since we're
Tejun Heo5d21cc22013-01-07 08:51:08 -08001185 * holding cpuset_mutex at this point.
Miao Xie950592f2009-06-16 15:31:47 -07001186 */
1187static void cpuset_change_flag(struct task_struct *tsk,
1188 struct cgroup_scanner *scan)
1189{
1190 cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
1191}
1192
1193/*
1194 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1195 * @cs: the cpuset in which each task's spread flags need to be changed
1196 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1197 *
Tejun Heo5d21cc22013-01-07 08:51:08 -08001198 * Called with cpuset_mutex held
Miao Xie950592f2009-06-16 15:31:47 -07001199 *
1200 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1201 * calling callback functions for each.
1202 *
1203 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1204 * if @heap != NULL.
1205 */
1206static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1207{
1208 struct cgroup_scanner scan;
1209
1210 scan.cg = cs->css.cgroup;
1211 scan.test_task = NULL;
1212 scan.process_task = cpuset_change_flag;
1213 scan.heap = heap;
1214 cgroup_scan_tasks(&scan);
1215}
1216
1217/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 * update_flag - read a 0 or a 1 in a file and update associated flag
Paul Menage78608362008-04-29 01:00:26 -07001219 * @bit: the bit to update (see cpuset_flagbits_t)
1220 * @cs: the cpuset to update
1221 * @turning_on: whether the flag is being set or cleared
Paul Jackson053199e2005-10-30 15:02:30 -08001222 *
Tejun Heo5d21cc22013-01-07 08:51:08 -08001223 * Call with cpuset_mutex held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 */
1225
Paul Menage700fe1a2008-04-29 01:00:00 -07001226static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1227 int turning_on)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228{
Li Zefan645fcc92009-01-07 18:08:43 -08001229 struct cpuset *trialcs;
Rakib Mullick40b6a762008-10-18 20:28:18 -07001230 int balance_flag_changed;
Miao Xie950592f2009-06-16 15:31:47 -07001231 int spread_flag_changed;
1232 struct ptr_heap heap;
1233 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234
Li Zefan645fcc92009-01-07 18:08:43 -08001235 trialcs = alloc_trial_cpuset(cs);
1236 if (!trialcs)
1237 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238
Li Zefan645fcc92009-01-07 18:08:43 -08001239 if (turning_on)
1240 set_bit(bit, &trialcs->flags);
1241 else
1242 clear_bit(bit, &trialcs->flags);
1243
1244 err = validate_change(cs, trialcs);
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -07001245 if (err < 0)
Li Zefan645fcc92009-01-07 18:08:43 -08001246 goto out;
Paul Jackson029190c2007-10-18 23:40:20 -07001247
Miao Xie950592f2009-06-16 15:31:47 -07001248 err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1249 if (err < 0)
1250 goto out;
1251
Paul Jackson029190c2007-10-18 23:40:20 -07001252 balance_flag_changed = (is_sched_load_balance(cs) !=
Li Zefan645fcc92009-01-07 18:08:43 -08001253 is_sched_load_balance(trialcs));
Paul Jackson029190c2007-10-18 23:40:20 -07001254
Miao Xie950592f2009-06-16 15:31:47 -07001255 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1256 || (is_spread_page(cs) != is_spread_page(trialcs)));
1257
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001258 mutex_lock(&callback_mutex);
Li Zefan645fcc92009-01-07 18:08:43 -08001259 cs->flags = trialcs->flags;
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001260 mutex_unlock(&callback_mutex);
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -07001261
Li Zefan300ed6c2009-01-07 18:08:44 -08001262 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
Tejun Heo699140b2013-01-07 08:51:07 -08001263 rebuild_sched_domains_locked();
Paul Jackson029190c2007-10-18 23:40:20 -07001264
Miao Xie950592f2009-06-16 15:31:47 -07001265 if (spread_flag_changed)
1266 update_tasks_flags(cs, &heap);
1267 heap_free(&heap);
Li Zefan645fcc92009-01-07 18:08:43 -08001268out:
1269 free_trial_cpuset(trialcs);
1270 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271}
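/*
 * Example from within this file: besides backing the boolean control
 * files, update_flag() is called internally, e.g. cpuset_css_offline()
 * below simulates turning load balancing off with:
 *
 *	update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 */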
1272
Paul Jackson053199e2005-10-30 15:02:30 -08001273/*
Adrian Bunk80f72282006-06-30 18:27:16 +02001274 * Frequency meter - How fast is some event occurring?
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001275 *
1276 * These routines manage a digitally filtered, constant time based,
1277 * event frequency meter. There are four routines:
1278 * fmeter_init() - initialize a frequency meter.
1279 * fmeter_markevent() - called each time the event happens.
1280 * fmeter_getrate() - returns the recent rate of such events.
1281 * fmeter_update() - internal routine used to update fmeter.
1282 *
1283 * A common data structure is passed to each of these routines,
1284 * which is used to keep track of the state required to manage the
1285 * frequency meter and its digital filter.
1286 *
1287 * The filter works on the number of events marked per unit time.
1288 * The filter is single-pole low-pass recursive (IIR). The time unit
1289 * is 1 second. Arithmetic is done using 32-bit integers scaled to
1290 * simulate 3 decimal digits of precision (multiplied by 1000).
1291 *
1292 * With an FM_COEF of 933, and a time base of 1 second, the filter
1293 * has a half-life of 10 seconds, meaning that if the events quit
1294 * happening, then the rate returned from the fmeter_getrate()
1295 * will be cut in half each 10 seconds, until it converges to zero.
1296 *
1297 * It is not worth doing a real infinitely recursive filter. If more
1298 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1299 * just compute FM_MAXTICKS ticks worth, by which point the level
1300 * will be stable.
1301 *
1302 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1303 * arithmetic overflow in the fmeter_update() routine.
1304 *
1305 * Given the simple 32 bit integer arithmetic used, this meter works
1306 * best for reporting rates between one per millisecond (msec) and
1307 * one per 32 (approx) seconds. At constant rates faster than one
1308 * per msec it maxes out at values just under 1,000,000. At constant
1309 * rates between one per msec, and one per second it will stabilize
1310 * to a value N*1000, where N is the rate of events per second.
1311 * At constant rates between one per second and one per 32 seconds,
1312 * it will be choppy, moving up on the seconds that have an event,
1313 * and then decaying until the next event. At rates slower than
1314 * about one in 32 seconds, it decays all the way back to zero between
1315 * each event.
1316 */
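/*
 * Worked example (illustrative, not part of the algorithm): with
 * FM_COEF = 933 the per-second decay factor is 0.933, and
 * 0.933^10 ~= 0.50, which is where the 10 second half-life above comes
 * from.  At a steady N events/sec the filter converges to N * 1000;
 * e.g. for N = 5 with val already at 5000, one tick gives
 *
 *	val  = (933 * 5000) / 1000;		decay to 4665
 *	val += ((1000 - 933) * 5000) / 1000;	input adds 335
 *
 * leaving val at 5000, i.e. fmeter_getrate() keeps reporting 5000.
 */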
1317
1318#define FM_COEF 933 /* coefficient for half-life of 10 secs */
1319#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1320#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
1321#define FM_SCALE 1000 /* faux fixed point scale */
1322
1323/* Initialize a frequency meter */
1324static void fmeter_init(struct fmeter *fmp)
1325{
1326 fmp->cnt = 0;
1327 fmp->val = 0;
1328 fmp->time = 0;
1329 spin_lock_init(&fmp->lock);
1330}
1331
1332/* Internal meter update - process cnt events and update value */
1333static void fmeter_update(struct fmeter *fmp)
1334{
1335 time_t now = get_seconds();
1336 time_t ticks = now - fmp->time;
1337
1338 if (ticks == 0)
1339 return;
1340
1341 ticks = min(FM_MAXTICKS, ticks);
1342 while (ticks-- > 0)
1343 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1344 fmp->time = now;
1345
1346 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1347 fmp->cnt = 0;
1348}
1349
1350/* Process any previous ticks, then bump cnt by one (times scale). */
1351static void fmeter_markevent(struct fmeter *fmp)
1352{
1353 spin_lock(&fmp->lock);
1354 fmeter_update(fmp);
1355 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1356 spin_unlock(&fmp->lock);
1357}
1358
1359/* Process any previous ticks, then return current value. */
1360static int fmeter_getrate(struct fmeter *fmp)
1361{
1362 int val;
1363
1364 spin_lock(&fmp->lock);
1365 fmeter_update(fmp);
1366 val = fmp->val;
1367 spin_unlock(&fmp->lock);
1368 return val;
1369}
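/*
 * Typical calling pattern (sketch): mark events as they happen and
 * sample the filtered rate elsewhere.  The memory_pressure machinery
 * does roughly
 *
 *	fmeter_markevent(&cs->fmeter);		on each reclaim event
 *	...
 *	val = fmeter_getrate(&cs->fmeter);	when userspace reads the
 *						"memory_pressure" file
 *
 * (see the FILE_MEMORY_PRESSURE case in cpuset_read_u64() below).
 */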
1370
Tejun Heo5d21cc22013-01-07 08:51:08 -08001371/* Called by cgroups to determine if a cpuset is usable; takes cpuset_mutex */
Li Zefan761b3ef2012-01-31 13:47:36 +08001372static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
Ben Blumf780bdb2011-05-26 16:25:19 -07001373{
Tejun Heo2f7ee562011-12-12 18:12:21 -08001374 struct cpuset *cs = cgroup_cs(cgrp);
Tejun Heobb9d97b2011-12-12 18:12:21 -08001375 struct task_struct *task;
1376 int ret;
Ben Blumf780bdb2011-05-26 16:25:19 -07001377
Tejun Heo5d21cc22013-01-07 08:51:08 -08001378 mutex_lock(&cpuset_mutex);
1379
1380 ret = -ENOSPC;
Ben Blumbe367d02009-09-23 15:56:31 -07001381 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
Tejun Heo5d21cc22013-01-07 08:51:08 -08001382 goto out_unlock;
Ben Blumbe367d02009-09-23 15:56:31 -07001383
Tejun Heobb9d97b2011-12-12 18:12:21 -08001384 cgroup_taskset_for_each(task, cgrp, tset) {
1385 /*
1386 * Kthreads bound to specific cpus cannot be moved to a new
1387 * cpuset; we cannot change their cpu affinity and
1388 * isolating such threads by their set of allowed nodes is
1389 * unnecessary. Thus, cpusets are not applicable for such
1390 * threads. This prevents checking for success of
1391 * set_cpus_allowed_ptr() on all attached tasks before
1392 * cpus_allowed may be changed.
1393 */
Tejun Heo5d21cc22013-01-07 08:51:08 -08001394 ret = -EINVAL;
Tejun Heobb9d97b2011-12-12 18:12:21 -08001395 if (task->flags & PF_THREAD_BOUND)
Tejun Heo5d21cc22013-01-07 08:51:08 -08001396 goto out_unlock;
1397 ret = security_task_setscheduler(task);
1398 if (ret)
1399 goto out_unlock;
Tejun Heobb9d97b2011-12-12 18:12:21 -08001400 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401
Tejun Heo452477f2013-01-07 08:51:07 -08001402 /*
1403 * Mark attach is in progress. This makes validate_change() fail
1404 * changes which zero cpus/mems_allowed.
1405 */
1406 cs->attach_in_progress++;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001407 ret = 0;
1408out_unlock:
1409 mutex_unlock(&cpuset_mutex);
1410 return ret;
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001411}
1412
Tejun Heo452477f2013-01-07 08:51:07 -08001413static void cpuset_cancel_attach(struct cgroup *cgrp,
1414 struct cgroup_taskset *tset)
1415{
Tejun Heo5d21cc22013-01-07 08:51:08 -08001416 mutex_lock(&cpuset_mutex);
Tejun Heo452477f2013-01-07 08:51:07 -08001417 cgroup_cs(cgrp)->attach_in_progress--;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001418 mutex_unlock(&cpuset_mutex);
Tejun Heo452477f2013-01-07 08:51:07 -08001419}
1420
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001421/*
Tejun Heo5d21cc22013-01-07 08:51:08 -08001422 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001423 * but we can't allocate it dynamically there. Define it globally and
1424 * allocate from cpuset_init().
1425 */
1426static cpumask_var_t cpus_attach;
1427
1428static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1429{
Tejun Heo5d21cc22013-01-07 08:51:08 -08001430 /* static bufs protected by cpuset_mutex */
Tejun Heo4e4c9a12013-01-07 08:51:07 -08001431 static nodemask_t cpuset_attach_nodemask_from;
1432 static nodemask_t cpuset_attach_nodemask_to;
1433 struct mm_struct *mm;
1434 struct task_struct *task;
1435 struct task_struct *leader = cgroup_taskset_first(tset);
1436 struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
1437 struct cpuset *cs = cgroup_cs(cgrp);
1438 struct cpuset *oldcs = cgroup_cs(oldcgrp);
1439
Tejun Heo5d21cc22013-01-07 08:51:08 -08001440 mutex_lock(&cpuset_mutex);
1441
Tejun Heo94196f52011-12-12 18:12:22 -08001442 /* prepare for attach */
Ben Blumf780bdb2011-05-26 16:25:19 -07001443 if (cs == &top_cpuset)
1444 cpumask_copy(cpus_attach, cpu_possible_mask);
1445 else
1446 guarantee_online_cpus(cs, cpus_attach);
1447
1448 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
Tejun Heo94196f52011-12-12 18:12:22 -08001449
Tejun Heobb9d97b2011-12-12 18:12:21 -08001450 cgroup_taskset_for_each(task, cgrp, tset) {
1451 /*
1452 * can_attach beforehand should guarantee that this doesn't
1453 * fail. TODO: have a better way to handle failure here
1454 */
1455 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1456
1457 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1458 cpuset_update_task_spread_flag(cs, task);
1459 }
David Quigley22fb52d2006-06-23 02:04:00 -07001460
Ben Blumf780bdb2011-05-26 16:25:19 -07001461 /*
1462 * Change mm, possibly for multiple threads in a threadgroup. This is
1463 * expensive and may sleep.
1464 */
1465 cpuset_attach_nodemask_from = oldcs->mems_allowed;
1466 cpuset_attach_nodemask_to = cs->mems_allowed;
Tejun Heobb9d97b2011-12-12 18:12:21 -08001467 mm = get_task_mm(leader);
Paul Jackson42253992006-01-08 01:01:59 -08001468 if (mm) {
Ben Blumf780bdb2011-05-26 16:25:19 -07001469 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
Paul Jackson2741a552006-03-31 02:30:51 -08001470 if (is_memory_migrate(cs))
Ben Blumf780bdb2011-05-26 16:25:19 -07001471 cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
1472 &cpuset_attach_nodemask_to);
Paul Jackson42253992006-01-08 01:01:59 -08001473 mmput(mm);
1474 }
Tejun Heo452477f2013-01-07 08:51:07 -08001475
1476 cs->attach_in_progress--;
Tejun Heo02bb5862013-01-07 08:51:08 -08001477
1478 /*
1479 * We may have raced with CPU/memory hotunplug. Trigger hotplug
1480 * propagation if @cs doesn't have any CPU or memory. It will move
1481 * the newly added tasks to the nearest parent which can execute.
1482 */
1483 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1484 schedule_cpuset_propagate_hotplug(cs);
Tejun Heo5d21cc22013-01-07 08:51:08 -08001485
1486 mutex_unlock(&cpuset_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487}
1488
1489/* The various types of files and directories in a cpuset file system */
1490
1491typedef enum {
Paul Jackson45b07ef2006-01-08 01:00:56 -08001492 FILE_MEMORY_MIGRATE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 FILE_CPULIST,
1494 FILE_MEMLIST,
1495 FILE_CPU_EXCLUSIVE,
1496 FILE_MEM_EXCLUSIVE,
Paul Menage78608362008-04-29 01:00:26 -07001497 FILE_MEM_HARDWALL,
Paul Jackson029190c2007-10-18 23:40:20 -07001498 FILE_SCHED_LOAD_BALANCE,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001499 FILE_SCHED_RELAX_DOMAIN_LEVEL,
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001500 FILE_MEMORY_PRESSURE_ENABLED,
1501 FILE_MEMORY_PRESSURE,
Paul Jackson825a46a2006-03-24 03:16:03 -08001502 FILE_SPREAD_PAGE,
1503 FILE_SPREAD_SLAB,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504} cpuset_filetype_t;
1505
Paul Menage700fe1a2008-04-29 01:00:00 -07001506static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1507{
Paul Menage700fe1a2008-04-29 01:00:00 -07001508 struct cpuset *cs = cgroup_cs(cgrp);
1509 cpuset_filetype_t type = cft->private;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001510 int retval = -ENODEV;
Paul Menage700fe1a2008-04-29 01:00:00 -07001511
Tejun Heo5d21cc22013-01-07 08:51:08 -08001512 mutex_lock(&cpuset_mutex);
1513 if (!is_cpuset_online(cs))
1514 goto out_unlock;
Paul Menage700fe1a2008-04-29 01:00:00 -07001515
1516 switch (type) {
1517 case FILE_CPU_EXCLUSIVE:
1518 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1519 break;
1520 case FILE_MEM_EXCLUSIVE:
1521 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1522 break;
Paul Menage78608362008-04-29 01:00:26 -07001523 case FILE_MEM_HARDWALL:
1524 retval = update_flag(CS_MEM_HARDWALL, cs, val);
1525 break;
Paul Menage700fe1a2008-04-29 01:00:00 -07001526 case FILE_SCHED_LOAD_BALANCE:
1527 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1528 break;
1529 case FILE_MEMORY_MIGRATE:
1530 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1531 break;
1532 case FILE_MEMORY_PRESSURE_ENABLED:
1533 cpuset_memory_pressure_enabled = !!val;
1534 break;
1535 case FILE_MEMORY_PRESSURE:
1536 retval = -EACCES;
1537 break;
1538 case FILE_SPREAD_PAGE:
1539 retval = update_flag(CS_SPREAD_PAGE, cs, val);
Paul Menage700fe1a2008-04-29 01:00:00 -07001540 break;
1541 case FILE_SPREAD_SLAB:
1542 retval = update_flag(CS_SPREAD_SLAB, cs, val);
Paul Menage700fe1a2008-04-29 01:00:00 -07001543 break;
1544 default:
1545 retval = -EINVAL;
1546 break;
1547 }
Tejun Heo5d21cc22013-01-07 08:51:08 -08001548out_unlock:
1549 mutex_unlock(&cpuset_mutex);
Paul Menage700fe1a2008-04-29 01:00:00 -07001550 return retval;
1551}
1552
Paul Menage5be7a472008-05-06 20:42:41 -07001553static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1554{
Paul Menage5be7a472008-05-06 20:42:41 -07001555 struct cpuset *cs = cgroup_cs(cgrp);
1556 cpuset_filetype_t type = cft->private;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001557 int retval = -ENODEV;
Paul Menage5be7a472008-05-06 20:42:41 -07001558
Tejun Heo5d21cc22013-01-07 08:51:08 -08001559 mutex_lock(&cpuset_mutex);
1560 if (!is_cpuset_online(cs))
1561 goto out_unlock;
Paul Menagee3712392008-07-25 01:47:02 -07001562
Paul Menage5be7a472008-05-06 20:42:41 -07001563 switch (type) {
1564 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1565 retval = update_relax_domain_level(cs, val);
1566 break;
1567 default:
1568 retval = -EINVAL;
1569 break;
1570 }
Tejun Heo5d21cc22013-01-07 08:51:08 -08001571out_unlock:
1572 mutex_unlock(&cpuset_mutex);
Paul Menage5be7a472008-05-06 20:42:41 -07001573 return retval;
1574}
1575
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576/*
Paul Menagee3712392008-07-25 01:47:02 -07001577 * Common handling for a write to a "cpus" or "mems" file.
1578 */
1579static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1580 const char *buf)
1581{
Li Zefan645fcc92009-01-07 18:08:43 -08001582 struct cpuset *cs = cgroup_cs(cgrp);
1583 struct cpuset *trialcs;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001584 int retval = -ENODEV;
Paul Menagee3712392008-07-25 01:47:02 -07001585
Tejun Heo3a5a6d02013-01-07 08:51:07 -08001586 /*
1587 * CPU or memory hotunplug may leave @cs w/o any execution
1588 * resources, in which case the hotplug code asynchronously updates
1589 * configuration and transfers all tasks to the nearest ancestor
1590 * which can execute.
1591 *
1592 * As writes to "cpus" or "mems" may restore @cs's execution
1593 * resources, wait for the previously scheduled operations before
1594 * proceeding, so that we don't end up repeatedly removing tasks added
1595 * after execution capability is restored.
Tejun Heo02bb5862013-01-07 08:51:08 -08001596 *
1597 * Flushing cpuset_hotplug_work is enough to synchronize against
1598 * hotplug handling; however, cpuset_attach() may schedule
1599 * propagation work directly. Flush the workqueue too.
Tejun Heo3a5a6d02013-01-07 08:51:07 -08001600 */
1601 flush_work(&cpuset_hotplug_work);
Tejun Heo02bb5862013-01-07 08:51:08 -08001602 flush_workqueue(cpuset_propagate_hotplug_wq);
Tejun Heo3a5a6d02013-01-07 08:51:07 -08001603
Tejun Heo5d21cc22013-01-07 08:51:08 -08001604 mutex_lock(&cpuset_mutex);
1605 if (!is_cpuset_online(cs))
1606 goto out_unlock;
Paul Menagee3712392008-07-25 01:47:02 -07001607
Li Zefan645fcc92009-01-07 18:08:43 -08001608 trialcs = alloc_trial_cpuset(cs);
Li Zefanb75f38d2011-03-04 17:36:21 -08001609 if (!trialcs) {
1610 retval = -ENOMEM;
Tejun Heo5d21cc22013-01-07 08:51:08 -08001611 goto out_unlock;
Li Zefanb75f38d2011-03-04 17:36:21 -08001612 }
Li Zefan645fcc92009-01-07 18:08:43 -08001613
Paul Menagee3712392008-07-25 01:47:02 -07001614 switch (cft->private) {
1615 case FILE_CPULIST:
Li Zefan645fcc92009-01-07 18:08:43 -08001616 retval = update_cpumask(cs, trialcs, buf);
Paul Menagee3712392008-07-25 01:47:02 -07001617 break;
1618 case FILE_MEMLIST:
Li Zefan645fcc92009-01-07 18:08:43 -08001619 retval = update_nodemask(cs, trialcs, buf);
Paul Menagee3712392008-07-25 01:47:02 -07001620 break;
1621 default:
1622 retval = -EINVAL;
1623 break;
1624 }
Li Zefan645fcc92009-01-07 18:08:43 -08001625
1626 free_trial_cpuset(trialcs);
Tejun Heo5d21cc22013-01-07 08:51:08 -08001627out_unlock:
1628 mutex_unlock(&cpuset_mutex);
Paul Menagee3712392008-07-25 01:47:02 -07001629 return retval;
1630}
1631
1632/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 * These ascii lists should be read in a single call, by using a user
1634 * buffer large enough to hold the entire map. If read in smaller
1635 * chunks, there is no guarantee of atomicity. Since the display format
1636 * used (a list of ranges of sequential numbers) is variable length,
1637 * and since these maps can change value dynamically, one could read
1638 * gibberish by doing partial reads while a list was changing.
1639 * A single large read to a buffer that crosses a page boundary is
1640 * ok, because the result being copied to user land is not recomputed
1641 * across a page fault.
1642 */
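/*
 * Reader sketch (illustrative): use a single buffer large enough for
 * the whole list,
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * rather than looping over small chunks, so a concurrent rewrite of
 * the mask can never be observed half old, half new.
 */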
1643
Li Zefan9303e0c2011-03-23 16:42:45 -07001644static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645{
Li Zefan9303e0c2011-03-23 16:42:45 -07001646 size_t count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001648 mutex_lock(&callback_mutex);
Li Zefan9303e0c2011-03-23 16:42:45 -07001649 count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001650 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Li Zefan9303e0c2011-03-23 16:42:45 -07001652 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653}
1654
Li Zefan9303e0c2011-03-23 16:42:45 -07001655static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656{
Li Zefan9303e0c2011-03-23 16:42:45 -07001657 size_t count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001659 mutex_lock(&callback_mutex);
Li Zefan9303e0c2011-03-23 16:42:45 -07001660 count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001661 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
Li Zefan9303e0c2011-03-23 16:42:45 -07001663 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664}
1665
Paul Menage8793d852007-10-18 23:39:39 -07001666static ssize_t cpuset_common_file_read(struct cgroup *cont,
1667 struct cftype *cft,
1668 struct file *file,
1669 char __user *buf,
1670 size_t nbytes, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671{
Paul Menage8793d852007-10-18 23:39:39 -07001672 struct cpuset *cs = cgroup_cs(cont);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 cpuset_filetype_t type = cft->private;
1674 char *page;
1675 ssize_t retval = 0;
1676 char *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
Mel Gormane12ba742007-10-16 01:25:52 -07001678 if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 return -ENOMEM;
1680
1681 s = page;
1682
1683 switch (type) {
1684 case FILE_CPULIST:
1685 s += cpuset_sprintf_cpulist(s, cs);
1686 break;
1687 case FILE_MEMLIST:
1688 s += cpuset_sprintf_memlist(s, cs);
1689 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 default:
1691 retval = -EINVAL;
1692 goto out;
1693 }
1694 *s++ = '\n';
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
Al Viroeacaa1f2005-09-30 03:26:43 +01001696 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697out:
1698 free_page((unsigned long)page);
1699 return retval;
1700}
1701
Paul Menage700fe1a2008-04-29 01:00:00 -07001702static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1703{
1704 struct cpuset *cs = cgroup_cs(cont);
1705 cpuset_filetype_t type = cft->private;
1706 switch (type) {
1707 case FILE_CPU_EXCLUSIVE:
1708 return is_cpu_exclusive(cs);
1709 case FILE_MEM_EXCLUSIVE:
1710 return is_mem_exclusive(cs);
Paul Menage78608362008-04-29 01:00:26 -07001711 case FILE_MEM_HARDWALL:
1712 return is_mem_hardwall(cs);
Paul Menage700fe1a2008-04-29 01:00:00 -07001713 case FILE_SCHED_LOAD_BALANCE:
1714 return is_sched_load_balance(cs);
1715 case FILE_MEMORY_MIGRATE:
1716 return is_memory_migrate(cs);
1717 case FILE_MEMORY_PRESSURE_ENABLED:
1718 return cpuset_memory_pressure_enabled;
1719 case FILE_MEMORY_PRESSURE:
1720 return fmeter_getrate(&cs->fmeter);
1721 case FILE_SPREAD_PAGE:
1722 return is_spread_page(cs);
1723 case FILE_SPREAD_SLAB:
1724 return is_spread_slab(cs);
1725 default:
1726 BUG();
1727 }
Max Krasnyanskycf417142008-08-11 14:33:53 -07001728
1729 /* Unreachable but makes gcc happy */
1730 return 0;
Paul Menage700fe1a2008-04-29 01:00:00 -07001731}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732
Paul Menage5be7a472008-05-06 20:42:41 -07001733static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1734{
1735 struct cpuset *cs = cgroup_cs(cont);
1736 cpuset_filetype_t type = cft->private;
1737 switch (type) {
1738 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1739 return cs->relax_domain_level;
1740 default:
1741 BUG();
1742 }
Max Krasnyanskycf417142008-08-11 14:33:53 -07001743
1744 /* Unreachable but makes gcc happy */
1745 return 0;
Paul Menage5be7a472008-05-06 20:42:41 -07001746}
1747
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
1749/*
1750 * for the common functions, 'private' gives the type of file
1751 */
1752
Paul Menageaddf2c72008-04-29 01:00:26 -07001753static struct cftype files[] = {
1754 {
1755 .name = "cpus",
1756 .read = cpuset_common_file_read,
Paul Menagee3712392008-07-25 01:47:02 -07001757 .write_string = cpuset_write_resmask,
1758 .max_write_len = (100U + 6 * NR_CPUS),
Paul Menageaddf2c72008-04-29 01:00:26 -07001759 .private = FILE_CPULIST,
1760 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761
Paul Menageaddf2c72008-04-29 01:00:26 -07001762 {
1763 .name = "mems",
1764 .read = cpuset_common_file_read,
Paul Menagee3712392008-07-25 01:47:02 -07001765 .write_string = cpuset_write_resmask,
1766 .max_write_len = (100U + 6 * MAX_NUMNODES),
Paul Menageaddf2c72008-04-29 01:00:26 -07001767 .private = FILE_MEMLIST,
1768 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
Paul Menageaddf2c72008-04-29 01:00:26 -07001770 {
1771 .name = "cpu_exclusive",
1772 .read_u64 = cpuset_read_u64,
1773 .write_u64 = cpuset_write_u64,
1774 .private = FILE_CPU_EXCLUSIVE,
1775 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
Paul Menageaddf2c72008-04-29 01:00:26 -07001777 {
1778 .name = "mem_exclusive",
1779 .read_u64 = cpuset_read_u64,
1780 .write_u64 = cpuset_write_u64,
1781 .private = FILE_MEM_EXCLUSIVE,
1782 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
Paul Menageaddf2c72008-04-29 01:00:26 -07001784 {
Paul Menage78608362008-04-29 01:00:26 -07001785 .name = "mem_hardwall",
1786 .read_u64 = cpuset_read_u64,
1787 .write_u64 = cpuset_write_u64,
1788 .private = FILE_MEM_HARDWALL,
1789 },
1790
1791 {
Paul Menageaddf2c72008-04-29 01:00:26 -07001792 .name = "sched_load_balance",
1793 .read_u64 = cpuset_read_u64,
1794 .write_u64 = cpuset_write_u64,
1795 .private = FILE_SCHED_LOAD_BALANCE,
1796 },
Paul Jackson029190c2007-10-18 23:40:20 -07001797
Paul Menageaddf2c72008-04-29 01:00:26 -07001798 {
1799 .name = "sched_relax_domain_level",
Paul Menage5be7a472008-05-06 20:42:41 -07001800 .read_s64 = cpuset_read_s64,
1801 .write_s64 = cpuset_write_s64,
Paul Menageaddf2c72008-04-29 01:00:26 -07001802 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1803 },
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001804
Paul Menageaddf2c72008-04-29 01:00:26 -07001805 {
1806 .name = "memory_migrate",
1807 .read_u64 = cpuset_read_u64,
1808 .write_u64 = cpuset_write_u64,
1809 .private = FILE_MEMORY_MIGRATE,
1810 },
1811
1812 {
1813 .name = "memory_pressure",
1814 .read_u64 = cpuset_read_u64,
1815 .write_u64 = cpuset_write_u64,
1816 .private = FILE_MEMORY_PRESSURE,
Li Zefan099fca32009-04-02 16:57:29 -07001817 .mode = S_IRUGO,
Paul Menageaddf2c72008-04-29 01:00:26 -07001818 },
1819
1820 {
1821 .name = "memory_spread_page",
1822 .read_u64 = cpuset_read_u64,
1823 .write_u64 = cpuset_write_u64,
1824 .private = FILE_SPREAD_PAGE,
1825 },
1826
1827 {
1828 .name = "memory_spread_slab",
1829 .read_u64 = cpuset_read_u64,
1830 .write_u64 = cpuset_write_u64,
1831 .private = FILE_SPREAD_SLAB,
1832 },
Tejun Heo4baf6e32012-04-01 12:09:55 -07001833
1834 {
1835 .name = "memory_pressure_enabled",
1836 .flags = CFTYPE_ONLY_ON_ROOT,
1837 .read_u64 = cpuset_read_u64,
1838 .write_u64 = cpuset_write_u64,
1839 .private = FILE_MEMORY_PRESSURE_ENABLED,
1840 },
1841
1842 { } /* terminate */
Paul Jackson45b07ef2006-01-08 01:00:56 -08001843};
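/*
 * Naming note (assumption about the mount in use): through a cgroup
 * mount these entries carry the subsystem prefix, e.g. "cpuset.cpus"
 * and "cpuset.mems"; the legacy cpuset filesystem mounts cgroup with
 * the "noprefix" option, so there they appear as plain "cpus"/"mems".
 */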
1844
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845/*
Tejun Heo92fb9742012-11-19 08:13:38 -08001846 * cpuset_css_alloc - allocate a cpuset css
Paul Menage2df167a2008-02-07 00:14:45 -08001847 * @cont: control group that the new cpuset will be part of
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 */
1849
Tejun Heo92fb9742012-11-19 08:13:38 -08001850static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851{
Tejun Heoc8f699b2013-01-07 08:51:07 -08001852 struct cpuset *cs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853
Tejun Heoc8f699b2013-01-07 08:51:07 -08001854 if (!cont->parent)
Paul Menage8793d852007-10-18 23:39:39 -07001855 return &top_cpuset.css;
Tejun Heo033fa1c2012-11-19 08:13:39 -08001856
Tejun Heoc8f699b2013-01-07 08:51:07 -08001857 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 if (!cs)
Paul Menage8793d852007-10-18 23:39:39 -07001859 return ERR_PTR(-ENOMEM);
Li Zefan300ed6c2009-01-07 18:08:44 -08001860 if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1861 kfree(cs);
1862 return ERR_PTR(-ENOMEM);
1863 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
Paul Jackson029190c2007-10-18 23:40:20 -07001865 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
Li Zefan300ed6c2009-01-07 18:08:44 -08001866 cpumask_clear(cs->cpus_allowed);
Mike Travisf9a86fc2008-04-04 18:11:07 -07001867 nodes_clear(cs->mems_allowed);
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001868 fmeter_init(&cs->fmeter);
Tejun Heo8d033942013-01-07 08:51:07 -08001869 INIT_WORK(&cs->hotplug_work, cpuset_propagate_hotplug_workfn);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001870 cs->relax_domain_level = -1;
Tejun Heoc8f699b2013-01-07 08:51:07 -08001871 cs->parent = cgroup_cs(cont->parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872
Tejun Heoc8f699b2013-01-07 08:51:07 -08001873 return &cs->css;
1874}
1875
1876static int cpuset_css_online(struct cgroup *cgrp)
1877{
1878 struct cpuset *cs = cgroup_cs(cgrp);
1879 struct cpuset *parent = cs->parent;
Tejun Heoae8086c2013-01-07 08:51:07 -08001880 struct cpuset *tmp_cs;
1881 struct cgroup *pos_cg;
Tejun Heoc8f699b2013-01-07 08:51:07 -08001882
1883 if (!parent)
1884 return 0;
1885
Tejun Heo5d21cc22013-01-07 08:51:08 -08001886 mutex_lock(&cpuset_mutex);
1887
Tejun Heoefeb77b2013-01-07 08:51:07 -08001888 set_bit(CS_ONLINE, &cs->flags);
Tejun Heoc8f699b2013-01-07 08:51:07 -08001889 if (is_spread_page(parent))
1890 set_bit(CS_SPREAD_PAGE, &cs->flags);
1891 if (is_spread_slab(parent))
1892 set_bit(CS_SPREAD_SLAB, &cs->flags);
1893
Paul Jackson202f72d2006-01-08 01:01:57 -08001894 number_of_cpusets++;
Tejun Heo033fa1c2012-11-19 08:13:39 -08001895
Tejun Heoc8f699b2013-01-07 08:51:07 -08001896 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
Tejun Heo5d21cc22013-01-07 08:51:08 -08001897 goto out_unlock;
Tejun Heo033fa1c2012-11-19 08:13:39 -08001898
1899 /*
1900 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
1901 * set. This flag handling is implemented in cgroup core for
1902 * historical reasons - the flag may be specified during mount.
1903 *
1904 * Currently, if any sibling cpusets have exclusive cpus or mem, we
1905 * refuse to clone the configuration - thereby refusing to admit the
1906 * task, and as a result failing the sys_unshare() or
1907 * clone() which initiated it. If this becomes a problem for some
1908 * users who wish to allow that scenario, then this could be
1909 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1910 * (and likewise for mems) to the new cgroup.
1911 */
Tejun Heoae8086c2013-01-07 08:51:07 -08001912 rcu_read_lock();
1913 cpuset_for_each_child(tmp_cs, pos_cg, parent) {
1914 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
1915 rcu_read_unlock();
Tejun Heo5d21cc22013-01-07 08:51:08 -08001916 goto out_unlock;
Tejun Heoae8086c2013-01-07 08:51:07 -08001917 }
Tejun Heo033fa1c2012-11-19 08:13:39 -08001918 }
Tejun Heoae8086c2013-01-07 08:51:07 -08001919 rcu_read_unlock();
Tejun Heo033fa1c2012-11-19 08:13:39 -08001920
1921 mutex_lock(&callback_mutex);
1922 cs->mems_allowed = parent->mems_allowed;
1923 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
1924 mutex_unlock(&callback_mutex);
Tejun Heo5d21cc22013-01-07 08:51:08 -08001925out_unlock:
1926 mutex_unlock(&cpuset_mutex);
Tejun Heoc8f699b2013-01-07 08:51:07 -08001927 return 0;
1928}
1929
1930static void cpuset_css_offline(struct cgroup *cgrp)
1931{
1932 struct cpuset *cs = cgroup_cs(cgrp);
1933
Tejun Heo5d21cc22013-01-07 08:51:08 -08001934 mutex_lock(&cpuset_mutex);
Tejun Heoc8f699b2013-01-07 08:51:07 -08001935
1936 if (is_sched_load_balance(cs))
1937 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1938
1939 number_of_cpusets--;
Tejun Heoefeb77b2013-01-07 08:51:07 -08001940 clear_bit(CS_ONLINE, &cs->flags);
Tejun Heoc8f699b2013-01-07 08:51:07 -08001941
Tejun Heo5d21cc22013-01-07 08:51:08 -08001942 mutex_unlock(&cpuset_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943}
1944
Paul Jackson029190c2007-10-18 23:40:20 -07001945/*
Paul Jackson029190c2007-10-18 23:40:20 -07001946 * If the cpuset being removed has its flag 'sched_load_balance'
1947 * enabled, then simulate turning sched_load_balance off, which
Tejun Heo699140b2013-01-07 08:51:07 -08001948 * will call rebuild_sched_domains_locked().
Paul Jackson029190c2007-10-18 23:40:20 -07001949 */
1950
Tejun Heo92fb9742012-11-19 08:13:38 -08001951static void cpuset_css_free(struct cgroup *cont)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952{
Paul Menage8793d852007-10-18 23:39:39 -07001953 struct cpuset *cs = cgroup_cs(cont);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954
Li Zefan300ed6c2009-01-07 18:08:44 -08001955 free_cpumask_var(cs->cpus_allowed);
Paul Menage8793d852007-10-18 23:39:39 -07001956 kfree(cs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957}
1958
Paul Menage8793d852007-10-18 23:39:39 -07001959struct cgroup_subsys cpuset_subsys = {
1960 .name = "cpuset",
Tejun Heo92fb9742012-11-19 08:13:38 -08001961 .css_alloc = cpuset_css_alloc,
Tejun Heoc8f699b2013-01-07 08:51:07 -08001962 .css_online = cpuset_css_online,
1963 .css_offline = cpuset_css_offline,
Tejun Heo92fb9742012-11-19 08:13:38 -08001964 .css_free = cpuset_css_free,
Paul Menage8793d852007-10-18 23:39:39 -07001965 .can_attach = cpuset_can_attach,
Tejun Heo452477f2013-01-07 08:51:07 -08001966 .cancel_attach = cpuset_cancel_attach,
Paul Menage8793d852007-10-18 23:39:39 -07001967 .attach = cpuset_attach,
Paul Menage8793d852007-10-18 23:39:39 -07001968 .subsys_id = cpuset_subsys_id,
Tejun Heo4baf6e32012-04-01 12:09:55 -07001969 .base_cftypes = files,
Paul Menage8793d852007-10-18 23:39:39 -07001970 .early_init = 1,
1971};
1972
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973/**
1974 * cpuset_init - initialize cpusets at system boot
1975 *
1976 * Description: Initialize top_cpuset and the cpuset internal file system,
1977 **/
1978
1979int __init cpuset_init(void)
1980{
Paul Menage8793d852007-10-18 23:39:39 -07001981 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982
Miao Xie58568d22009-06-16 15:31:49 -07001983 if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
1984 BUG();
1985
Li Zefan300ed6c2009-01-07 18:08:44 -08001986 cpumask_setall(top_cpuset.cpus_allowed);
Mike Travisf9a86fc2008-04-04 18:11:07 -07001987 nodes_setall(top_cpuset.mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001989 fmeter_init(&top_cpuset.fmeter);
Paul Jackson029190c2007-10-18 23:40:20 -07001990 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001991 top_cpuset.relax_domain_level = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 err = register_filesystem(&cpuset_fs_type);
1994 if (err < 0)
Paul Menage8793d852007-10-18 23:39:39 -07001995 return err;
1996
Li Zefan2341d1b2009-01-07 18:08:42 -08001997 if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1998 BUG();
1999
Paul Jackson202f72d2006-01-08 01:01:57 -08002000 number_of_cpusets = 1;
Paul Menage8793d852007-10-18 23:39:39 -07002001 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002}
2003
Cliff Wickman956db3c2008-02-07 00:14:43 -08002004/**
2005 * cpuset_do_move_task - move a given task to another cpuset
2006 * @tsk: pointer to the task_struct of the task to move
2007 * @scan: struct cgroup_scanner whose ->data holds the destination cgroup
2008 *
2009 * Called by cgroup_scan_tasks() for each task in a cgroup.
2010 * Return nonzero to stop the walk through the tasks.
2011 */
Adrian Bunk9e0c9142008-04-29 01:00:25 -07002012static void cpuset_do_move_task(struct task_struct *tsk,
2013 struct cgroup_scanner *scan)
Cliff Wickman956db3c2008-02-07 00:14:43 -08002014{
Li Zefan7f81b1a2009-04-02 16:57:53 -07002015 struct cgroup *new_cgroup = scan->data;
Cliff Wickman956db3c2008-02-07 00:14:43 -08002016
Tejun Heo5d21cc22013-01-07 08:51:08 -08002017 cgroup_lock();
Li Zefan7f81b1a2009-04-02 16:57:53 -07002018 cgroup_attach_task(new_cgroup, tsk);
Tejun Heo5d21cc22013-01-07 08:51:08 -08002019 cgroup_unlock();
Cliff Wickman956db3c2008-02-07 00:14:43 -08002020}
2021
2022/**
2023 * move_member_tasks_to_cpuset - move tasks from one cpuset to another
2024 * @from: cpuset in which the tasks currently reside
2025 * @to: cpuset to which the tasks will be moved
2026 *
Tejun Heo5d21cc22013-01-07 08:51:08 -08002027 * Called with cpuset_mutex held
Paul Jacksonc8d9c902008-02-07 00:14:46 -08002028 * callback_mutex must not be held, as cpuset_attach() will take it.
Cliff Wickman956db3c2008-02-07 00:14:43 -08002029 *
2030 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
2031 * calling callback functions for each.
2032 */
2033static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
2034{
Li Zefan7f81b1a2009-04-02 16:57:53 -07002035 struct cgroup_scanner scan;
Cliff Wickman956db3c2008-02-07 00:14:43 -08002036
Li Zefan7f81b1a2009-04-02 16:57:53 -07002037 scan.cg = from->css.cgroup;
2038 scan.test_task = NULL; /* select all tasks in cgroup */
2039 scan.process_task = cpuset_do_move_task;
2040 scan.heap = NULL;
2041 scan.data = to->css.cgroup;
Cliff Wickman956db3c2008-02-07 00:14:43 -08002042
Li Zefan7f81b1a2009-04-02 16:57:53 -07002043 if (cgroup_scan_tasks(&scan))
Cliff Wickman956db3c2008-02-07 00:14:43 -08002044 printk(KERN_ERR "move_member_tasks_to_cpuset: "
2045 "cgroup_scan_tasks failed\n");
2046}
2047
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002048/*
Max Krasnyanskycf417142008-08-11 14:33:53 -07002049 * If the CPU and/or memory hotplug handlers below unplug any CPUs
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002050 * or memory nodes, we need to walk over the cpuset hierarchy,
2051 * removing that CPU or node from all cpusets. If this removes the
Cliff Wickman956db3c2008-02-07 00:14:43 -08002052 * last CPU or node from a cpuset, then move the tasks in the empty
2053 * cpuset to its next-highest non-empty parent.
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002054 */
Cliff Wickman956db3c2008-02-07 00:14:43 -08002055static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002056{
Cliff Wickman956db3c2008-02-07 00:14:43 -08002057 struct cpuset *parent;
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002058
Paul Jacksonc8d9c902008-02-07 00:14:46 -08002059 /*
Cliff Wickman956db3c2008-02-07 00:14:43 -08002060 * Find its next-highest non-empty parent (the top cpuset
2061 * has online cpus, so can't be empty).
2062 */
2063 parent = cs->parent;
Li Zefan300ed6c2009-01-07 18:08:44 -08002064 while (cpumask_empty(parent->cpus_allowed) ||
Paul Jacksonb4501292008-02-07 00:14:47 -08002065 nodes_empty(parent->mems_allowed))
Cliff Wickman956db3c2008-02-07 00:14:43 -08002066 parent = parent->parent;
Cliff Wickman956db3c2008-02-07 00:14:43 -08002067
2068 move_member_tasks_to_cpuset(cs, parent);
2069}
2070
2071/*
Srivatsa S. Bhat80d1fa62012-05-24 19:46:41 +05302072 * Helper function to traverse cpusets.
2073 * It can be used to walk the cpuset tree from top to bottom, completing
2074 * one layer before dropping down to the next (thus always processing a
2075 * node before any of its children).
2076 */
2077static struct cpuset *cpuset_next(struct list_head *queue)
2078{
2079 struct cpuset *cp;
2080 struct cpuset *child; /* scans child cpusets of cp */
2081 struct cgroup *cont;
2082
2083 if (list_empty(queue))
2084 return NULL;
2085
2086 cp = list_first_entry(queue, struct cpuset, stack_list);
2087 list_del(queue->next);
Tejun Heoae8086c2013-01-07 08:51:07 -08002088 rcu_read_lock();
2089 cpuset_for_each_child(child, cont, cp)
Srivatsa S. Bhat80d1fa62012-05-24 19:46:41 +05302090 list_add_tail(&child->stack_list, queue);
Tejun Heoae8086c2013-01-07 08:51:07 -08002091 rcu_read_unlock();
Srivatsa S. Bhat80d1fa62012-05-24 19:46:41 +05302092
2093 return cp;
2094}
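/*
 * Canonical walk (mirrors cpuset_hotplug_workfn() below; process() is
 * a placeholder for per-cpuset work):
 *
 *	LIST_HEAD(queue);
 *	struct cpuset *cp;
 *
 *	list_add_tail(&top_cpuset.stack_list, &queue);
 *	while ((cp = cpuset_next(&queue)))
 *		process(cp);	parents are visited before children
 */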
2095
Tejun Heodeb7aa32013-01-07 08:51:07 -08002096/**
Tejun Heo8d033942013-01-07 08:51:07 -08002097 * cpuset_propagate_hotplug_workfn - propagate CPU/memory hotplug to a cpuset
Tejun Heodeb7aa32013-01-07 08:51:07 -08002098 * @cs: cpuset in interest
Cliff Wickman956db3c2008-02-07 00:14:43 -08002099 *
Tejun Heodeb7aa32013-01-07 08:51:07 -08002100 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2101 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
2102 * all its tasks are moved to the nearest ancestor with both resources.
Cliff Wickman956db3c2008-02-07 00:14:43 -08002103 */
Tejun Heo8d033942013-01-07 08:51:07 -08002104static void cpuset_propagate_hotplug_workfn(struct work_struct *work)
Cliff Wickman956db3c2008-02-07 00:14:43 -08002105{
Tejun Heodeb7aa32013-01-07 08:51:07 -08002106 static cpumask_t off_cpus;
2107 static nodemask_t off_mems, tmp_mems;
Tejun Heo8d033942013-01-07 08:51:07 -08002108 struct cpuset *cs = container_of(work, struct cpuset, hotplug_work);
Tejun Heo5d21cc22013-01-07 08:51:08 -08002109 bool is_empty;
Cliff Wickman956db3c2008-02-07 00:14:43 -08002110
Tejun Heo5d21cc22013-01-07 08:51:08 -08002111 mutex_lock(&cpuset_mutex);
Cliff Wickman956db3c2008-02-07 00:14:43 -08002112
Tejun Heodeb7aa32013-01-07 08:51:07 -08002113 cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
2114 nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
Paul Jacksonb4501292008-02-07 00:14:47 -08002115
Tejun Heodeb7aa32013-01-07 08:51:07 -08002116 /* remove offline cpus from @cs */
2117 if (!cpumask_empty(&off_cpus)) {
2118 mutex_lock(&callback_mutex);
2119 cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
2120 mutex_unlock(&callback_mutex);
2121 update_tasks_cpumask(cs, NULL);
2122 }
Paul Jacksonb4501292008-02-07 00:14:47 -08002123
Tejun Heodeb7aa32013-01-07 08:51:07 -08002124 /* remove offline mems from @cs */
2125 if (!nodes_empty(off_mems)) {
2126 tmp_mems = cs->mems_allowed;
2127 mutex_lock(&callback_mutex);
2128 nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
2129 mutex_unlock(&callback_mutex);
2130 update_tasks_nodemask(cs, &tmp_mems, NULL);
2131 }
Miao Xief9b4fb82008-07-25 01:47:22 -07002132
Tejun Heo5d21cc22013-01-07 08:51:08 -08002133 is_empty = cpumask_empty(cs->cpus_allowed) ||
2134 nodes_empty(cs->mems_allowed);
Tejun Heo8d033942013-01-07 08:51:07 -08002135
Tejun Heo5d21cc22013-01-07 08:51:08 -08002136 mutex_unlock(&cpuset_mutex);
2137
2138 /*
2139 * If @cs became empty, move tasks to the nearest ancestor with
2140 * execution resources. This is full cgroup operation which will
2141 * also call back into cpuset. Should be done outside any lock.
2142 */
2143 if (is_empty)
2144 remove_tasks_in_empty_cpuset(cs);
Tejun Heo8d033942013-01-07 08:51:07 -08002145
2146 /* the following may free @cs, should be the last operation */
2147 css_put(&cs->css);
2148}
2149
2150/**
2151 * schedule_cpuset_propagate_hotplug - schedule hotplug propagation to a cpuset
2152 * @cs: cpuset of interest
2153 *
2154 * Schedule cpuset_propagate_hotplug_workfn() which will update CPU and
2155 * memory masks according to top_cpuset.
2156 */
2157static void schedule_cpuset_propagate_hotplug(struct cpuset *cs)
2158{
2159 /*
2160 * Pin @cs. The refcnt will be released when the work item
2161 * finishes executing.
2162 */
2163 if (!css_tryget(&cs->css))
2164 return;
2165
2166 /*
2167 * Queue @cs->hotplug_work. If already pending, lose the css ref.
2168 * cpuset_propagate_hotplug_wq is ordered and propagation will
2169 * happen in the order this function is called.
2170 */
2171 if (!queue_work(cpuset_propagate_hotplug_wq, &cs->hotplug_work))
2172 css_put(&cs->css);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002173}
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302174
Tejun Heodeb7aa32013-01-07 08:51:07 -08002175/**
Tejun Heo3a5a6d02013-01-07 08:51:07 -08002176 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
Tejun Heodeb7aa32013-01-07 08:51:07 -08002177 *
2178 * This function is called after either CPU or memory configuration has
2179 * changed and updates cpuset accordingly. The top_cpuset is always
2180 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
2181 * order to make cpusets transparent (of no effect) on systems that are
2182 * actively using CPU hotplug but making no active use of cpusets.
2183 *
2184 * Non-root cpusets are only affected by offlining. If any CPUs or memory
2185 * nodes have been taken down, schedule_cpuset_propagate_hotplug() is invoked on all
2186 * descendants.
2187 *
2188 * Note that CPU offlining during suspend is ignored. We don't modify
2189 * cpusets across suspend/resume cycles at all.
2190 */
Tejun Heo3a5a6d02013-01-07 08:51:07 -08002191static void cpuset_hotplug_workfn(struct work_struct *work)
Tejun Heodeb7aa32013-01-07 08:51:07 -08002192{
2193 static cpumask_t new_cpus, tmp_cpus;
2194 static nodemask_t new_mems, tmp_mems;
2195 bool cpus_updated, mems_updated;
2196 bool cpus_offlined, mems_offlined;
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302197
Tejun Heo5d21cc22013-01-07 08:51:08 -08002198 mutex_lock(&cpuset_mutex);
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302199
Tejun Heodeb7aa32013-01-07 08:51:07 -08002200 /* fetch the available cpus/mems and find out which changed how */
2201 cpumask_copy(&new_cpus, cpu_active_mask);
2202 new_mems = node_states[N_MEMORY];
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302203
Tejun Heodeb7aa32013-01-07 08:51:07 -08002204 cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
2205 cpus_offlined = cpumask_andnot(&tmp_cpus, top_cpuset.cpus_allowed,
2206 &new_cpus);
Paul Jacksonb4501292008-02-07 00:14:47 -08002207
Tejun Heodeb7aa32013-01-07 08:51:07 -08002208 mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
2209 nodes_andnot(tmp_mems, top_cpuset.mems_allowed, new_mems);
2210 mems_offlined = !nodes_empty(tmp_mems);
2211
2212 /* synchronize cpus_allowed to cpu_active_mask */
2213 if (cpus_updated) {
2214 mutex_lock(&callback_mutex);
2215 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2216 mutex_unlock(&callback_mutex);
2217 /* we don't mess with cpumasks of tasks in top_cpuset */
2218 }
2219
2220 /* synchronize mems_allowed to N_MEMORY */
2221 if (mems_updated) {
2222 tmp_mems = top_cpuset.mems_allowed;
2223 mutex_lock(&callback_mutex);
2224 top_cpuset.mems_allowed = new_mems;
2225 mutex_unlock(&callback_mutex);
2226 update_tasks_nodemask(&top_cpuset, &tmp_mems, NULL);
2227 }
2228
2229 /* if cpus or mems went down, we need to propagate to descendants */
2230 if (cpus_offlined || mems_offlined) {
2231 struct cpuset *cs;
2232 LIST_HEAD(queue);
2233
2234 list_add_tail(&top_cpuset.stack_list, &queue);
2235 while ((cs = cpuset_next(&queue)))
2236 if (cs != &top_cpuset)
Tejun Heo8d033942013-01-07 08:51:07 -08002237 schedule_cpuset_propagate_hotplug(cs);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002238 }
2239
Tejun Heo5d21cc22013-01-07 08:51:08 -08002240 mutex_unlock(&cpuset_mutex);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002241
Tejun Heo8d033942013-01-07 08:51:07 -08002242 /* wait for propagations to finish */
2243 flush_workqueue(cpuset_propagate_hotplug_wq);
2244
Tejun Heodeb7aa32013-01-07 08:51:07 -08002245 /* rebuild sched domains if cpus_allowed has changed */
2246 if (cpus_updated) {
2247 struct sched_domain_attr *attr;
2248 cpumask_var_t *doms;
2249 int ndoms;
2250
Tejun Heo5d21cc22013-01-07 08:51:08 -08002251 mutex_lock(&cpuset_mutex);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002252 ndoms = generate_sched_domains(&doms, &attr);
Tejun Heo5d21cc22013-01-07 08:51:08 -08002253 mutex_unlock(&cpuset_mutex);
Tejun Heodeb7aa32013-01-07 08:51:07 -08002254
2255 partition_sched_domains(ndoms, doms, attr);
Cliff Wickman956db3c2008-02-07 00:14:43 -08002256 }
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002257}
2258
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05302259void cpuset_update_active_cpus(bool cpu_online)
Paul Jackson4c4d50f2006-08-27 01:23:51 -07002260{
Tejun Heo3a5a6d02013-01-07 08:51:07 -08002261 /*
2262 * We're inside cpu hotplug critical region which usually nests
2263 * inside cgroup synchronization. Bounce actual hotplug processing
2264 * to a work item to avoid reverse locking order.
2265 *
2266 * We still need to do partition_sched_domains() synchronously;
2267 * otherwise, the scheduler will get confused and put tasks to the
2268 * dead CPU. Fall back to the default single domain.
2269 * cpuset_hotplug_workfn() will rebuild it as necessary.
2270 */
2271 partition_sched_domains(1, NULL, NULL);
2272 schedule_work(&cpuset_hotplug_work);
Paul Jackson4c4d50f2006-08-27 01:23:51 -07002273}
Paul Jackson4c4d50f2006-08-27 01:23:51 -07002274
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07002275#ifdef CONFIG_MEMORY_HOTPLUG
Paul Jackson38837fc2006-09-29 02:01:16 -07002276/*
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08002277 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2278 * Call this routine anytime after node_states[N_MEMORY] changes.
Srivatsa S. Bhata1cd2b12012-05-24 19:47:03 +05302279 * See cpuset_update_active_cpus() for CPU hotplug handling.
Paul Jackson38837fc2006-09-29 02:01:16 -07002280 */
Miao Xief4818912008-11-19 15:36:30 -08002281static int cpuset_track_online_nodes(struct notifier_block *self,
2282 unsigned long action, void *arg)
Paul Jackson38837fc2006-09-29 02:01:16 -07002283{
Tejun Heo3a5a6d02013-01-07 08:51:07 -08002284 schedule_work(&cpuset_hotplug_work);
Miao Xief4818912008-11-19 15:36:30 -08002285 return NOTIFY_OK;
Paul Jackson38837fc2006-09-29 02:01:16 -07002286}
2287#endif
2288
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289/**
2290 * cpuset_init_smp - initialize cpus_allowed
2291 *
2292 * Description: Finish top cpuset after cpu, node maps are initialized
2293 **/
2294
2295void __init cpuset_init_smp(void)
2296{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01002297 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08002298 top_cpuset.mems_allowed = node_states[N_MEMORY];
Paul Jackson4c4d50f2006-08-27 01:23:51 -07002299
Miao Xief4818912008-11-19 15:36:30 -08002300 hotplug_memory_notifier(cpuset_track_online_nodes, 10);
Tejun Heo8d033942013-01-07 08:51:07 -08002301
2302 cpuset_propagate_hotplug_wq =
2303 alloc_ordered_workqueue("cpuset_hotplug", 0);
2304 BUG_ON(!cpuset_propagate_hotplug_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305}
2306
2307/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2309 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
Li Zefan6af866a2009-01-07 18:08:45 -08002310 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 *
Li Zefan300ed6c2009-01-07 18:08:44 -08002312 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 * attached to the specified @tsk. Guaranteed to return some non-empty
Rusty Russell5f054e32012-03-29 15:38:31 +10302314 * subset of cpu_online_mask, even if this means going outside the
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 * task's cpuset.
2316 **/
2317
Li Zefan6af866a2009-01-07 18:08:45 -08002318void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319{
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002320 mutex_lock(&callback_mutex);
Paul Jackson909d75a2006-01-08 01:01:55 -08002321 task_lock(tsk);
Mike Travisf9a86fc2008-04-04 18:11:07 -07002322 guarantee_online_cpus(task_cs(tsk), pmask);
Paul Jackson909d75a2006-01-08 01:01:55 -08002323 task_unlock(tsk);
Oleg Nesterov897f0b32010-03-15 10:10:03 +01002324 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325}
2326
Peter Zijlstra2baab4e2012-03-20 15:57:01 +01002327void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002328{
2329 const struct cpuset *cs;
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002330
2331 rcu_read_lock();
2332 cs = task_cs(tsk);
2333 if (cs)
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09002334 do_set_cpus_allowed(tsk, cs->cpus_allowed);
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002335 rcu_read_unlock();
2336
2337 /*
2338 * We own tsk->cpus_allowed, nobody can change it under us.
2339 *
2340 * But we used cs && cs->cpus_allowed lockless and thus can
2341 * race with cgroup_attach_task() or update_cpumask() and get
2342 * the wrong tsk->cpus_allowed. However, both cases imply the
2343 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2344 * which takes task_rq_lock().
2345 *
2346 * If we are called after it dropped the lock we must see all
2347 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
2348 * set any mask even if it is not right from task_cs() pov,
2349 * the pending set_cpus_allowed_ptr() will fix things.
Peter Zijlstra2baab4e2012-03-20 15:57:01 +01002350 *
2351 * select_fallback_rq() will fix things up and set cpu_possible_mask
2352 * if required.
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002353 */
Oleg Nesterov9084bb82010-03-15 10:10:27 +01002354}
2355
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356void cpuset_init_current_mems_allowed(void)
2357{
Mike Travisf9a86fc2008-04-04 18:11:07 -07002358 nodes_setall(current->mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359}
2360
Randy Dunlapd9fd8a62005-07-27 11:45:11 -07002361/**
Paul Jackson909d75a2006-01-08 01:01:55 -08002362 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2363 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2364 *
2365 * Description: Returns the nodemask_t mems_allowed of the cpuset
2366 * attached to the specified @tsk. Guaranteed to return some non-empty
Lai Jiangshan38d7bee2012-12-12 13:51:24 -08002367 * subset of node_states[N_MEMORY], even if this means going outside the
Paul Jackson909d75a2006-01-08 01:01:55 -08002368 * task's cpuset.
2369 **/
2370
2371nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2372{
2373 nodemask_t mask;
2374
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002375 mutex_lock(&callback_mutex);
Paul Jackson909d75a2006-01-08 01:01:55 -08002376 task_lock(tsk);
Paul Menage8793d852007-10-18 23:39:39 -07002377 guarantee_online_mems(task_cs(tsk), &mask);
Paul Jackson909d75a2006-01-08 01:01:55 -08002378 task_unlock(tsk);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002379 mutex_unlock(&callback_mutex);
Paul Jackson909d75a2006-01-08 01:01:55 -08002380
2381 return mask;
2382}
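
/*
 * Hypothetical usage sketch: nodemask_t is returned by value, so the
 * caller needs no locking of its own.  The helper name is illustrative.
 */
static void example_show_allowed_mems(struct task_struct *tsk)
{
	nodemask_t mems = cpuset_mems_allowed(tsk);

	pr_info("%s may allocate on %d node(s)\n",
		tsk->comm, nodes_weight(mems));
}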
2383
2384/**
Mel Gorman19770b32008-04-28 02:12:18 -07002385 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2386 * @nodemask: the nodemask to be checked
Randy Dunlapd9fd8a62005-07-27 11:45:11 -07002387 *
Mel Gorman19770b32008-04-28 02:12:18 -07002388 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 */
Mel Gorman19770b32008-04-28 02:12:18 -07002390int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391{
Mel Gorman19770b32008-04-28 02:12:18 -07002392 return nodes_intersects(*nodemask, current->mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393}
2394
Paul Jackson9bf22292005-09-06 15:18:12 -07002395/*
Paul Menage78608362008-04-29 01:00:26 -07002396 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 2397 * mem_hardwall ancestor to the specified cpuset. Call while holding
2398 * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall
2399 * (an unusual configuration), then returns the root cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 */
Paul Menage78608362008-04-29 01:00:26 -07002401static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402{
Paul Menage78608362008-04-29 01:00:26 -07002403 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
Paul Jackson9bf22292005-09-06 15:18:12 -07002404 cs = cs->parent;
2405 return cs;
2406}
2407
2408/**
David Rientjesa1bc5a42009-04-02 16:57:54 -07002409 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2410 * @node: is this an allowed node?
Paul Jackson02a0e532006-12-13 00:34:25 -08002411 * @gfp_mask: memory allocation flags
Paul Jackson9bf22292005-09-06 15:18:12 -07002412 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002413 * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
2414 * set, yes, we can always allocate. If node is in our task's mems_allowed,
2415 * yes. If it's not a __GFP_HARDWALL request and this node is in the nearest
2416 * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been
2417 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2418 * flag, yes.
Paul Jackson9bf22292005-09-06 15:18:12 -07002419 * Otherwise, no.
2420 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002421 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2422 * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall()
2423 * might sleep, and might allow a node from an enclosing cpuset.
Paul Jackson02a0e532006-12-13 00:34:25 -08002424 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002425 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2426 * cpusets, and never sleeps.
Paul Jackson02a0e532006-12-13 00:34:25 -08002427 *
2428 * The __GFP_THISNODE placement logic is really handled elsewhere,
2429 * by forcibly using a zonelist starting at a specified node, and by
2430 * (in get_page_from_freelist()) refusing to consider the zones for
2431 * any node on the zonelist except the first. By the time any such
2432 * calls get to this routine, we should just shut up and say 'yes'.
2433 *
Paul Jackson9bf22292005-09-06 15:18:12 -07002434 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
David Rientjesc596d9f2007-05-06 14:49:32 -07002435 * and do not allow allocations outside the current task's cpuset
 2436 * unless the task has been OOM killed and is marked TIF_MEMDIE.
Paul Jackson9bf22292005-09-06 15:18:12 -07002437 * GFP_KERNEL allocations are not so marked, so can escape to the
Paul Menage78608362008-04-29 01:00:26 -07002438 * nearest enclosing hardwalled ancestor cpuset.
Paul Jackson9bf22292005-09-06 15:18:12 -07002439 *
Paul Jackson02a0e532006-12-13 00:34:25 -08002440 * Scanning up parent cpusets requires callback_mutex. The
2441 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2442 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 2443 * current task's mems_allowed came up empty on the first pass over
2444 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
2445 * cpuset are short of memory, might require taking the callback_mutex
2446 * mutex.
Paul Jackson9bf22292005-09-06 15:18:12 -07002447 *
Paul Jackson36be57f2006-05-20 15:00:10 -07002448 * The first call here from mm/page_alloc:get_page_from_freelist()
Paul Jackson02a0e532006-12-13 00:34:25 -08002449 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2450 * so no allocation on a node outside the cpuset is allowed (unless
2451 * in interrupt, of course).
Paul Jackson9bf22292005-09-06 15:18:12 -07002452 *
Paul Jackson36be57f2006-05-20 15:00:10 -07002453 * The second pass through get_page_from_freelist() doesn't even call
2454 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
2455 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2456 * in alloc_flags. That logic and the checks below have the combined
 2457 * effect that:
Paul Jackson9bf22292005-09-06 15:18:12 -07002458 * in_interrupt - any node ok (current task context irrelevant)
2459 * GFP_ATOMIC - any node ok
David Rientjesc596d9f2007-05-06 14:49:32 -07002460 * TIF_MEMDIE - any node ok
Paul Menage78608362008-04-29 01:00:26 -07002461 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
Paul Jackson9bf22292005-09-06 15:18:12 -07002462 * GFP_USER - only nodes in the current task's mems_allowed ok.
Paul Jackson36be57f2006-05-20 15:00:10 -07002463 *
2464 * Rule:
David Rientjesa1bc5a42009-04-02 16:57:54 -07002465 * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
Paul Jackson36be57f2006-05-20 15:00:10 -07002466 * pass in the __GFP_HARDWALL flag set in gfp_mask, which disables
2467 * the code that might scan up ancestor cpusets and sleep.
Paul Jackson02a0e532006-12-13 00:34:25 -08002468 */
David Rientjesa1bc5a42009-04-02 16:57:54 -07002469int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
Paul Jackson9bf22292005-09-06 15:18:12 -07002470{
Paul Jackson9bf22292005-09-06 15:18:12 -07002471 const struct cpuset *cs; /* current cpuset ancestors */
Paul Jackson29afd492006-03-24 03:16:12 -08002472 int allowed; /* is allocation on this node allowed? */
Paul Jackson9bf22292005-09-06 15:18:12 -07002473
Christoph Lameter9b819d22006-09-25 23:31:40 -07002474 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
Paul Jackson9bf22292005-09-06 15:18:12 -07002475 return 1;
Paul Jackson92d1dbd2006-05-20 15:00:11 -07002476 might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
Paul Jackson9bf22292005-09-06 15:18:12 -07002477 if (node_isset(node, current->mems_allowed))
2478 return 1;
David Rientjesc596d9f2007-05-06 14:49:32 -07002479 /*
2480 * Allow tasks that have access to memory reserves because they have
2481 * been OOM killed to get memory anywhere.
2482 */
2483 if (unlikely(test_thread_flag(TIF_MEMDIE)))
2484 return 1;
Paul Jackson9bf22292005-09-06 15:18:12 -07002485 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
2486 return 0;
2487
Bob Picco5563e772005-11-13 16:06:35 -08002488 if (current->flags & PF_EXITING) /* Let dying task have memory */
2489 return 1;
2490
Paul Jackson9bf22292005-09-06 15:18:12 -07002491 /* Not hardwall and node outside mems_allowed: scan up cpusets */
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002492 mutex_lock(&callback_mutex);
Paul Jackson053199e2005-10-30 15:02:30 -08002493
Paul Jackson053199e2005-10-30 15:02:30 -08002494 task_lock(current);
Paul Menage78608362008-04-29 01:00:26 -07002495 cs = nearest_hardwall_ancestor(task_cs(current));
Paul Jackson053199e2005-10-30 15:02:30 -08002496 task_unlock(current);
2497
Paul Jackson9bf22292005-09-06 15:18:12 -07002498 allowed = node_isset(node, cs->mems_allowed);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002499 mutex_unlock(&callback_mutex);
Paul Jackson9bf22292005-09-06 15:18:12 -07002500 return allowed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501}
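
/*
 * Hypothetical sketch contrasting the two call styles documented
 * above.  GFP_USER carries __GFP_HARDWALL, so that call reduces to the
 * hardwall check and never sleeps; plain GFP_KERNEL may sleep and may
 * escape to the nearest hardwalled ancestor.  The wrapper
 * cpuset_node_allowed_softwall() lives in <linux/cpuset.h> and
 * short-circuits when only the root cpuset exists.
 */
static bool example_may_alloc_on(int node)
{
	/* Sleepable context assumed for the GFP_KERNEL query. */
	bool kernel_ok = cpuset_node_allowed_softwall(node, GFP_KERNEL);
	bool user_ok = cpuset_node_allowed_softwall(node, GFP_USER);

	return kernel_ok && user_ok;
}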
2502
Paul Jackson02a0e532006-12-13 00:34:25 -08002503/*
David Rientjesa1bc5a42009-04-02 16:57:54 -07002504 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2505 * @node: is this an allowed node?
Paul Jackson02a0e532006-12-13 00:34:25 -08002506 * @gfp_mask: memory allocation flags
2507 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002508 * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
2509 * set, yes, we can always allocate. If node is in our task's mems_allowed,
2510 * yes. If the task has been OOM killed and has access to memory reserves as
2511 * specified by the TIF_MEMDIE flag, yes.
2512 * Otherwise, no.
Paul Jackson02a0e532006-12-13 00:34:25 -08002513 *
2514 * The __GFP_THISNODE placement logic is really handled elsewhere,
2515 * by forcibly using a zonelist starting at a specified node, and by
2516 * (in get_page_from_freelist()) refusing to consider the zones for
2517 * any node on the zonelist except the first. By the time any such
2518 * calls get to this routine, we should just shut up and say 'yes'.
2519 *
David Rientjesa1bc5a42009-04-02 16:57:54 -07002520 * Unlike the cpuset_node_allowed_softwall() variant, above,
2521 * this variant requires that the node be in the current task's
Paul Jackson02a0e532006-12-13 00:34:25 -08002522 * mems_allowed or that we're in interrupt. It does not scan up the
2523 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2524 * It never sleeps.
2525 */
David Rientjesa1bc5a42009-04-02 16:57:54 -07002526int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
Paul Jackson02a0e532006-12-13 00:34:25 -08002527{
Paul Jackson02a0e532006-12-13 00:34:25 -08002528 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2529 return 1;
Paul Jackson02a0e532006-12-13 00:34:25 -08002530 if (node_isset(node, current->mems_allowed))
2531 return 1;
Daniel Walkerdedf8b72007-10-18 03:06:04 -07002532 /*
2533 * Allow tasks that have access to memory reserves because they have
2534 * been OOM killed to get memory anywhere.
2535 */
2536 if (unlikely(test_thread_flag(TIF_MEMDIE)))
2537 return 1;
Paul Jackson02a0e532006-12-13 00:34:25 -08002538 return 0;
2539}
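
/*
 * Hypothetical sketch: the hardwall variant is the one safe to use
 * when sleeping is not allowed, e.g. for a GFP_ATOMIC decision.  The
 * cpuset_node_allowed_hardwall() wrapper is in <linux/cpuset.h>.
 */
static bool example_may_alloc_atomic(int node)
{
	/* Never sleeps, never scans up the cpuset hierarchy. */
	return cpuset_node_allowed_hardwall(node, GFP_ATOMIC);
}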
2540
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002541/**
Jack Steiner6adef3e2010-05-26 14:42:49 -07002542 * cpuset_mem_spread_node() - On which node to begin search for a file page
2543 * cpuset_slab_spread_node() - On which node to begin search for a slab page
Paul Jackson825a46a2006-03-24 03:16:03 -08002544 *
2545 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2546 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2547 * and if the memory allocation used cpuset_mem_spread_node()
2548 * to determine on which node to start looking, as it will for
2549 * certain page cache or slab cache pages such as used for file
2550 * system buffers and inode caches, then instead of starting on the
2551 * local node to look for a free page, rather spread the starting
2552 * node around the tasks mems_allowed nodes.
2553 *
2554 * We don't have to worry about the returned node being offline
2555 * because "it can't happen", and even if it did, it would be ok.
2556 *
2557 * The routines calling guarantee_online_mems() are careful to
2558 * only set nodes in task->mems_allowed that are online. So it
2559 * should not be possible for the following code to return an
2560 * offline node. But if it did, that would be ok, as this routine
2561 * is not returning the node where the allocation must be, only
2562 * the node where the search should start. The zonelist passed to
2563 * __alloc_pages() will include all nodes. If the slab allocator
2564 * is passed an offline node, it will fall back to the local node.
2565 * See kmem_cache_alloc_node().
2566 */
2567
Jack Steiner6adef3e2010-05-26 14:42:49 -07002568static int cpuset_spread_node(int *rotor)
Paul Jackson825a46a2006-03-24 03:16:03 -08002569{
2570 int node;
2571
Jack Steiner6adef3e2010-05-26 14:42:49 -07002572 node = next_node(*rotor, current->mems_allowed);
Paul Jackson825a46a2006-03-24 03:16:03 -08002573 if (node == MAX_NUMNODES)
2574 node = first_node(current->mems_allowed);
Jack Steiner6adef3e2010-05-26 14:42:49 -07002575 *rotor = node;
Paul Jackson825a46a2006-03-24 03:16:03 -08002576 return node;
2577}
Jack Steiner6adef3e2010-05-26 14:42:49 -07002578
2579int cpuset_mem_spread_node(void)
2580{
Michal Hocko778d3b02011-07-26 16:08:30 -07002581 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2582 current->cpuset_mem_spread_rotor =
2583 node_random(&current->mems_allowed);
2584
Jack Steiner6adef3e2010-05-26 14:42:49 -07002585 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2586}
2587
2588int cpuset_slab_spread_node(void)
2589{
Michal Hocko778d3b02011-07-26 16:08:30 -07002590 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2591 current->cpuset_slab_spread_rotor =
2592 node_random(&current->mems_allowed);
2593
Jack Steiner6adef3e2010-05-26 14:42:49 -07002594 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2595}
2596
Paul Jackson825a46a2006-03-24 03:16:03 -08002597EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
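
/*
 * Hypothetical sketch of how a page cache user applies the rotor
 * above, modeled on __page_cache_alloc(): consult the PF_SPREAD_PAGE
 * policy, then hand the chosen node to a node-aware allocator.
 */
static struct page *example_alloc_spread_page(gfp_t gfp_mask)
{
	int node = numa_node_id();	/* default: the local node */

	if (cpuset_do_page_mem_spread())
		node = cpuset_mem_spread_node();

	return alloc_pages_exact_node(node, gfp_mask, 0);
}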
2598
2599/**
David Rientjesbbe373f2007-10-16 23:25:58 -07002600 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2601 * @tsk1: pointer to task_struct of some task.
2602 * @tsk2: pointer to task_struct of some other task.
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002603 *
David Rientjesbbe373f2007-10-16 23:25:58 -07002604 * Description: Return true if @tsk1's mems_allowed intersects the
2605 * mems_allowed of @tsk2. Used by the OOM killer to determine if
 2606 * either task's memory usage might impact the memory available
2607 * to the other.
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002608 **/
2609
David Rientjesbbe373f2007-10-16 23:25:58 -07002610int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2611 const struct task_struct *tsk2)
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002612{
David Rientjesbbe373f2007-10-16 23:25:58 -07002613 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002614}
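
/*
 * Hypothetical sketch of the OOM killer use mentioned above: a victim
 * whose allowed nodes do not intersect ours cannot relieve our memory
 * pressure, so it is a poor candidate.  The helper name is illustrative.
 */
static bool example_oom_candidate(struct task_struct *victim)
{
	return cpuset_mems_allowed_intersects(current, victim);
}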
2615
David Rientjes75aa1992009-01-06 14:39:01 -08002616/**
2617 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
 2618 * @tsk: pointer to task_struct of some task.
 2619 *
 2620 * Description: Prints @tsk's name, cpuset name, and cached copy of its
2621 * mems_allowed to the kernel log. Must hold task_lock(task) to allow
2622 * dereferencing task_cs(task).
2623 */
2624void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2625{
2626 struct dentry *dentry;
2627
2628 dentry = task_cs(tsk)->css.cgroup->dentry;
2629 spin_lock(&cpuset_buffer_lock);
2630 snprintf(cpuset_name, CPUSET_NAME_LEN,
2631 dentry ? (const char *)dentry->d_name.name : "/");
2632 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2633 tsk->mems_allowed);
2634 printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2635 tsk->comm, cpuset_name, cpuset_nodelist);
2636 spin_unlock(&cpuset_buffer_lock);
2637}
2638
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639/*
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002640 * Collection of memory_pressure is suppressed unless
2641 * this flag is enabled by writing "1" to the special
2642 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2643 */
2644
Paul Jacksonc5b2aff2006-01-08 01:01:51 -08002645int cpuset_memory_pressure_enabled __read_mostly;
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002646
2647/**
2648 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2649 *
2650 * Keep a running average of the rate of synchronous (direct)
2651 * page reclaim efforts initiated by tasks in each cpuset.
2652 *
2653 * This represents the rate at which some task in the cpuset
2654 * ran low on memory on all nodes it was allowed to use, and
 2655 * had to enter the kernel's page reclaim code in an effort to
2656 * create more free memory by tossing clean pages or swapping
2657 * or writing dirty pages.
2658 *
2659 * Display to user space in the per-cpuset read-only file
2660 * "memory_pressure". Value displayed is an integer
2661 * representing the recent rate of entry into the synchronous
2662 * (direct) page reclaim by any task attached to the cpuset.
2663 **/
2664
2665void __cpuset_memory_pressure_bump(void)
2666{
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002667 task_lock(current);
Paul Menage8793d852007-10-18 23:39:39 -07002668 fmeter_markevent(&task_cs(current)->fmeter);
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002669 task_unlock(current);
2670}
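
/*
 * The double-underscore function above is normally reached through a
 * wrapper macro, so the common, disabled case costs one flag test.  A
 * sketch modeled on cpuset_memory_pressure_bump() in <linux/cpuset.h>:
 *
 *	#define cpuset_memory_pressure_bump()			\
 *		do {						\
 *			if (cpuset_memory_pressure_enabled)	\
 *				__cpuset_memory_pressure_bump();\
 *		} while (0)
 */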
2671
Paul Menage8793d852007-10-18 23:39:39 -07002672#ifdef CONFIG_PROC_PID_CPUSET
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002673/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 * proc_cpuset_show()
2675 * - Print tasks cpuset path into seq_file.
2676 * - Used for /proc/<pid>/cpuset.
Paul Jackson053199e2005-10-30 15:02:30 -08002677 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2678 * doesn't really matter if tsk->cpuset changes after we read it,
Tejun Heo5d21cc22013-01-07 08:51:08 -08002679 * and we take cpuset_mutex, keeping cpuset_attach() from changing it
Paul Menage2df167a2008-02-07 00:14:45 -08002680 * anyway.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 */
Paul Jackson029190c2007-10-18 23:40:20 -07002682static int proc_cpuset_show(struct seq_file *m, void *unused_v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683{
Eric W. Biederman13b41b02006-06-26 00:25:56 -07002684 struct pid *pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 struct task_struct *tsk;
2686 char *buf;
Paul Menage8793d852007-10-18 23:39:39 -07002687 struct cgroup_subsys_state *css;
Eric W. Biederman99f89552006-06-26 00:25:55 -07002688 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689
Eric W. Biederman99f89552006-06-26 00:25:55 -07002690 retval = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2692 if (!buf)
Eric W. Biederman99f89552006-06-26 00:25:55 -07002693 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694
Eric W. Biederman99f89552006-06-26 00:25:55 -07002695 retval = -ESRCH;
Eric W. Biederman13b41b02006-06-26 00:25:56 -07002696 pid = m->private;
2697 tsk = get_pid_task(pid, PIDTYPE_PID);
Eric W. Biederman99f89552006-06-26 00:25:55 -07002698 if (!tsk)
2699 goto out_free;
2700
2701 retval = -EINVAL;
Tejun Heo5d21cc22013-01-07 08:51:08 -08002702 mutex_lock(&cpuset_mutex);
Paul Menage8793d852007-10-18 23:39:39 -07002703 css = task_subsys_state(tsk, cpuset_subsys_id);
2704 retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 if (retval < 0)
Eric W. Biederman99f89552006-06-26 00:25:55 -07002706 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 seq_puts(m, buf);
2708 seq_putc(m, '\n');
Eric W. Biederman99f89552006-06-26 00:25:55 -07002709out_unlock:
Tejun Heo5d21cc22013-01-07 08:51:08 -08002710 mutex_unlock(&cpuset_mutex);
Eric W. Biederman99f89552006-06-26 00:25:55 -07002711 put_task_struct(tsk);
2712out_free:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 kfree(buf);
Eric W. Biederman99f89552006-06-26 00:25:55 -07002714out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 return retval;
2716}
2717
2718static int cpuset_open(struct inode *inode, struct file *file)
2719{
Eric W. Biederman13b41b02006-06-26 00:25:56 -07002720 struct pid *pid = PROC_I(inode)->pid;
2721 return single_open(file, proc_cpuset_show, pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722}
2723
Arjan van de Ven9a321442007-02-12 00:55:35 -08002724const struct file_operations proc_cpuset_operations = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 .open = cpuset_open,
2726 .read = seq_read,
2727 .llseek = seq_lseek,
2728 .release = single_release,
2729};
Paul Menage8793d852007-10-18 23:39:39 -07002730#endif /* CONFIG_PROC_PID_CPUSET */
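
/*
 * Hypothetical userspace sketch (ordinary C, not kernel code) reading
 * the /proc file implemented above:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char path[128];
 *		FILE *f = fopen("/proc/self/cpuset", "r");
 *
 *		if (f && fgets(path, sizeof(path), f))
 *			printf("cpuset: %s", path);	// e.g. "/"
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */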
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731
Heiko Carstensd01d4822009-09-21 11:06:27 +02002732/* Display task mems_allowed in /proc/<pid>/status file. */
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002733void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734{
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002735 seq_printf(m, "Mems_allowed:\t");
Lai Jiangshan30e8e132008-10-18 20:28:20 -07002736 seq_nodemask(m, &task->mems_allowed);
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002737 seq_printf(m, "\n");
Mike Travis39106dc2008-04-08 11:43:03 -07002738 seq_printf(m, "Mems_allowed_list:\t");
Lai Jiangshan30e8e132008-10-18 20:28:20 -07002739 seq_nodemask_list(m, &task->mems_allowed);
Mike Travis39106dc2008-04-08 11:43:03 -07002740 seq_printf(m, "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741}
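
/*
 * For illustration, on a hypothetical machine with nodes 0-1 allowed,
 * the two lines contributed to /proc/<pid>/status would read roughly:
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 *
 * seq_nodemask() emits the raw hex mask, seq_nodemask_list() the
 * human-readable list form; the mask width depends on MAX_NUMNODES.
 */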