/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *  Portions Copyright (c) 2004 Silicon Graphics, Inc.
 *
 *  2003-10-10 Written by Simon Derr <simon.derr@bull.net>
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson <pj@sgi.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>

#define CPUSET_SUPER_MAGIC		0x27e0eb

/*
 * Tracks how many cpusets are currently defined in the system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	/*
	 * Count is atomic so can incr (fork) or decr (exit) without a lock.
	 */
	atomic_t count;			/* count tasks using this cpuset */

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */

	struct cpuset *parent;		/* my parent */
	struct dentry *dentry;		/* cpuset fs entry */

	/*
	 * Copy of global cpuset_mems_generation as of the most
	 * recent time this cpuset changed its mems_allowed.
	 */
	int mems_generation;

	struct fmeter fmeter;		/* memory_pressure filter */
};

/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEMORY_MIGRATE,
	CS_REMOVED,
	CS_NOTIFY_ON_RELEASE
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return !!test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return !!test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_removed(const struct cpuset *cs)
{
	return !!test_bit(CS_REMOVED, &cs->flags);
}

static inline int notify_on_release(const struct cpuset *cs)
{
	return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return !!test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

/*
 * Increment this atomic integer every time any cpuset changes its
 * mems_allowed value.  Users of cpusets can track this generation
 * number, and avoid having to lock and reload mems_allowed unless
 * the cpuset they're using changes generation.
 *
 * A single, global generation is needed because attach_task() could
 * reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that task's previous cpuset.
 *
 * Generations are needed for mems_allowed because one task cannot
 * modify another's memory placement.  So we must enable every task,
 * on every visit to __alloc_pages(), to efficiently check whether
 * its current->cpuset->mems_allowed has changed, requiring an update
 * of its current->mems_allowed.
 */
static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);
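
/*
 * Illustrative sketch of the consumer side of this generation number
 * (the real check lives in cpuset_update_task_memory_state() below):
 *
 *	if (tsk->cpuset_mems_generation != tsk->cpuset->mems_generation)
 *		... relock and refresh tsk->mems_allowed ...
 */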

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
	.cpus_allowed = CPU_MASK_ALL,
	.mems_allowed = NODE_MASK_ALL,
	.count = ATOMIC_INIT(0),
	.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
	.children = LIST_HEAD_INIT(top_cpuset.children),
};

static struct vfsmount *cpuset_mount;
static struct super_block *cpuset_sb;

/*
 * We have two global cpuset semaphores below.  They can nest.
 * It is ok to first take manage_sem, then nest callback_sem.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both semaphores to modify cpusets.  If a task
 * holds manage_sem, then it blocks others wanting that semaphore,
 * ensuring that it is the only task able to also acquire callback_sem
 * and be able to modify cpusets.  It can perform various checks on
 * the cpuset structure first, knowing nothing will change.  It can
 * also allocate memory while just holding manage_sem.  While it is
 * performing these checks, various callback routines can briefly
 * acquire callback_sem to query cpusets.  Once it is ready to make
 * the changes, it takes callback_sem, blocking everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_sem, as that would risk double tripping on callback_sem
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_sem, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mems_generation may only
 * be accessed in the context of that task, so require no locks.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding manage_sem or callback_sem can't rely
 * on the count field not changing.  However, if the count goes to
 * zero, then only attach_task(), which holds both semaphores, can
 * increment it again.  A count of zero means that no tasks are
 * currently attached, so there is no way a task attached to that
 * cpuset can fork (the other way to increment the count).  So code
 * holding manage_sem or callback_sem can safely assume that if the
 * count is zero, it will stay zero.  Similarly, if a task holds
 * manage_sem or callback_sem on a cpuset with zero count, it knows
 * that the cpuset won't be removed, as cpuset_rmdir() needs both of
 * those semaphores.
 *
 * A possible optimization to improve parallelism would be to make
 * callback_sem a R/W semaphore (rwsem), allowing the callback routines
 * to proceed in parallel, with read access, until the holder of
 * manage_sem needed to take this rwsem for exclusive write access
 * and modify some cpusets.
 *
 * The cpuset_common_file_write handler for operations that modify
 * the cpuset hierarchy holds manage_sem across the entire operation,
 * single threading all such cpuset modifications across the system.
 *
 * The cpuset_common_file_read() handlers only hold callback_sem across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
 * (usually) take either semaphore.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cpuset_exit(),
 * when a task in a notify_on_release cpuset exits.  Then manage_sem
 * is taken, and if the cpuset count is zero, a usermode call is made
 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * A cpuset can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cpusets is empty.  Since all
 * tasks in the system use _some_ cpuset, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cpuset
 * always has children cpusets and/or using tasks.  So we don't
 * need a special hack to ensure that top_cpuset cannot be deleted.
 *
 * The above "Tale of Two Semaphores" would be complete, but for:
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of attach_task(),
 * which overwrites one task's cpuset pointer with another.  It does
 * so using both semaphores, however there are several performance
 * critical places that need to reference task->cpuset without the
 * expense of grabbing a system global semaphore.  Therefore except as
 * noted below, when dereferencing or, as in attach_task(), modifying
 * a task's cpuset pointer we use task_lock(), which acts on a spinlock
 * (task->alloc_lock) already in the task_struct routinely used for
 * such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cpuset pointer by attach_task() and the
 * access of task->cpuset->mems_generation via that pointer in
 * the routine cpuset_update_task_memory_state().
 */
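
/*
 * Illustrative sketch (not a real caller) of the update protocol the
 * comment above describes, as followed by update routines such as
 * update_cpumask() and update_flag() below:
 *
 *	down(&manage_sem);
 *	... validate the request, allocate any memory needed ...
 *	down(&callback_sem);
 *	... write the cpuset's fields ...
 *	up(&callback_sem);
 *	up(&manage_sem);
 */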

static DECLARE_MUTEX(manage_sem);
static DECLARE_MUTEX(callback_sem);

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
 *  -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
 */

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);

static struct backing_dev_info cpuset_backing_dev_info = {
	.ra_pages = 0,		/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};

static struct inode *cpuset_new_inode(mode_t mode)
{
	struct inode *inode = new_inode(cpuset_sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
	}
	return inode;
}

static void cpuset_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory? if so, kfree() associated cpuset */
	if (S_ISDIR(inode->i_mode)) {
		struct cpuset *cs = dentry->d_fsdata;
		BUG_ON(!(is_removed(cs)));
		kfree(cs);
	}
	iput(inode);
}

static struct dentry_operations cpuset_dops = {
	.d_iput = cpuset_diput,
};

static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
{
	struct dentry *d = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(d))
		d->d_op = &cpuset_dops;
	return d;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

/*
 * NOTE: the dentry must have been dget()'ed
 */
static void cpuset_d_remove_dir(struct dentry *dentry)
{
	struct list_head *node;

	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
		list_del_init(node);
		if (d->d_inode) {
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}

static struct super_operations cpuset_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
};

static int cpuset_fill_super(struct super_block *sb, void *unused_data,
							int unused_silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CPUSET_SUPER_MAGIC;
	sb->s_op = &cpuset_ops;
	cpuset_sb = sb;

	inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
	if (inode) {
		inode->i_op = &simple_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directories start off with i_nlink == 2 (for "." entry) */
		inode->i_nlink++;
	} else {
		return -ENOMEM;
	}

	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = root;
	return 0;
}

static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
					int flags, const char *unused_dev_name,
					void *data)
{
	return get_sb_single(fs_type, flags, data, cpuset_fill_super);
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.get_sb = cpuset_get_sb,
	.kill_sb = kill_litter_super,
};

/* struct cftype:
 *
 * The files in the cpuset filesystem mostly have very simple read/write
 * handling; a common function takes care of it.  Nevertheless some cases
 * (e.g. reading the 'tasks' file) are special, so this structure is
 * defined for every kind of file.
 *
 * When reading/writing to a file:
 *	- the cpuset to use is in file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

struct cftype {
	char *name;
	int private;
	int (*open) (struct inode *inode, struct file *file);
	ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*write) (struct file *file, const char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*release) (struct inode *inode, struct file *file);
};

static inline struct cpuset *__d_cs(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/*
 * Call with manage_sem held.  Writes path of cpuset into buf.
 * Returns 0 on success, -errno on error.
 */
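
/*
 * For example (illustrative, assuming the cpuset filesystem is
 * mounted at /dev/cpuset): for a cpuset whose directory is
 * /dev/cpuset/big/sub, the loop below fills buf with "/big/sub";
 * for top_cpuset it fills buf with "/".
 */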

static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
{
	char *start;

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = cs->dentry->d_name.len;
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, cs->dentry->d_name.name, len);
		cs = cs->parent;
		if (!cs)
			break;
		if (!cs->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}

/*
 * Notify userspace when a cpuset is released, by running
 * /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cpuset.
 *
 * This races with the possibility that some other task will be
 * attached to this cpuset before it is removed, or that some other
 * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
 * unused, and this cpuset will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is 0, which means don't
 * wait.  The separate /sbin/cpuset_release_agent task is forked by
 * call_usermodehelper(), then control in this thread returns here,
 * without waiting for the release agent task.  We don't bother to
 * wait because the caller of this routine has no use for the exit
 * status of the /sbin/cpuset_release_agent task, so no sense holding
 * our caller up for that.
 *
 * When we had only one cpuset semaphore, we had to call this
 * without holding it, to avoid deadlock when call_usermodehelper()
 * allocated memory.  With two locks, we could now call this while
 * holding manage_sem, but we still don't, so as to minimize
 * the time manage_sem is held.
 */

static void cpuset_release_agent(const char *pathbuf)
{
	char *argv[3], *envp[3];
	int i;

	if (!pathbuf)
		return;

	i = 0;
	argv[i++] = "/sbin/cpuset_release_agent";
	argv[i++] = (char *)pathbuf;
	argv[i] = NULL;

	i = 0;
	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i] = NULL;

	call_usermodehelper(argv[0], argv, envp, 0);
	kfree(pathbuf);
}

/*
 * Either cs->count of using tasks transitioned to zero, or the
 * cs->children list of child cpusets just became empty.  If this
 * cs is notify_on_release() and now both the user count is zero and
 * the list of children is empty, prepare cpuset path in a kmalloc'd
 * buffer, to be returned via ppathbuf, so that the caller can invoke
 * cpuset_release_agent() with it later on, once manage_sem is dropped.
 * Call here with manage_sem held.
 *
 * This check_for_release() routine is responsible for kmalloc'ing
 * pathbuf.  The above cpuset_release_agent() is responsible for
 * kfree'ing pathbuf.  The caller of these routines is responsible
 * for providing a pathbuf pointer, initialized to NULL, then
 * calling check_for_release() with manage_sem held and the address
 * of the pathbuf pointer, then dropping manage_sem, then calling
 * cpuset_release_agent() with pathbuf, as set by check_for_release().
 */
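
/*
 * In sketch form, the calling convention the comment above
 * prescribes for callers of these two routines:
 *
 *	char *pathbuf = NULL;
 *
 *	down(&manage_sem);
 *	...
 *	check_for_release(cs, &pathbuf);
 *	up(&manage_sem);
 *	cpuset_release_agent(pathbuf);
 */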

static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
	if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
	    list_empty(&cs->children)) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return;
		if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
			kfree(buf);
		else
			*ppathbuf = buf;
	}
}

/*
 * Return in *pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with callback_sem held.
 */

static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
{
	while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
		cs = cs->parent;
	if (cs)
		cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
	else
		*pmask = cpu_online_map;
	BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online mems.  If we get
 * all the way to the top and still haven't found any online mems,
 * return node_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_online_map.
 *
 * Call with callback_sem held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
		cs = cs->parent;
	if (cs)
		nodes_and(*pmask, cs->mems_allowed, node_online_map);
	else
		*pmask = node_online_map;
	BUG_ON(!nodes_intersects(*pmask, node_online_map));
}

/**
 * cpuset_update_task_memory_state - update task memory placement
 *
 * If the current task's cpuset's mems_allowed changed behind our
 * backs, update current->mems_allowed, mems_generation and task NUMA
 * mempolicy to the new value.
 *
 * Task mempolicy is updated by rebinding it relative to the
 * current->cpuset if a task has its memory placement changed.
 * Do not call this routine if in_interrupt().
 *
 * Call without callback_sem or task_lock() held.  May be called
 * with or without manage_sem held.  Doesn't need task_lock to guard
 * against another task changing a non-NULL cpuset pointer to NULL,
 * as that is only done by a task on itself, and if the current task
 * is here, it is not simultaneously in the exit code NULL'ing its
 * cpuset pointer.  This routine also might acquire callback_sem and
 * current->mm->mmap_sem during call.
 *
 * Reading current->cpuset->mems_generation doesn't need task_lock
 * to guard the current->cpuset dereference, because it is guarded
 * from concurrent freeing of current->cpuset by attach_task(),
 * using RCU.
 *
 * The rcu_dereference() is technically probably not needed,
 * as I don't actually mind if I see a new cpuset pointer but
 * an old value of mems_generation.  However this really only
 * matters on alpha systems using cpusets heavily.  If I dropped
 * that rcu_dereference(), it would save them a memory barrier.
 * For all other arches, rcu_dereference is a no-op anyway, and for
 * alpha systems not using cpusets, another planned optimization,
 * avoiding the rcu critical section for tasks in the root cpuset
 * which is statically allocated, so can't vanish, will make this
 * irrelevant.  Better to use RCU as intended, than to engage in
 * some cute trick to save a memory barrier that is impossible to
 * test, for alpha systems using cpusets heavily, which might not
 * even exist.
 *
 * This routine is needed to update the per-task mems_allowed data,
 * within the task's context, when it is trying to allocate memory
 * (in various mm/mempolicy.c routines) and notices that some other
 * task has been modifying its cpuset.
 */

void cpuset_update_task_memory_state(void)
{
	int my_cpusets_mem_gen;
	struct task_struct *tsk = current;
	struct cpuset *cs;

	if (tsk->cpuset == &top_cpuset) {
		/* Don't need rcu for top_cpuset.  It's never freed. */
		my_cpusets_mem_gen = top_cpuset.mems_generation;
	} else {
		rcu_read_lock();
		cs = rcu_dereference(tsk->cpuset);
		my_cpusets_mem_gen = cs->mems_generation;
		rcu_read_unlock();
	}

	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
		down(&callback_sem);
		task_lock(tsk);
		cs = tsk->cpuset;	/* Maybe changed when task not locked */
		guarantee_online_mems(cs, &tsk->mems_allowed);
		tsk->cpuset_mems_generation = cs->mems_generation;
		task_unlock(tsk);
		up(&callback_sem);
		mpol_rebind_task(tsk, &tsk->mems_allowed);
	}
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding manage_sem.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * manage_sem held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	list_for_each_entry(c, &cur->children, sibling) {
		if (!is_cpuset_subset(c, trial))
			return -EBUSY;
	}

	/* Remaining checks don't apply to root cpuset */
	if ((par = cur->parent) == NULL)
		return 0;

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))
		return -EACCES;

	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
	list_for_each_entry(c, &par->children, sibling) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
			return -EINVAL;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			return -EINVAL;
	}

	return 0;
}

/*
 * For a given cpuset cur, partition the system as follows:
 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * b. All cpus in the current cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * Build these two partitions by calling partition_sched_domains().
 *
 * Call with manage_sem held.  May nest a call to the
 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
 */
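
/*
 * Worked example (hypothetical layout): suppose the parent allows
 * cpus 0-7 and has two cpu-exclusive children, one owning cpus 0-1
 * and one owning cpus 4-5.  If cur is the exclusive child owning
 * cpus 4-5, then pspan ends up as cpus 2-3,6-7 (the parent's cpus
 * minus all exclusive children) and cspan as cpus 4-5 (cur's cpus,
 * with no exclusive children of cur's own to subtract here).
 */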

static void update_cpu_domains(struct cpuset *cur)
{
	struct cpuset *c, *par = cur->parent;
	cpumask_t pspan, cspan;

	if (par == NULL || cpus_empty(cur->cpus_allowed))
		return;

	/*
	 * Get all cpus from parent's cpus_allowed not part of exclusive
	 * children
	 */
	pspan = par->cpus_allowed;
	list_for_each_entry(c, &par->children, sibling) {
		if (is_cpu_exclusive(c))
			cpus_andnot(pspan, pspan, c->cpus_allowed);
	}
	if (is_removed(cur) || !is_cpu_exclusive(cur)) {
		cpus_or(pspan, pspan, cur->cpus_allowed);
		if (cpus_equal(pspan, cur->cpus_allowed))
			return;
		cspan = CPU_MASK_NONE;
	} else {
		if (cpus_empty(pspan))
			return;
		cspan = cur->cpus_allowed;
		/*
		 * Get all cpus from current cpuset's cpus_allowed not part
		 * of exclusive children
		 */
		list_for_each_entry(c, &cur->children, sibling) {
			if (is_cpu_exclusive(c))
				cpus_andnot(cspan, cspan, c->cpus_allowed);
		}
	}

	lock_cpu_hotplug();
	partition_sched_domains(&pspan, &cspan);
	unlock_cpu_hotplug();
}

/*
 * Call with manage_sem held.  May take callback_sem during call.
 */

static int update_cpumask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	int retval, cpus_unchanged;

	trialcs = *cs;
	retval = cpulist_parse(buf, trialcs.cpus_allowed);
	if (retval < 0)
		return retval;
	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
	if (cpus_empty(trialcs.cpus_allowed))
		return -ENOSPC;
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		return retval;
	cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
	down(&callback_sem);
	cs->cpus_allowed = trialcs.cpus_allowed;
	up(&callback_sem);
	if (is_cpu_exclusive(cs) && !cpus_unchanged)
		update_cpu_domains(cs);
	return 0;
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpuset's mems_allowed and mems_generation, and for each
 * task in the cpuset, rebind any vma mempolicies and if
 * the cpuset is marked 'memory_migrate', migrate the task's
 * pages to the new memory.
 *
 * Call with manage_sem held.  May take callback_sem during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpuset's new mems_allowed.
 */

static int update_nodemask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	nodemask_t oldmem;
	struct task_struct *g, *p;
	struct mm_struct **mmarray;
	int i, n, ntasks;
	int migrate;
	int fudge;
	int retval;

	trialcs = *cs;
	retval = nodelist_parse(buf, trialcs.mems_allowed);
	if (retval < 0)
		goto done;
	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
	oldmem = cs->mems_allowed;
	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	if (nodes_empty(trialcs.mems_allowed)) {
		retval = -ENOSPC;
		goto done;
	}
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		goto done;

	down(&callback_sem);
	cs->mems_allowed = trialcs.mems_allowed;
	atomic_inc(&cpuset_mems_generation);
	cs->mems_generation = atomic_read(&cpuset_mems_generation);
	up(&callback_sem);

	set_cpuset_being_rebound(cs);		/* causes mpol_copy() rebind */

	fudge = 10;				/* spare mmarray[] slots */
	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
	retval = -ENOMEM;

	/*
	 * Allocate mmarray[] to hold mm reference for each task
	 * in cpuset cs.  Can't kmalloc GFP_KERNEL while holding
	 * tasklist_lock.  We could use GFP_ATOMIC, but with a
	 * few more lines of code, we can retry until we get a big
	 * enough mmarray[] w/o using GFP_ATOMIC.
	 */
	while (1) {
		ntasks = atomic_read(&cs->count);	/* guess */
		ntasks += fudge;
		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
		if (!mmarray)
			goto done;
		write_lock_irq(&tasklist_lock);		/* block fork */
		if (atomic_read(&cs->count) <= ntasks)
			break;				/* got enough */
		write_unlock_irq(&tasklist_lock);	/* try again */
		kfree(mmarray);
	}

	n = 0;

	/* Load up mmarray[] with mm reference for each task in cpuset. */
	do_each_thread(g, p) {
		struct mm_struct *mm;

		if (n >= ntasks) {
			printk(KERN_WARNING
				"Cpuset mempolicy rebind incomplete.\n");
			continue;
		}
		if (p->cpuset != cs)
			continue;
		mm = get_task_mm(p);
		if (!mm)
			continue;
		mmarray[n++] = mm;
	} while_each_thread(g, p);
	write_unlock_irq(&tasklist_lock);

	/*
	 * Now that we've dropped the tasklist spinlock, we can
	 * rebind the vma mempolicies of each mm in mmarray[] to their
	 * new cpuset, and release that mm.  The mpol_rebind_mm()
	 * call takes mmap_sem, which we couldn't take while holding
	 * tasklist_lock.  Forks can happen again now - the mpol_copy()
	 * cpuset_being_rebound check will catch such forks, and rebind
	 * their vma mempolicies too.  Because we still hold the global
	 * cpuset manage_sem, we know that no other rebind effort will
	 * be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	migrate = is_memory_migrate(cs);
	for (i = 0; i < n; i++) {
		struct mm_struct *mm = mmarray[i];

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate) {
			do_migrate_pages(mm, &oldmem, &cs->mems_allowed,
							MPOL_MF_MOVE_ALL);
		}
		mmput(mm);
	}

	/* We're done rebinding vma's to this cpuset's new mems_allowed. */
	kfree(mmarray);
	set_cpuset_being_rebound(NULL);
	retval = 0;
done:
	return retval;
}

/*
 * Call with manage_sem held.
 */

static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
{
	if (simple_strtoul(buf, NULL, 10) != 0)
		cpuset_memory_pressure_enabled = 1;
	else
		cpuset_memory_pressure_enabled = 0;
	return 0;
}

/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
 *				CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE)
 * cs:	the cpuset to update
 * buf:	the buffer where we read the 0 or 1
 *
 * Call with manage_sem held.
 */

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
	int turning_on;
	struct cpuset trialcs;
	int err, cpu_exclusive_changed;

	turning_on = (simple_strtoul(buf, NULL, 10) != 0);

	trialcs = *cs;
	if (turning_on)
		set_bit(bit, &trialcs.flags);
	else
		clear_bit(bit, &trialcs.flags);

	err = validate_change(cs, &trialcs);
	if (err < 0)
		return err;
	cpu_exclusive_changed =
		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
	down(&callback_sem);
	if (turning_on)
		set_bit(bit, &cs->flags);
	else
		clear_bit(bit, &cs->flags);
	up(&callback_sem);

	if (cpu_exclusive_changed)
		update_cpu_domains(cs);
	return 0;
}

/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stable.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * each event.
 */
 | 1048 |  | 
 | 1049 | #define FM_COEF 933		/* coefficient for half-life of 10 secs */ | 
 | 1050 | #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */ | 
 | 1051 | #define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */ | 
 | 1052 | #define FM_SCALE 1000		/* faux fixed point scale */ | 
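
/*
 * Illustrative sketch, not part of the original source (hence guarded
 * out with #if 0): a self-contained model of the filter above, handy
 * for sanity checking the constants.  Feeding one event per second,
 * the output settles at 1 * FM_SCALE = 1000; once events stop, ten
 * ticks of decay multiply the value by (933/1000)^10 ~= 0.50, the
 * documented 10 second half-life.  fmeter_model() is a made-up name.
 */
#if 0
static int fmeter_model(int events_per_sec, int secs)
{
	int val = 0, t;

	for (t = 0; t < secs; t++) {
		/* one event adds FM_SCALE to cnt, as in fmeter_markevent()
		 * (the FM_MAXCNT clamp is omitted in this sketch) */
		int cnt = events_per_sec * FM_SCALE;

		val = (FM_COEF * val) / FM_SCALE;	/* one tick of decay */
		val += ((FM_SCALE - FM_COEF) * cnt) / FM_SCALE;
	}
	return val;	/* ~= events_per_sec * FM_SCALE at steady state */
}
#endif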
 | 1053 |  | 
 | 1054 | /* Initialize a frequency meter */ | 
 | 1055 | static void fmeter_init(struct fmeter *fmp) | 
 | 1056 | { | 
 | 1057 | 	fmp->cnt = 0; | 
 | 1058 | 	fmp->val = 0; | 
 | 1059 | 	fmp->time = 0; | 
 | 1060 | 	spin_lock_init(&fmp->lock); | 
 | 1061 | } | 
 | 1062 |  | 
 | 1063 | /* Internal meter update - process cnt events and update value */ | 
 | 1064 | static void fmeter_update(struct fmeter *fmp) | 
 | 1065 | { | 
 | 1066 | 	time_t now = get_seconds(); | 
 | 1067 | 	time_t ticks = now - fmp->time; | 
 | 1068 |  | 
 | 1069 | 	if (ticks == 0) | 
 | 1070 | 		return; | 
 | 1071 |  | 
 | 1072 | 	ticks = min(FM_MAXTICKS, ticks); | 
 | 1073 | 	while (ticks-- > 0) | 
 | 1074 | 		fmp->val = (FM_COEF * fmp->val) / FM_SCALE; | 
 | 1075 | 	fmp->time = now; | 
 | 1076 |  | 
 | 1077 | 	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; | 
 | 1078 | 	fmp->cnt = 0; | 
 | 1079 | } | 
 | 1080 |  | 
 | 1081 | /* Process any previous ticks, then bump cnt by one (times scale). */ | 
 | 1082 | static void fmeter_markevent(struct fmeter *fmp) | 
 | 1083 | { | 
 | 1084 | 	spin_lock(&fmp->lock); | 
 | 1085 | 	fmeter_update(fmp); | 
 | 1086 | 	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); | 
 | 1087 | 	spin_unlock(&fmp->lock); | 
 | 1088 | } | 
 | 1089 |  | 
 | 1090 | /* Process any previous ticks, then return current value. */ | 
 | 1091 | static int fmeter_getrate(struct fmeter *fmp) | 
 | 1092 | { | 
 | 1093 | 	int val; | 
 | 1094 |  | 
 | 1095 | 	spin_lock(&fmp->lock); | 
 | 1096 | 	fmeter_update(fmp); | 
 | 1097 | 	val = fmp->val; | 
 | 1098 | 	spin_unlock(&fmp->lock); | 
 | 1099 | 	return val; | 
 | 1100 | } | 
 | 1101 |  | 
 | 1102 | /* | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1103 |  * Attach task specified by pid in 'pidbuf' to cpuset 'cs', possibly | 
 | 1104 |  * writing the path of the old cpuset in 'ppathbuf' if it needs to be | 
 | 1105 |  * notified on release. | 
 | 1106 |  * | 
 | 1107 |  * Call holding manage_sem.  May take callback_sem and task_lock of | 
 | 1108 |  * the task 'pid' during call. | 
 | 1109 |  */ | 
 | 1110 |  | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1111 | static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1112 | { | 
 | 1113 | 	pid_t pid; | 
 | 1114 | 	struct task_struct *tsk; | 
 | 1115 | 	struct cpuset *oldcs; | 
 | 1116 | 	cpumask_t cpus; | 
| Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1117 | 	nodemask_t from, to; | 
| Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1118 | 	struct mm_struct *mm; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 |  | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1120 | 	if (sscanf(pidbuf, "%d", &pid) != 1) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 | 		return -EIO; | 
 | 1122 | 	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) | 
 | 1123 | 		return -ENOSPC; | 
 | 1124 |  | 
 | 1125 | 	if (pid) { | 
 | 1126 | 		read_lock(&tasklist_lock); | 
 | 1127 |  | 
 | 1128 | 		tsk = find_task_by_pid(pid); | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1129 | 		if (!tsk || tsk->flags & PF_EXITING) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1130 | 			read_unlock(&tasklist_lock); | 
 | 1131 | 			return -ESRCH; | 
 | 1132 | 		} | 
 | 1133 |  | 
 | 1134 | 		get_task_struct(tsk); | 
 | 1135 | 		read_unlock(&tasklist_lock); | 
 | 1136 |  | 
 | 1137 | 		if ((current->euid) && (current->euid != tsk->uid) | 
 | 1138 | 		    && (current->euid != tsk->suid)) { | 
 | 1139 | 			put_task_struct(tsk); | 
 | 1140 | 			return -EACCES; | 
 | 1141 | 		} | 
 | 1142 | 	} else { | 
 | 1143 | 		tsk = current; | 
 | 1144 | 		get_task_struct(tsk); | 
 | 1145 | 	} | 
 | 1146 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1147 | 	down(&callback_sem); | 
 | 1148 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | 	task_lock(tsk); | 
 | 1150 | 	oldcs = tsk->cpuset; | 
 | 1151 | 	if (!oldcs) { | 
 | 1152 | 		task_unlock(tsk); | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1153 | 		up(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 | 		put_task_struct(tsk); | 
 | 1155 | 		return -ESRCH; | 
 | 1156 | 	} | 
 | 1157 | 	atomic_inc(&cs->count); | 
| Paul Jackson | 6b9c260 | 2006-01-08 01:02:02 -0800 | [diff] [blame] | 1158 | 	rcu_assign_pointer(tsk->cpuset, cs); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | 	task_unlock(tsk); | 
 | 1160 |  | 
 | 1161 | 	guarantee_online_cpus(cs, &cpus); | 
 | 1162 | 	set_cpus_allowed(tsk, cpus); | 
 | 1163 |  | 
| Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1164 | 	from = oldcs->mems_allowed; | 
 | 1165 | 	to = cs->mems_allowed; | 
 | 1166 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1167 | 	up(&callback_sem); | 
| Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1168 |  | 
 | 1169 | 	mm = get_task_mm(tsk); | 
 | 1170 | 	if (mm) { | 
 | 1171 | 		mpol_rebind_mm(mm, &to); | 
 | 1172 | 		mmput(mm); | 
 | 1173 | 	} | 
 | 1174 |  | 
| Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1175 | 	if (is_memory_migrate(cs)) | 
 | 1176 | 		do_migrate_pages(tsk->mm, &from, &to, MPOL_MF_MOVE_ALL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 | 	put_task_struct(tsk); | 
| Paul Jackson | 6b9c260 | 2006-01-08 01:02:02 -0800 | [diff] [blame] | 1178 | 	synchronize_rcu(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | 	if (atomic_dec_and_test(&oldcs->count)) | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1180 | 		check_for_release(oldcs, ppathbuf); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1181 | 	return 0; | 
 | 1182 | } | 
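
/*
 * For reference (an illustrative note, not from the original file):
 * userspace reaches attach_task() by writing a pid to a cpuset's
 * 'tasks' file.  Assuming the cpuset fs is mounted at the
 * conventional /dev/cpuset and a cpuset 'myset' exists:
 *
 *	echo $$ > /dev/cpuset/myset/tasks	# move this shell
 *	echo 0 > /dev/cpuset/myset/tasks	# pid 0 attaches the writing task
 */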
 | 1183 |  | 
 | 1184 | /* The various types of files and directories in a cpuset file system */ | 
 | 1185 |  | 
 | 1186 | typedef enum { | 
 | 1187 | 	FILE_ROOT, | 
 | 1188 | 	FILE_DIR, | 
| Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1189 | 	FILE_MEMORY_MIGRATE, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | 	FILE_CPULIST, | 
 | 1191 | 	FILE_MEMLIST, | 
 | 1192 | 	FILE_CPU_EXCLUSIVE, | 
 | 1193 | 	FILE_MEM_EXCLUSIVE, | 
 | 1194 | 	FILE_NOTIFY_ON_RELEASE, | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1195 | 	FILE_MEMORY_PRESSURE_ENABLED, | 
 | 1196 | 	FILE_MEMORY_PRESSURE, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | 	FILE_TASKLIST, | 
 | 1198 | } cpuset_filetype_t; | 
 | 1199 |  | 
 | 1200 | static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf, | 
 | 1201 | 					size_t nbytes, loff_t *unused_ppos) | 
 | 1202 | { | 
 | 1203 | 	struct cpuset *cs = __d_cs(file->f_dentry->d_parent); | 
 | 1204 | 	struct cftype *cft = __d_cft(file->f_dentry); | 
 | 1205 | 	cpuset_filetype_t type = cft->private; | 
 | 1206 | 	char *buffer; | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1207 | 	char *pathbuf = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 | 	int retval = 0; | 
 | 1209 |  | 
 | 1210 | 	/* Crude upper limit on largest legitimate cpulist user might write. */ | 
 | 1211 | 	if (nbytes > 100 + 6 * NR_CPUS) | 
 | 1212 | 		return -E2BIG; | 
 | 1213 |  | 
 | 1214 | 	/* +1 for nul-terminator */ | 
 | 1215 | 	if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0) | 
 | 1216 | 		return -ENOMEM; | 
 | 1217 |  | 
 | 1218 | 	if (copy_from_user(buffer, userbuf, nbytes)) { | 
 | 1219 | 		retval = -EFAULT; | 
 | 1220 | 		goto out1; | 
 | 1221 | 	} | 
 | 1222 | 	buffer[nbytes] = 0;	/* nul-terminate */ | 
 | 1223 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1224 | 	down(&manage_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 |  | 
 | 1226 | 	if (is_removed(cs)) { | 
 | 1227 | 		retval = -ENODEV; | 
 | 1228 | 		goto out2; | 
 | 1229 | 	} | 
 | 1230 |  | 
 | 1231 | 	switch (type) { | 
 | 1232 | 	case FILE_CPULIST: | 
 | 1233 | 		retval = update_cpumask(cs, buffer); | 
 | 1234 | 		break; | 
 | 1235 | 	case FILE_MEMLIST: | 
 | 1236 | 		retval = update_nodemask(cs, buffer); | 
 | 1237 | 		break; | 
 | 1238 | 	case FILE_CPU_EXCLUSIVE: | 
 | 1239 | 		retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer); | 
 | 1240 | 		break; | 
 | 1241 | 	case FILE_MEM_EXCLUSIVE: | 
 | 1242 | 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer); | 
 | 1243 | 		break; | 
 | 1244 | 	case FILE_NOTIFY_ON_RELEASE: | 
 | 1245 | 		retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer); | 
 | 1246 | 		break; | 
| Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1247 | 	case FILE_MEMORY_MIGRATE: | 
 | 1248 | 		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer); | 
 | 1249 | 		break; | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1250 | 	case FILE_MEMORY_PRESSURE_ENABLED: | 
 | 1251 | 		retval = update_memory_pressure_enabled(cs, buffer); | 
 | 1252 | 		break; | 
 | 1253 | 	case FILE_MEMORY_PRESSURE: | 
 | 1254 | 		retval = -EACCES; | 
 | 1255 | 		break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 | 	case FILE_TASKLIST: | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1257 | 		retval = attach_task(cs, buffer, &pathbuf); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1258 | 		break; | 
 | 1259 | 	default: | 
 | 1260 | 		retval = -EINVAL; | 
 | 1261 | 		goto out2; | 
 | 1262 | 	} | 
 | 1263 |  | 
 | 1264 | 	if (retval == 0) | 
 | 1265 | 		retval = nbytes; | 
 | 1266 | out2: | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1267 | 	up(&manage_sem); | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1268 | 	cpuset_release_agent(pathbuf); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | out1: | 
 | 1270 | 	kfree(buffer); | 
 | 1271 | 	return retval; | 
 | 1272 | } | 
 | 1273 |  | 
 | 1274 | static ssize_t cpuset_file_write(struct file *file, const char __user *buf, | 
 | 1275 | 						size_t nbytes, loff_t *ppos) | 
 | 1276 | { | 
 | 1277 | 	ssize_t retval = 0; | 
 | 1278 | 	struct cftype *cft = __d_cft(file->f_dentry); | 
 | 1279 | 	if (!cft) | 
 | 1280 | 		return -ENODEV; | 
 | 1281 |  | 
 | 1282 | 	/* special function ? */ | 
 | 1283 | 	if (cft->write) | 
 | 1284 | 		retval = cft->write(file, buf, nbytes, ppos); | 
 | 1285 | 	else | 
 | 1286 | 		retval = cpuset_common_file_write(file, buf, nbytes, ppos); | 
 | 1287 |  | 
 | 1288 | 	return retval; | 
 | 1289 | } | 
 | 1290 |  | 
 | 1291 | /* | 
 | 1292 |  * These ascii lists should be read in a single call, by using a user | 
 | 1293 |  * buffer large enough to hold the entire map.  If read in smaller | 
 | 1294 |  * chunks, there is no guarantee of atomicity.  Since the display format | 
 | 1295 |  * used, list of ranges of sequential numbers, is variable length, | 
 | 1296 |  * and since these maps can change value dynamically, one could read | 
 | 1297 |  * gibberish by doing partial reads while a list was changing. | 
 | 1298 |  * A single large read to a buffer that crosses a page boundary is | 
 | 1299 |  * ok, because the result being copied to user land is not recomputed | 
 | 1300 |  * across a page fault. | 
 | 1301 |  */ | 
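
/*
 * For example (an illustrative sketch, not from the original file),
 * a userspace reader should size its buffer for the whole map and
 * fetch it with a single read(), assuming the cpuset fs is mounted
 * at /dev/cpuset:
 *
 *	char buf[4096];
 *	int fd = open("/dev/cpuset/cpus", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));	// one call: consistent snapshot
 */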
 | 1302 |  | 
 | 1303 | static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) | 
 | 1304 | { | 
 | 1305 | 	cpumask_t mask; | 
 | 1306 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1307 | 	down(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | 	mask = cs->cpus_allowed; | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1309 | 	up(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 |  | 
 | 1311 | 	return cpulist_scnprintf(page, PAGE_SIZE, mask); | 
 | 1312 | } | 
 | 1313 |  | 
 | 1314 | static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) | 
 | 1315 | { | 
 | 1316 | 	nodemask_t mask; | 
 | 1317 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1318 | 	down(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 | 	mask = cs->mems_allowed; | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1320 | 	up(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 |  | 
 | 1322 | 	return nodelist_scnprintf(page, PAGE_SIZE, mask); | 
 | 1323 | } | 
 | 1324 |  | 
 | 1325 | static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, | 
 | 1326 | 				size_t nbytes, loff_t *ppos) | 
 | 1327 | { | 
 | 1328 | 	struct cftype *cft = __d_cft(file->f_dentry); | 
 | 1329 | 	struct cpuset *cs = __d_cs(file->f_dentry->d_parent); | 
 | 1330 | 	cpuset_filetype_t type = cft->private; | 
 | 1331 | 	char *page; | 
 | 1332 | 	ssize_t retval = 0; | 
 | 1333 | 	char *s; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 |  | 
 | 1335 | 	if (!(page = (char *)__get_free_page(GFP_KERNEL))) | 
 | 1336 | 		return -ENOMEM; | 
 | 1337 |  | 
 | 1338 | 	s = page; | 
 | 1339 |  | 
 | 1340 | 	switch (type) { | 
 | 1341 | 	case FILE_CPULIST: | 
 | 1342 | 		s += cpuset_sprintf_cpulist(s, cs); | 
 | 1343 | 		break; | 
 | 1344 | 	case FILE_MEMLIST: | 
 | 1345 | 		s += cpuset_sprintf_memlist(s, cs); | 
 | 1346 | 		break; | 
 | 1347 | 	case FILE_CPU_EXCLUSIVE: | 
 | 1348 | 		*s++ = is_cpu_exclusive(cs) ? '1' : '0'; | 
 | 1349 | 		break; | 
 | 1350 | 	case FILE_MEM_EXCLUSIVE: | 
 | 1351 | 		*s++ = is_mem_exclusive(cs) ? '1' : '0'; | 
 | 1352 | 		break; | 
 | 1353 | 	case FILE_NOTIFY_ON_RELEASE: | 
 | 1354 | 		*s++ = notify_on_release(cs) ? '1' : '0'; | 
 | 1355 | 		break; | 
| Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1356 | 	case FILE_MEMORY_MIGRATE: | 
 | 1357 | 		*s++ = is_memory_migrate(cs) ? '1' : '0'; | 
 | 1358 | 		break; | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1359 | 	case FILE_MEMORY_PRESSURE_ENABLED: | 
 | 1360 | 		*s++ = cpuset_memory_pressure_enabled ? '1' : '0'; | 
 | 1361 | 		break; | 
 | 1362 | 	case FILE_MEMORY_PRESSURE: | 
 | 1363 | 		s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter)); | 
 | 1364 | 		break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | 	default: | 
 | 1366 | 		retval = -EINVAL; | 
 | 1367 | 		goto out; | 
 | 1368 | 	} | 
 | 1369 | 	*s++ = '\n'; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 |  | 
| Al Viro | eacaa1f | 2005-09-30 03:26:43 +0100 | [diff] [blame] | 1371 | 	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | out: | 
 | 1373 | 	free_page((unsigned long)page); | 
 | 1374 | 	return retval; | 
 | 1375 | } | 
 | 1376 |  | 
 | 1377 | static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes, | 
 | 1378 | 								loff_t *ppos) | 
 | 1379 | { | 
 | 1380 | 	ssize_t retval = 0; | 
 | 1381 | 	struct cftype *cft = __d_cft(file->f_dentry); | 
 | 1382 | 	if (!cft) | 
 | 1383 | 		return -ENODEV; | 
 | 1384 |  | 
 | 1385 | 	/* special function ? */ | 
 | 1386 | 	if (cft->read) | 
 | 1387 | 		retval = cft->read(file, buf, nbytes, ppos); | 
 | 1388 | 	else | 
 | 1389 | 		retval = cpuset_common_file_read(file, buf, nbytes, ppos); | 
 | 1390 |  | 
 | 1391 | 	return retval; | 
 | 1392 | } | 
 | 1393 |  | 
 | 1394 | static int cpuset_file_open(struct inode *inode, struct file *file) | 
 | 1395 | { | 
 | 1396 | 	int err; | 
 | 1397 | 	struct cftype *cft; | 
 | 1398 |  | 
 | 1399 | 	err = generic_file_open(inode, file); | 
 | 1400 | 	if (err) | 
 | 1401 | 		return err; | 
 | 1402 |  | 
 | 1403 | 	cft = __d_cft(file->f_dentry); | 
 | 1404 | 	if (!cft) | 
 | 1405 | 		return -ENODEV; | 
 | 1406 | 	if (cft->open) | 
 | 1407 | 		err = cft->open(inode, file); | 
 | 1408 | 	else | 
 | 1409 | 		err = 0; | 
 | 1410 |  | 
 | 1411 | 	return err; | 
 | 1412 | } | 
 | 1413 |  | 
 | 1414 | static int cpuset_file_release(struct inode *inode, struct file *file) | 
 | 1415 | { | 
 | 1416 | 	struct cftype *cft = __d_cft(file->f_dentry); | 
 | 1417 | 	if (cft->release) | 
 | 1418 | 		return cft->release(inode, file); | 
 | 1419 | 	return 0; | 
 | 1420 | } | 
 | 1421 |  | 
| Paul Jackson | 18a19cb | 2005-10-30 15:02:31 -0800 | [diff] [blame] | 1422 | /* | 
 | 1423 |  * cpuset_rename - Only allow simple rename of directories in place. | 
 | 1424 |  */ | 
 | 1425 | static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry, | 
 | 1426 |                   struct inode *new_dir, struct dentry *new_dentry) | 
 | 1427 | { | 
 | 1428 | 	if (!S_ISDIR(old_dentry->d_inode->i_mode)) | 
 | 1429 | 		return -ENOTDIR; | 
 | 1430 | 	if (new_dentry->d_inode) | 
 | 1431 | 		return -EEXIST; | 
 | 1432 | 	if (old_dir != new_dir) | 
 | 1433 | 		return -EIO; | 
 | 1434 | 	return simple_rename(old_dir, old_dentry, new_dir, new_dentry); | 
 | 1435 | } | 
 | 1436 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | static struct file_operations cpuset_file_operations = { | 
 | 1438 | 	.read = cpuset_file_read, | 
 | 1439 | 	.write = cpuset_file_write, | 
 | 1440 | 	.llseek = generic_file_llseek, | 
 | 1441 | 	.open = cpuset_file_open, | 
 | 1442 | 	.release = cpuset_file_release, | 
 | 1443 | }; | 
 | 1444 |  | 
 | 1445 | static struct inode_operations cpuset_dir_inode_operations = { | 
 | 1446 | 	.lookup = simple_lookup, | 
 | 1447 | 	.mkdir = cpuset_mkdir, | 
 | 1448 | 	.rmdir = cpuset_rmdir, | 
| Paul Jackson | 18a19cb | 2005-10-30 15:02:31 -0800 | [diff] [blame] | 1449 | 	.rename = cpuset_rename, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1450 | }; | 
 | 1451 |  | 
 | 1452 | static int cpuset_create_file(struct dentry *dentry, int mode) | 
 | 1453 | { | 
 | 1454 | 	struct inode *inode; | 
 | 1455 |  | 
 | 1456 | 	if (!dentry) | 
 | 1457 | 		return -ENOENT; | 
 | 1458 | 	if (dentry->d_inode) | 
 | 1459 | 		return -EEXIST; | 
 | 1460 |  | 
 | 1461 | 	inode = cpuset_new_inode(mode); | 
 | 1462 | 	if (!inode) | 
 | 1463 | 		return -ENOMEM; | 
 | 1464 |  | 
 | 1465 | 	if (S_ISDIR(mode)) { | 
 | 1466 | 		inode->i_op = &cpuset_dir_inode_operations; | 
 | 1467 | 		inode->i_fop = &simple_dir_operations; | 
 | 1468 |  | 
 | 1469 | 		/* start off with i_nlink == 2 (for "." entry) */ | 
 | 1470 | 		inode->i_nlink++; | 
 | 1471 | 	} else if (S_ISREG(mode)) { | 
 | 1472 | 		inode->i_size = 0; | 
 | 1473 | 		inode->i_fop = &cpuset_file_operations; | 
 | 1474 | 	} | 
 | 1475 |  | 
 | 1476 | 	d_instantiate(dentry, inode); | 
 | 1477 | 	dget(dentry);	/* Extra count - pin the dentry in core */ | 
 | 1478 | 	return 0; | 
 | 1479 | } | 
 | 1480 |  | 
 | 1481 | /* | 
 | 1482 |  *	cpuset_create_dir - create a directory for an object. | 
| Paul Jackson | c5b2aff | 2006-01-08 01:01:51 -0800 | [diff] [blame] | 1483 |  *	cs:	the cpuset we create the directory for. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1484 |  *		It must have a valid ->parent field, | 
 | 1485 |  *		and we will fill its ->dentry field. | 
 | 1486 |  *	name:	The name to give to the cpuset directory. Will be copied. | 
 | 1487 |  *	mode:	mode to set on new directory. | 
 | 1488 |  */ | 
 | 1489 |  | 
 | 1490 | static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode) | 
 | 1491 | { | 
 | 1492 | 	struct dentry *dentry = NULL; | 
 | 1493 | 	struct dentry *parent; | 
 | 1494 | 	int error = 0; | 
 | 1495 |  | 
 | 1496 | 	parent = cs->parent->dentry; | 
 | 1497 | 	dentry = cpuset_get_dentry(parent, name); | 
 | 1498 | 	if (IS_ERR(dentry)) | 
 | 1499 | 		return PTR_ERR(dentry); | 
 | 1500 | 	error = cpuset_create_file(dentry, S_IFDIR | mode); | 
 | 1501 | 	if (!error) { | 
 | 1502 | 		dentry->d_fsdata = cs; | 
 | 1503 | 		parent->d_inode->i_nlink++; | 
 | 1504 | 		cs->dentry = dentry; | 
 | 1505 | 	} | 
 | 1506 | 	dput(dentry); | 
 | 1507 |  | 
 | 1508 | 	return error; | 
 | 1509 | } | 
 | 1510 |  | 
 | 1511 | static int cpuset_add_file(struct dentry *dir, const struct cftype *cft) | 
 | 1512 | { | 
 | 1513 | 	struct dentry *dentry; | 
 | 1514 | 	int error; | 
 | 1515 |  | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1516 | 	mutex_lock(&dir->d_inode->i_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | 	dentry = cpuset_get_dentry(dir, cft->name); | 
 | 1518 | 	if (!IS_ERR(dentry)) { | 
 | 1519 | 		error = cpuset_create_file(dentry, 0644 | S_IFREG); | 
 | 1520 | 		if (!error) | 
 | 1521 | 			dentry->d_fsdata = (void *)cft; | 
 | 1522 | 		dput(dentry); | 
 | 1523 | 	} else | 
 | 1524 | 		error = PTR_ERR(dentry); | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1525 | 	mutex_unlock(&dir->d_inode->i_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1526 | 	return error; | 
 | 1527 | } | 
 | 1528 |  | 
 | 1529 | /* | 
 | 1530 |  * Stuff for reading the 'tasks' file. | 
 | 1531 |  * | 
 | 1532 |  * Reading this file can return large amounts of data if a cpuset has | 
 | 1533 |  * *lots* of attached tasks. So it may need several calls to read(), | 
 | 1534 |  * but we cannot guarantee that the information we produce is correct | 
 | 1535 |  * unless we produce it entirely atomically. | 
 | 1536 |  * | 
 | 1537 |  * Upon tasks file open(), a struct ctr_struct is allocated, that | 
 | 1538 |  * will have a pointer to an array (also allocated here).  The struct | 
 | 1539 |  * ctr_struct * is stored in file->private_data.  Its resources will | 
 | 1540 |  * be freed by release() when the file is closed.  The array is used | 
 | 1541 |  * to sprintf the PIDs and then used by read(). | 
 | 1542 |  */ | 
 | 1543 |  | 
 | 1544 | /* cpusets_tasks_read array */ | 
 | 1545 |  | 
 | 1546 | struct ctr_struct { | 
 | 1547 | 	char *buf; | 
 | 1548 | 	int bufsz; | 
 | 1549 | }; | 
 | 1550 |  | 
 | 1551 | /* | 
 | 1552 |  * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'. | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1553 |  * Return actual number of pids loaded.  No need to task_lock(p) | 
 | 1554 |  * when reading out p->cpuset, as we don't really care if it changes | 
 | 1555 |  * on the next cycle, and we are not going to try to dereference it. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 |  */ | 
 | 1557 | static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) | 
 | 1558 | { | 
 | 1559 | 	int n = 0; | 
 | 1560 | 	struct task_struct *g, *p; | 
 | 1561 |  | 
 | 1562 | 	read_lock(&tasklist_lock); | 
 | 1563 |  | 
 | 1564 | 	do_each_thread(g, p) { | 
 | 1565 | 		if (p->cpuset == cs) { | 
 | 1566 | 			pidarray[n++] = p->pid; | 
 | 1567 | 			if (unlikely(n == npids)) | 
 | 1568 | 				goto array_full; | 
 | 1569 | 		} | 
 | 1570 | 	} while_each_thread(g, p); | 
 | 1571 |  | 
 | 1572 | array_full: | 
 | 1573 | 	read_unlock(&tasklist_lock); | 
 | 1574 | 	return n; | 
 | 1575 | } | 
 | 1576 |  | 
 | 1577 | static int cmppid(const void *a, const void *b) | 
 | 1578 | { | 
 | 1579 | 	return *(pid_t *)a - *(pid_t *)b; | 
 | 1580 | } | 
 | 1581 |  | 
 | 1582 | /* | 
 | 1583 |  * Convert array 'a' of 'npids' pid_t's to a string of newline separated | 
 | 1584 |  * decimal pids in 'buf'.  Don't write more than 'sz' chars, but return | 
 | 1585 |  * count 'cnt' of how many chars would be written if buf were large enough. | 
 | 1586 |  */ | 
 | 1587 | static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) | 
 | 1588 | { | 
 | 1589 | 	int cnt = 0; | 
 | 1590 | 	int i; | 
 | 1591 |  | 
 | 1592 | 	for (i = 0; i < npids; i++) | 
 | 1593 | 		cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); | 
 | 1594 | 	return cnt; | 
 | 1595 | } | 
 | 1596 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1597 | /* | 
 | 1598 |  * Handle an open on 'tasks' file.  Prepare a buffer listing the | 
 | 1599 |  * process ids of tasks currently attached to the cpuset being opened. | 
 | 1600 |  * | 
 | 1601 |  * Does not require any specific cpuset semaphores, and does not take any. | 
 | 1602 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 | static int cpuset_tasks_open(struct inode *unused, struct file *file) | 
 | 1604 | { | 
 | 1605 | 	struct cpuset *cs = __d_cs(file->f_dentry->d_parent); | 
 | 1606 | 	struct ctr_struct *ctr; | 
 | 1607 | 	pid_t *pidarray; | 
 | 1608 | 	int npids; | 
 | 1609 | 	char c; | 
 | 1610 |  | 
 | 1611 | 	if (!(file->f_mode & FMODE_READ)) | 
 | 1612 | 		return 0; | 
 | 1613 |  | 
 | 1614 | 	ctr = kmalloc(sizeof(*ctr), GFP_KERNEL); | 
 | 1615 | 	if (!ctr) | 
 | 1616 | 		goto err0; | 
 | 1617 |  | 
 | 1618 | 	/* | 
 | 1619 | 	 * If cpuset gets more users after we read count, we won't have | 
 | 1620 | 	 * enough space - tough.  This race is indistinguishable to the | 
 | 1621 | 	 * caller from the case that the additional cpuset users didn't | 
 | 1622 | 	 * show up until sometime later on. | 
 | 1623 | 	 */ | 
 | 1624 | 	npids = atomic_read(&cs->count); | 
 | 1625 | 	pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); | 
 | 1626 | 	if (!pidarray) | 
 | 1627 | 		goto err1; | 
 | 1628 |  | 
 | 1629 | 	npids = pid_array_load(pidarray, npids, cs); | 
 | 1630 | 	sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); | 
 | 1631 |  | 
 | 1632 | 	/* Call pid_array_to_buf() twice, first just to get bufsz */ | 
 | 1633 | 	ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1; | 
 | 1634 | 	ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL); | 
 | 1635 | 	if (!ctr->buf) | 
 | 1636 | 		goto err2; | 
 | 1637 | 	ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids); | 
 | 1638 |  | 
 | 1639 | 	kfree(pidarray); | 
 | 1640 | 	file->private_data = ctr; | 
 | 1641 | 	return 0; | 
 | 1642 |  | 
 | 1643 | err2: | 
 | 1644 | 	kfree(pidarray); | 
 | 1645 | err1: | 
 | 1646 | 	kfree(ctr); | 
 | 1647 | err0: | 
 | 1648 | 	return -ENOMEM; | 
 | 1649 | } | 
 | 1650 |  | 
 | 1651 | static ssize_t cpuset_tasks_read(struct file *file, char __user *buf, | 
 | 1652 | 						size_t nbytes, loff_t *ppos) | 
 | 1653 | { | 
 | 1654 | 	struct ctr_struct *ctr = file->private_data; | 
 | 1655 |  | 
 | 1656 | 	if (*ppos + nbytes > ctr->bufsz) | 
 | 1657 | 		nbytes = ctr->bufsz - *ppos; | 
 | 1658 | 	if (copy_to_user(buf, ctr->buf + *ppos, nbytes)) | 
 | 1659 | 		return -EFAULT; | 
 | 1660 | 	*ppos += nbytes; | 
 | 1661 | 	return nbytes; | 
 | 1662 | } | 
 | 1663 |  | 
 | 1664 | static int cpuset_tasks_release(struct inode *unused_inode, struct file *file) | 
 | 1665 | { | 
 | 1666 | 	struct ctr_struct *ctr; | 
 | 1667 |  | 
 | 1668 | 	if (file->f_mode & FMODE_READ) { | 
 | 1669 | 		ctr = file->private_data; | 
 | 1670 | 		kfree(ctr->buf); | 
 | 1671 | 		kfree(ctr); | 
 | 1672 | 	} | 
 | 1673 | 	return 0; | 
 | 1674 | } | 
 | 1675 |  | 
 | 1676 | /* | 
 | 1677 |  * for the common functions, 'private' gives the type of file | 
 | 1678 |  */ | 
 | 1679 |  | 
 | 1680 | static struct cftype cft_tasks = { | 
 | 1681 | 	.name = "tasks", | 
 | 1682 | 	.open = cpuset_tasks_open, | 
 | 1683 | 	.read = cpuset_tasks_read, | 
 | 1684 | 	.release = cpuset_tasks_release, | 
 | 1685 | 	.private = FILE_TASKLIST, | 
 | 1686 | }; | 
 | 1687 |  | 
 | 1688 | static struct cftype cft_cpus = { | 
 | 1689 | 	.name = "cpus", | 
 | 1690 | 	.private = FILE_CPULIST, | 
 | 1691 | }; | 
 | 1692 |  | 
 | 1693 | static struct cftype cft_mems = { | 
 | 1694 | 	.name = "mems", | 
 | 1695 | 	.private = FILE_MEMLIST, | 
 | 1696 | }; | 
 | 1697 |  | 
 | 1698 | static struct cftype cft_cpu_exclusive = { | 
 | 1699 | 	.name = "cpu_exclusive", | 
 | 1700 | 	.private = FILE_CPU_EXCLUSIVE, | 
 | 1701 | }; | 
 | 1702 |  | 
 | 1703 | static struct cftype cft_mem_exclusive = { | 
 | 1704 | 	.name = "mem_exclusive", | 
 | 1705 | 	.private = FILE_MEM_EXCLUSIVE, | 
 | 1706 | }; | 
 | 1707 |  | 
 | 1708 | static struct cftype cft_notify_on_release = { | 
 | 1709 | 	.name = "notify_on_release", | 
 | 1710 | 	.private = FILE_NOTIFY_ON_RELEASE, | 
 | 1711 | }; | 
 | 1712 |  | 
| Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1713 | static struct cftype cft_memory_migrate = { | 
 | 1714 | 	.name = "memory_migrate", | 
 | 1715 | 	.private = FILE_MEMORY_MIGRATE, | 
 | 1716 | }; | 
 | 1717 |  | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1718 | static struct cftype cft_memory_pressure_enabled = { | 
 | 1719 | 	.name = "memory_pressure_enabled", | 
 | 1720 | 	.private = FILE_MEMORY_PRESSURE_ENABLED, | 
 | 1721 | }; | 
 | 1722 |  | 
 | 1723 | static struct cftype cft_memory_pressure = { | 
 | 1724 | 	.name = "memory_pressure", | 
 | 1725 | 	.private = FILE_MEMORY_PRESSURE, | 
 | 1726 | }; | 
 | 1727 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1728 | static int cpuset_populate_dir(struct dentry *cs_dentry) | 
 | 1729 | { | 
 | 1730 | 	int err; | 
 | 1731 |  | 
 | 1732 | 	if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0) | 
 | 1733 | 		return err; | 
 | 1734 | 	if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0) | 
 | 1735 | 		return err; | 
 | 1736 | 	if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0) | 
 | 1737 | 		return err; | 
 | 1738 | 	if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0) | 
 | 1739 | 		return err; | 
 | 1740 | 	if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0) | 
 | 1741 | 		return err; | 
| Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1742 | 	if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0) | 
 | 1743 | 		return err; | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1744 | 	if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0) | 
 | 1745 | 		return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1746 | 	if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0) | 
 | 1747 | 		return err; | 
 | 1748 | 	return 0; | 
 | 1749 | } | 
 | 1750 |  | 
 | 1751 | /* | 
 | 1752 |  *	cpuset_create - create a cpuset | 
 | 1753 |  *	parent:	cpuset that will be parent of the new cpuset. | 
 | 1754 |  *	name:		name of the new cpuset. Will be strcpy'ed. | 
 | 1755 |  *	mode:		mode to set on new inode | 
 | 1756 |  * | 
 | 1757 |  *	Must be called with the i_mutex on the parent inode held | 
 | 1758 |  */ | 
 | 1759 |  | 
 | 1760 | static long cpuset_create(struct cpuset *parent, const char *name, int mode) | 
 | 1761 | { | 
 | 1762 | 	struct cpuset *cs; | 
 | 1763 | 	int err; | 
 | 1764 |  | 
 | 1765 | 	cs = kmalloc(sizeof(*cs), GFP_KERNEL); | 
 | 1766 | 	if (!cs) | 
 | 1767 | 		return -ENOMEM; | 
 | 1768 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1769 | 	down(&manage_sem); | 
| Paul Jackson | cf2a473 | 2006-01-08 01:01:54 -0800 | [diff] [blame] | 1770 | 	cpuset_update_task_memory_state(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1771 | 	cs->flags = 0; | 
 | 1772 | 	if (notify_on_release(parent)) | 
 | 1773 | 		set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); | 
 | 1774 | 	cs->cpus_allowed = CPU_MASK_NONE; | 
 | 1775 | 	cs->mems_allowed = NODE_MASK_NONE; | 
 | 1776 | 	atomic_set(&cs->count, 0); | 
 | 1777 | 	INIT_LIST_HEAD(&cs->sibling); | 
 | 1778 | 	INIT_LIST_HEAD(&cs->children); | 
 | 1779 | 	atomic_inc(&cpuset_mems_generation); | 
 | 1780 | 	cs->mems_generation = atomic_read(&cpuset_mems_generation); | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1781 | 	fmeter_init(&cs->fmeter); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1782 |  | 
 | 1783 | 	cs->parent = parent; | 
 | 1784 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1785 | 	down(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1786 | 	list_add(&cs->sibling, &cs->parent->children); | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 1787 | 	number_of_cpusets++; | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1788 | 	up(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1789 |  | 
 | 1790 | 	err = cpuset_create_dir(cs, name, mode); | 
 | 1791 | 	if (err < 0) | 
 | 1792 | 		goto err; | 
 | 1793 |  | 
 | 1794 | 	/* | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1795 | 	 * Release manage_sem before cpuset_populate_dir() because it | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1796 | 	 * will mutex_lock() this new directory's i_mutex and if we race with | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1797 | 	 * another mkdir, we might deadlock. | 
 | 1798 | 	 */ | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1799 | 	up(&manage_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 |  | 
 | 1801 | 	err = cpuset_populate_dir(cs->dentry); | 
 | 1802 | 	/* If err < 0, we have a half-filled directory - oh well ;) */ | 
 | 1803 | 	return 0; | 
 | 1804 | err: | 
 | 1805 | 	list_del(&cs->sibling); | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1806 | 	up(&manage_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1807 | 	kfree(cs); | 
 | 1808 | 	return err; | 
 | 1809 | } | 
 | 1810 |  | 
 | 1811 | static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 
 | 1812 | { | 
 | 1813 | 	struct cpuset *c_parent = dentry->d_parent->d_fsdata; | 
 | 1814 |  | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1815 | 	/* the vfs holds inode->i_mutex already */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1816 | 	return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); | 
 | 1817 | } | 
 | 1818 |  | 
 | 1819 | static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) | 
 | 1820 | { | 
 | 1821 | 	struct cpuset *cs = dentry->d_fsdata; | 
 | 1822 | 	struct dentry *d; | 
 | 1823 | 	struct cpuset *parent; | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1824 | 	char *pathbuf = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1825 |  | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1826 | 	/* the vfs already holds the i_mutex of both inodes involved */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1828 | 	down(&manage_sem); | 
| Paul Jackson | cf2a473 | 2006-01-08 01:01:54 -0800 | [diff] [blame] | 1829 | 	cpuset_update_task_memory_state(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1830 | 	if (atomic_read(&cs->count) > 0) { | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1831 | 		up(&manage_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1832 | 		return -EBUSY; | 
 | 1833 | 	} | 
 | 1834 | 	if (!list_empty(&cs->children)) { | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1835 | 		up(&manage_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | 		return -EBUSY; | 
 | 1837 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1838 | 	parent = cs->parent; | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1839 | 	down(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1840 | 	set_bit(CS_REMOVED, &cs->flags); | 
| Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1841 | 	if (is_cpu_exclusive(cs)) | 
 | 1842 | 		update_cpu_domains(cs); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1843 | 	list_del(&cs->sibling);	/* delete my sibling from parent->children */ | 
| Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1844 | 	spin_lock(&cs->dentry->d_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1845 | 	d = dget(cs->dentry); | 
 | 1846 | 	cs->dentry = NULL; | 
 | 1847 | 	spin_unlock(&d->d_lock); | 
 | 1848 | 	cpuset_d_remove_dir(d); | 
 | 1849 | 	dput(d); | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 1850 | 	number_of_cpusets--; | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1851 | 	up(&callback_sem); | 
 | 1852 | 	if (list_empty(&parent->children)) | 
 | 1853 | 		check_for_release(parent, &pathbuf); | 
 | 1854 | 	up(&manage_sem); | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1855 | 	cpuset_release_agent(pathbuf); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1856 | 	return 0; | 
 | 1857 | } | 
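
/*
 * For reference (an illustrative note, not from the original file):
 * assuming the cpuset fs is mounted at /dev/cpuset, cpusets are
 * created and removed from userspace with ordinary mkdir/rmdir:
 *
 *	mkdir /dev/cpuset/myset		# cpuset_mkdir() -> cpuset_create()
 *	rmdir /dev/cpuset/myset		# cpuset_rmdir(); fails -EBUSY while
 *					# tasks remain attached or children exist
 */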
 | 1858 |  | 
| Paul Jackson | c417f02 | 2006-01-08 01:02:01 -0800 | [diff] [blame] | 1859 | /* | 
 | 1860 |  * cpuset_init_early - just enough so that the calls to | 
 | 1861 |  * cpuset_update_task_memory_state() in early init code | 
 | 1862 |  * are harmless. | 
 | 1863 |  */ | 
 | 1864 |  | 
 | 1865 | int __init cpuset_init_early(void) | 
 | 1866 | { | 
 | 1867 | 	struct task_struct *tsk = current; | 
 | 1868 |  | 
 | 1869 | 	tsk->cpuset = &top_cpuset; | 
 | 1870 | 	tsk->cpuset->mems_generation = atomic_read(&cpuset_mems_generation); | 
 | 1871 | 	return 0; | 
 | 1872 | } | 
 | 1873 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1874 | /** | 
 | 1875 |  * cpuset_init - initialize cpusets at system boot | 
 | 1876 |  * | 
 | 1877 |  * Description: Initialize top_cpuset and the cpuset internal file system. | 
 | 1878 |  **/ | 
 | 1879 |  | 
 | 1880 | int __init cpuset_init(void) | 
 | 1881 | { | 
 | 1882 | 	struct dentry *root; | 
 | 1883 | 	int err; | 
 | 1884 |  | 
 | 1885 | 	top_cpuset.cpus_allowed = CPU_MASK_ALL; | 
 | 1886 | 	top_cpuset.mems_allowed = NODE_MASK_ALL; | 
 | 1887 |  | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1888 | 	fmeter_init(&top_cpuset.fmeter); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1889 | 	atomic_inc(&cpuset_mems_generation); | 
 | 1890 | 	top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation); | 
 | 1891 |  | 
 | 1892 | 	init_task.cpuset = &top_cpuset; | 
 | 1893 |  | 
 | 1894 | 	err = register_filesystem(&cpuset_fs_type); | 
 | 1895 | 	if (err < 0) | 
 | 1896 | 		goto out; | 
 | 1897 | 	cpuset_mount = kern_mount(&cpuset_fs_type); | 
 | 1898 | 	if (IS_ERR(cpuset_mount)) { | 
 | 1899 | 		printk(KERN_ERR "cpuset: could not mount!\n"); | 
 | 1900 | 		err = PTR_ERR(cpuset_mount); | 
 | 1901 | 		cpuset_mount = NULL; | 
 | 1902 | 		goto out; | 
 | 1903 | 	} | 
 | 1904 | 	root = cpuset_mount->mnt_sb->s_root; | 
 | 1905 | 	root->d_fsdata = &top_cpuset; | 
 | 1906 | 	root->d_inode->i_nlink++; | 
 | 1907 | 	top_cpuset.dentry = root; | 
 | 1908 | 	root->d_inode->i_op = &cpuset_dir_inode_operations; | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 1909 | 	number_of_cpusets = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1910 | 	err = cpuset_populate_dir(root); | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1911 | 	/* memory_pressure_enabled is in root cpuset only */ | 
 | 1912 | 	if (err == 0) | 
 | 1913 | 		err = cpuset_add_file(root, &cft_memory_pressure_enabled); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1914 | out: | 
 | 1915 | 	return err; | 
 | 1916 | } | 
 | 1917 |  | 
 | 1918 | /** | 
 | 1919 |  * cpuset_init_smp - initialize cpus_allowed and mems_allowed masks | 
 | 1920 |  * | 
 | 1921 |  * Description: Finish top cpuset after the cpu and node online maps are initialized | 
 | 1922 |  **/ | 
 | 1923 |  | 
 | 1924 | void __init cpuset_init_smp(void) | 
 | 1925 | { | 
 | 1926 | 	top_cpuset.cpus_allowed = cpu_online_map; | 
 | 1927 | 	top_cpuset.mems_allowed = node_online_map; | 
 | 1928 | } | 
 | 1929 |  | 
 | 1930 | /** | 
 | 1931 |  * cpuset_fork - attach newly forked task to its parent's cpuset. | 
| Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 1932 |  * @child: pointer to task_struct of the newly forked child task. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 |  * | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1934 |  * Description: A task inherits its parent's cpuset at fork(). | 
 | 1935 |  * | 
 | 1936 |  * A pointer to the shared cpuset was automatically copied in fork.c | 
 | 1937 |  * by dup_task_struct().  However, we ignore that copy, since it was | 
 | 1938 |  * not made under the protection of task_lock(), so might no longer be | 
 | 1939 |  * a valid cpuset pointer.  attach_task() might have already changed | 
 | 1940 |  * current->cpuset, allowing the previously referenced cpuset to | 
 | 1941 |  * be removed and freed.  Instead, we task_lock(current) and copy | 
 | 1942 |  * its present value of current->cpuset for our freshly forked child. | 
 | 1943 |  * | 
 | 1944 |  * At the point that cpuset_fork() is called, 'current' is the parent | 
 | 1945 |  * task, and the passed argument 'child' points to the child task. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1946 |  **/ | 
 | 1947 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1948 | void cpuset_fork(struct task_struct *child) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | { | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1950 | 	task_lock(current); | 
 | 1951 | 	child->cpuset = current->cpuset; | 
 | 1952 | 	atomic_inc(&child->cpuset->count); | 
 | 1953 | 	task_unlock(current); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1954 | } | 
 | 1955 |  | 
 | 1956 | /** | 
 | 1957 |  * cpuset_exit - detach cpuset from exiting task | 
 | 1958 |  * @tsk: pointer to task_struct of exiting process | 
 | 1959 |  * | 
 | 1960 |  * Description: Detach cpuset from @tsk and release it. | 
 | 1961 |  * | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1962 |  * Note that cpusets marked notify_on_release force every task in | 
 | 1963 |  * them to take the global manage_sem semaphore when exiting. | 
 | 1964 |  * This could impact scaling on very large systems.  Be reluctant to | 
 | 1965 |  * use notify_on_release cpusets where very high task exit scaling | 
 | 1966 |  * is required on large systems. | 
| Paul Jackson | 2efe86b | 2005-05-27 02:02:43 -0700 | [diff] [blame] | 1967 |  * | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1968 |  * Don't even think about dereferencing 'cs' after the cpuset use count | 
 | 1969 |  * goes to zero, except inside a critical section guarded by manage_sem | 
 | 1970 |  * or callback_sem.   Otherwise a zero cpuset use count is a license to | 
 | 1971 |  * any other task to nuke the cpuset immediately, via cpuset_rmdir(). | 
 | 1972 |  * | 
 | 1973 |  * This routine has to take manage_sem, not callback_sem, because | 
 | 1974 |  * it is holding that semaphore while calling check_for_release(), | 
 | 1975 |  * which calls kmalloc(), so can't be called holding callback_sem. | 
 | 1976 |  * | 
 | 1977 |  * We don't need to task_lock() this reference to tsk->cpuset, | 
 | 1978 |  * because tsk is already marked PF_EXITING, so attach_task() won't | 
| Paul Jackson | b4b2641 | 2006-01-08 01:01:53 -0800 | [diff] [blame] | 1979 |  * mess with it, or the task is a failed fork, never visible to attach_task(). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1980 |  **/ | 
 | 1981 |  | 
 | 1982 | void cpuset_exit(struct task_struct *tsk) | 
 | 1983 | { | 
 | 1984 | 	struct cpuset *cs; | 
 | 1985 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1986 | 	cs = tsk->cpuset; | 
 | 1987 | 	tsk->cpuset = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1988 |  | 
| Paul Jackson | 2efe86b | 2005-05-27 02:02:43 -0700 | [diff] [blame] | 1989 | 	if (notify_on_release(cs)) { | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1990 | 		char *pathbuf = NULL; | 
 | 1991 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1992 | 		down(&manage_sem); | 
| Paul Jackson | 2efe86b | 2005-05-27 02:02:43 -0700 | [diff] [blame] | 1993 | 		if (atomic_dec_and_test(&cs->count)) | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1994 | 			check_for_release(cs, &pathbuf); | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1995 | 		up(&manage_sem); | 
| Paul Jackson | 3077a26 | 2005-08-09 10:07:59 -0700 | [diff] [blame] | 1996 | 		cpuset_release_agent(pathbuf); | 
| Paul Jackson | 2efe86b | 2005-05-27 02:02:43 -0700 | [diff] [blame] | 1997 | 	} else { | 
 | 1998 | 		atomic_dec(&cs->count); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | 	} | 
 | 2000 | } | 
 | 2001 |  | 
 | 2002 | /** | 
 | 2003 |  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. | 
 | 2004 |  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | 
 | 2005 |  * | 
 | 2006 |  * Description: Returns the cpumask_t cpus_allowed of the cpuset | 
 | 2007 |  * attached to the specified @tsk.  Guaranteed to return some non-empty | 
 | 2008 |  * subset of cpu_online_map, even if this means going outside the | 
 | 2009 |  * task's cpuset. | 
 | 2010 |  **/ | 
 | 2011 |  | 
| Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 2012 | cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2013 | { | 
 | 2014 | 	cpumask_t mask; | 
 | 2015 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2016 | 	down(&callback_sem); | 
| Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 2017 | 	task_lock(tsk); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2018 | 	guarantee_online_cpus(tsk->cpuset, &mask); | 
| Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 2019 | 	task_unlock(tsk); | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2020 | 	up(&callback_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2021 |  | 
 | 2022 | 	return mask; | 
 | 2023 | } | 
 | 2024 |  | 
 | 2025 | void cpuset_init_current_mems_allowed(void) | 
 | 2026 | { | 
 | 2027 | 	current->mems_allowed = NODE_MASK_ALL; | 
 | 2028 | } | 
 | 2029 |  | 
| Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 2030 | /** | 
| Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 2031 |  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset. | 
 | 2032 |  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. | 
 | 2033 |  * | 
 | 2034 |  * Description: Returns the nodemask_t mems_allowed of the cpuset | 
 | 2035 |  * attached to the specified @tsk.  Guaranteed to return some non-empty | 
 | 2036 |  * subset of node_online_map, even if this means going outside the | 
 | 2037 |  * task's cpuset. | 
 | 2038 |  **/ | 
 | 2039 |  | 
 | 2040 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | 
 | 2041 | { | 
 | 2042 | 	nodemask_t mask; | 
 | 2043 |  | 
 | 2044 | 	down(&callback_sem); | 
 | 2045 | 	task_lock(tsk); | 
 | 2046 | 	guarantee_online_mems(tsk->cpuset, &mask); | 
 | 2047 | 	task_unlock(tsk); | 
 | 2048 | 	up(&callback_sem); | 
 | 2049 |  | 
 | 2050 | 	return mask; | 
 | 2051 | } | 
 | 2052 |  | 
 | 2053 | /** | 
| Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 2054 |  * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed | 
 | 2055 |  * @zl: the zonelist to be checked | 
 | 2056 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2057 |  * Are any of the nodes on zonelist zl allowed in current->mems_allowed? | 
 | 2058 |  */ | 
 | 2059 | int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) | 
 | 2060 | { | 
 | 2061 | 	int i; | 
 | 2062 |  | 
 | 2063 | 	for (i = 0; zl->zones[i]; i++) { | 
 | 2064 | 		int nid = zl->zones[i]->zone_pgdat->node_id; | 
 | 2065 |  | 
 | 2066 | 		if (node_isset(nid, current->mems_allowed)) | 
 | 2067 | 			return 1; | 
 | 2068 | 	} | 
 | 2069 | 	return 0; | 
 | 2070 | } | 
 | 2071 |  | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2072 | /* | 
 | 2073 |  * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2074 |  * ancestor to the specified cpuset.  Call holding callback_sem. | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2075 |  * If no ancestor is mem_exclusive (an unusual configuration), then | 
 | 2076 |  * returns the root cpuset. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2077 |  */ | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2078 | static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2079 | { | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2080 | 	while (!is_mem_exclusive(cs) && cs->parent) | 
 | 2081 | 		cs = cs->parent; | 
 | 2082 | 	return cs; | 
 | 2083 | } | 
 | 2084 |  | 
 | 2085 | /** | 
 | 2086 |  * cpuset_zone_allowed - Can we allocate memory on zone z's memory node? | 
 | 2087 |  * @z: is this zone on an allowed node? | 
 | 2088 |  * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL) | 
 | 2089 |  * | 
 | 2090 |  * If we're in interrupt, yes, we can always allocate.  If zone | 
 | 2091 |  * z's node is in our tasks mems_allowed, yes.  If it's not a | 
 | 2092 |  * __GFP_HARDWALL request and this zone's node is in the nearest | 
 | 2093 |  * mem_exclusive cpuset ancestor to this task's cpuset, yes. | 
 | 2094 |  * Otherwise, no. | 
 | 2095 |  * | 
 | 2096 |  * GFP_USER allocations are marked with the __GFP_HARDWALL bit, | 
 | 2097 |  * and do not allow allocations outside the current task's cpuset. | 
 | 2098 |  * GFP_KERNEL allocations are not so marked, so can escape to the | 
 | 2099 |  * nearest mem_exclusive ancestor cpuset. | 
 | 2100 |  * | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2101 |  * Scanning up parent cpusets requires callback_sem.  The __alloc_pages() | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2102 |  * routine only calls here with __GFP_HARDWALL bit _not_ set if | 
 | 2103 |  * it's a GFP_KERNEL allocation, and all nodes in the current task's | 
 | 2104 |  * mems_allowed came up empty on the first pass over the zonelist. | 
 | 2105 |  * So only GFP_KERNEL allocations, if all nodes in the cpuset are | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2106 |  * short of memory, might require taking the callback_sem semaphore. | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2107 |  * | 
 | 2108 |  * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() | 
 | 2109 |  * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing | 
 | 2110 |  * hardwall cpusets - no allocation on a node outside the cpuset is | 
 | 2111 |  * allowed (unless in interrupt, of course). | 
 | 2112 |  * | 
 | 2113 |  * The second loop doesn't even call here for GFP_ATOMIC requests, since | 
 | 2114 |  * it only calls here when the __alloc_pages() local variable 'wait' is set. | 
 | 2115 |  * That check and the checks below have the combined effect, in the second | 
 | 2116 |  * loop of the __alloc_pages() routine, that: | 
 | 2117 |  *	in_interrupt - any node ok (current task context irrelevant) | 
 | 2118 |  *	GFP_ATOMIC   - any node ok | 
 | 2119 |  *	GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok | 
 | 2120 |  *	GFP_USER     - only nodes in current task's mems_allowed ok. | 
 | 2121 |  **/ | 
 | 2122 |  | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 2123 | int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2124 | { | 
 | 2125 | 	int node;			/* node that zone z is on */ | 
 | 2126 | 	const struct cpuset *cs;	/* current cpuset ancestors */ | 
 | 2127 | 	int allowed = 1;		/* is allocation in zone z allowed? */ | 
 | 2128 |  | 
 | 2129 | 	if (in_interrupt()) | 
 | 2130 | 		return 1; | 
 | 2131 | 	node = z->zone_pgdat->node_id; | 
 | 2132 | 	if (node_isset(node, current->mems_allowed)) | 
 | 2133 | 		return 1; | 
 | 2134 | 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */ | 
 | 2135 | 		return 0; | 
 | 2136 |  | 
| Bob Picco | 5563e77 | 2005-11-13 16:06:35 -0800 | [diff] [blame] | 2137 | 	if (current->flags & PF_EXITING) /* Let dying task have memory */ | 
 | 2138 | 		return 1; | 
 | 2139 |  | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2140 | 	/* Not hardwall and node outside mems_allowed: scan up cpusets */ | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2141 | 	down(&callback_sem); | 
 | 2142 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2143 | 	task_lock(current); | 
 | 2144 | 	cs = nearest_exclusive_ancestor(current->cpuset); | 
 | 2145 | 	task_unlock(current); | 
 | 2146 |  | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2147 | 	allowed = node_isset(node, cs->mems_allowed); | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2148 | 	up(&callback_sem); | 
| Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2149 | 	return allowed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2150 | } | 
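
/*
 * Worked example of the rules above (illustrative): suppose task T is
 * in cpuset B with mems_allowed = {1}, and B's nearest mem_exclusive
 * ancestor A has mems_allowed = {1,2}.  Then for T:
 *	GFP_USER   (__GFP_HARDWALL set)	- node 1 ok; nodes 2, 3 refused
 *	GFP_KERNEL (no __GFP_HARDWALL)	- nodes 1, 2 ok; node 3 refused
 *	in_interrupt() or GFP_ATOMIC	- any node ok
 */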
 | 2151 |  | 
| Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2152 | /** | 
 | 2153 |  * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors? | 
 | 2154 |  * @p: pointer to task_struct of some other task. | 
 | 2155 |  * | 
 | 2156 |  * Description: Return true if the nearest mem_exclusive ancestor | 
 | 2157 |  * cpusets of tasks @p and current overlap.  Used by oom killer to | 
 | 2158 |  * determine if task @p's memory usage might impact the memory | 
 | 2159 |  * available to the current task. | 
 | 2160 |  * | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2161 |  * Acquires callback_sem - not suitable for calling from a fast path. | 
| Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2162 |  **/ | 
 | 2163 |  | 
 | 2164 | int cpuset_excl_nodes_overlap(const struct task_struct *p) | 
 | 2165 | { | 
 | 2166 | 	const struct cpuset *cs1, *cs2;	/* my and p's cpuset ancestors */ | 
 | 2167 | 	int overlap = 0;		/* do cpusets overlap? */ | 
 | 2168 |  | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2169 | 	down(&callback_sem); | 
 | 2170 |  | 
 | 2171 | 	task_lock(current); | 
 | 2172 | 	if (current->flags & PF_EXITING) { | 
 | 2173 | 		task_unlock(current); | 
 | 2174 | 		goto done; | 
 | 2175 | 	} | 
 | 2176 | 	cs1 = nearest_exclusive_ancestor(current->cpuset); | 
 | 2177 | 	task_unlock(current); | 
 | 2178 |  | 
 | 2179 | 	task_lock((struct task_struct *)p); | 
 | 2180 | 	if (p->flags & PF_EXITING) { | 
 | 2181 | 		task_unlock((struct task_struct *)p); | 
 | 2182 | 		goto done; | 
 | 2183 | 	} | 
 | 2184 | 	cs2 = nearest_exclusive_ancestor(p->cpuset); | 
 | 2185 | 	task_unlock((struct task_struct *)p); | 
 | 2186 |  | 
| Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2187 | 	overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed); | 
 | 2188 | done: | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2189 | 	up(&callback_sem); | 
| Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2190 |  | 
 | 2191 | 	return overlap; | 
 | 2192 | } | 
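One plausible shape for the oom killer usage mentioned above, sketched purely for illustration: a badness score is discounted when the candidate task's memory cannot relieve pressure on any node the current task may use. The helper name and the discount factor below are hypothetical, not the kernel's actual oom_kill.c code.

/*
 * Hypothetical badness adjustment (illustration only): if task p's
 * mem_exclusive ancestor cpuset shares no nodes with ours, killing p
 * is unlikely to free memory we can use, so weight it down.
 */
static unsigned long cpuset_discount_badness(unsigned long points,
					     struct task_struct *p)
{
	if (!cpuset_excl_nodes_overlap(p))
		points /= 8;	/* arbitrary illustrative factor */
	return points;
}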
 | 2193 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2194 | /* | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2195 |  * Collection of memory_pressure is suppressed unless | 
 | 2196 |  * this flag is enabled by writing "1" to the special | 
 | 2197 |  * cpuset file 'memory_pressure_enabled' in the root cpuset. | 
 | 2198 |  */ | 
 | 2199 |  | 
| Paul Jackson | c5b2aff | 2006-01-08 01:01:51 -0800 | [diff] [blame] | 2200 | int cpuset_memory_pressure_enabled __read_mostly; | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2201 |  | 
 | 2202 | /** | 
 | 2203 |  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. | 
 | 2204 |  * | 
 | 2205 |  * Keep a running average of the rate of synchronous (direct) | 
 | 2206 |  * page reclaim efforts initiated by tasks in each cpuset. | 
 | 2207 |  * | 
 | 2208 |  * This represents the rate at which some task in the cpuset | 
 | 2209 |  * ran low on memory on all nodes it was allowed to use, and | 
 | 2210 |  * had to enter the kernel's page reclaim code in an effort to | 
 | 2211 |  * create more free memory by tossing clean pages or swapping | 
 | 2212 |  * or writing dirty pages. | 
 | 2213 |  * | 
 | 2214 |  * Display to user space in the per-cpuset read-only file | 
 | 2215 |  * "memory_pressure".  Value displayed is an integer | 
 | 2216 |  * representing the recent rate of entry into the synchronous | 
 | 2217 |  * (direct) page reclaim by any task attached to the cpuset. | 
 | 2218 |  **/ | 
 | 2219 |  | 
 | 2220 | void __cpuset_memory_pressure_bump(void) | 
 | 2221 | { | 
 | 2222 | 	struct cpuset *cs; | 
 | 2223 |  | 
 | 2224 | 	task_lock(current); | 
 | 2225 | 	cs = current->cpuset; | 
 | 2226 | 	fmeter_markevent(&cs->fmeter); | 
 | 2227 | 	task_unlock(current); | 
 | 2228 | } | 
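Since collection is suppressed unless memory_pressure_enabled is set, the double-underscore function above is presumably reached through a cheap gating wrapper, so the common (disabled) case costs only a flag test. A minimal sketch, assuming a macro along these lines in the cpuset header (not quoted verbatim):

/*
 * Assumed gating wrapper: only take task_lock() and touch the
 * fmeter when the root cpuset has enabled collection.
 */
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)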
 | 2229 |  | 
 | 2230 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2231 |  * proc_cpuset_show() | 
 | 2232 |  *  - Print the task's cpuset path into seq_file. | 
 | 2233 |  *  - Used for /proc/<pid>/cpuset. | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2234 |  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it | 
 | 2235 |  *    doesn't really matter if tsk->cpuset changes after we read it, | 
 | 2236 |  *    and we take manage_sem, keeping attach_task() from changing it | 
 | 2237 |  *    anyway. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 |  */ | 
 | 2239 |  | 
 | 2240 | static int proc_cpuset_show(struct seq_file *m, void *v) | 
 | 2241 | { | 
 | 2242 | 	struct cpuset *cs; | 
 | 2243 | 	struct task_struct *tsk; | 
 | 2244 | 	char *buf; | 
 | 2245 | 	int retval = 0; | 
 | 2246 |  | 
 | 2247 | 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 
 | 2248 | 	if (!buf) | 
 | 2249 | 		return -ENOMEM; | 
 | 2250 |  | 
 | 2251 | 	tsk = m->private; | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2252 | 	down(&manage_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2253 | 	cs = tsk->cpuset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2254 | 	if (!cs) { | 
 | 2255 | 		retval = -EINVAL; | 
 | 2256 | 		goto out; | 
 | 2257 | 	} | 
 | 2258 |  | 
 | 2259 | 	retval = cpuset_path(cs, buf, PAGE_SIZE); | 
 | 2260 | 	if (retval < 0) | 
 | 2261 | 		goto out; | 
 | 2262 | 	seq_puts(m, buf); | 
 | 2263 | 	seq_putc(m, '\n'); | 
 | 2264 | out: | 
| Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2265 | 	up(&manage_sem); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2266 | 	kfree(buf); | 
 | 2267 | 	return retval; | 
 | 2268 | } | 
 | 2269 |  | 
 | 2270 | static int cpuset_open(struct inode *inode, struct file *file) | 
 | 2271 | { | 
 | 2272 | 	struct task_struct *tsk = PROC_I(inode)->task; | 
 | 2273 | 	return single_open(file, proc_cpuset_show, tsk); | 
 | 2274 | } | 
 | 2275 |  | 
 | 2276 | struct file_operations proc_cpuset_operations = { | 
 | 2277 | 	.open		= cpuset_open, | 
 | 2278 | 	.read		= seq_read, | 
 | 2279 | 	.llseek		= seq_lseek, | 
 | 2280 | 	.release	= single_release, | 
 | 2281 | }; | 
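From user space, the file wired up above reads like any other seq_file-backed proc entry. A self-contained sketch (user-space code, nothing here is kernel API; the path is the /proc/<pid>/cpuset entry these file_operations serve):

/* Print the current task's cpuset path, e.g. "/" for the root cpuset. */
#include <stdio.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (!f) {
		perror("/proc/self/cpuset");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* path is printed with a trailing '\n' */
	fclose(f);
	return 0;
}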
 | 2282 |  | 
 | 2283 | /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ | 
 | 2284 | char *cpuset_task_status_allowed(struct task_struct *task, char *buffer) | 
 | 2285 | { | 
 | 2286 | 	buffer += sprintf(buffer, "Cpus_allowed:\t"); | 
 | 2287 | 	buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed); | 
 | 2288 | 	buffer += sprintf(buffer, "\n"); | 
 | 2289 | 	buffer += sprintf(buffer, "Mems_allowed:\t"); | 
 | 2290 | 	buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed); | 
 | 2291 | 	buffer += sprintf(buffer, "\n"); | 
 | 2292 | 	return buffer; | 
 | 2293 | } |
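The two lines formatted above end up in /proc/<pid>/status. A minimal user-space sketch that picks them back out; the field names match exactly what the function prints, everything else is illustrative:

/* Echo the Cpus_allowed / Mems_allowed lines from /proc/self/status. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f) {
		perror("/proc/self/status");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Cpus_allowed:", 13) ||
		    !strncmp(line, "Mems_allowed:", 13))
			fputs(line, stdout);
	fclose(f);
	return 0;
}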