/*
 * kernel/cgroup.c
 *
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <asm/atomic.h>

/* Generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) &_x ## _subsys,

static struct cgroup_subsys *subsys[] = {
#include <linux/cgroup_subsys.h>
};
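
/*
 * An illustrative sketch of what the array above expands to.  The
 * subsystem names shown are hypothetical examples; the real list comes
 * from the SUBSYS() lines in include/linux/cgroup_subsys.h:
 *
 *	static struct cgroup_subsys *subsys[] = {
 *		&cpuset_subsys,
 *		&example_subsys,
 *	};
 *
 * i.e. each SUBSYS(foo) entry contributes a &foo_subsys pointer.
 */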

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy,
 * and may be associated with a superblock to form an active
 * hierarchy
 */
struct cgroupfs_root {
	struct super_block *sb;

	/*
	 * The bitmask of subsystems intended to be attached to this
	 * hierarchy
	 */
	unsigned long subsys_bits;

	/* The bitmask of subsystems currently attached to this hierarchy */
	unsigned long actual_subsys_bits;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy. */
	int number_of_cgroups;

	/* A list running through the mounted hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned long flags;
};


/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/* The list of hierarchy roots */

static LIST_HEAD(roots);

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

/* This flag indicates whether tasks in the fork and exit paths should
 * take callback_mutex and check for fork/exit handlers to call. This
 * avoids us having to do extra work in the fork/exit path if none of the
 * subsystems need to be called.
 */
static int need_forkexit_callback;

/* bits in struct cgroup flags field */
enum {
	CONT_REMOVED,
};

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cont)
{
	return test_bit(CONT_REMOVED, &cont->flags);
}

/* bits in struct cgroupfs_root flags field */
enum {
	ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
};

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy
 */
#define for_each_subsys(_root, _ss) \
list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_root() allows you to iterate across the active hierarchies */
#define for_each_root(_root) \
list_for_each_entry(_root, &roots, root_list)

/* Each task_struct has an embedded css_set, so the get/put
 * operation simply takes a reference count on all the cgroups
 * referenced by subsystems in this css_set. This can end up
 * multiple-counting some cgroups, but that's OK - the ref-count is
 * just a busy/not-busy indicator; ensuring that we only count each
 * cgroup once would require taking a global lock to ensure that no
 * subsystems moved between hierarchies while we were doing so.
 *
 * Possible TODO: decide at boot time based on the number of
 * registered subsystems and the number of CPUs or NUMA nodes whether
 * it's better for performance to ref-count every subsystem, or to
 * take a global lock and only add one ref count to each hierarchy.
 */
static void get_css_set(struct css_set *cg)
{
	int i;
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		atomic_inc(&cg->subsys[i]->cgroup->count);
}

static void put_css_set(struct css_set *cg)
{
	int i;
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		atomic_dec(&cg->subsys[i]->cgroup->count);
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing. However, if the count goes to zero, then only
 * attach_task() can increment it again. Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count). So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The cgroup_common_file_write handler for operations that modify
 * the cgroup hierarchy holds cgroup_mutex across the entire operation,
 * single threading all such cgroup modifications across the system.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex. These are the two most performance
 * critical pieces of code here. The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call made
 * to /sbin/cgroup_release_agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty. Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either child cgroups and/or using tasks. So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 * The task_lock() exception
 *
 * The need for this exception arises from the action of
 * attach_task(), which overwrites one task's cgroup pointer with
 * another. It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex. Therefore except as noted below, when dereferencing or, as
 * in attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S. One more locking exception. RCU is used to guard the
 * update of a task's cgroup pointer by attach_task()
 */

static DEFINE_MUTEX(cgroup_mutex);

/**
 * cgroup_lock - lock out any changes to cgroup structures
 *
 */

void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}

/**
 * cgroup_unlock - release lock on cgroup changes
 *
 * Undo the lock taken in a previous cgroup_lock() call.
 */

void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}
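
/*
 * A minimal usage sketch, with a hypothetical caller that is not part
 * of this file: code outside cgroup.c that needs the hierarchy to stay
 * stable brackets its work with the lock helpers above:
 *
 *	cgroup_lock();
 *	... inspect or modify cgroup state ...
 *	cgroup_unlock();
 *
 * Performance-critical readers of a task's cgroup pointers should not
 * take this mutex; they rely on task_lock()/RCU as described in the
 * locking comment above.
 */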

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cont);
static struct inode_operations cgroup_dir_inode_operations;
static struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};

static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cont = dentry->d_fsdata;
		BUG_ON(!(cgroup_is_removed(cont)));
		kfree(cont);
	}
	iput(inode);
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_clear_directory(struct dentry *dentry)
{
	struct list_head *node;

	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
		list_del_init(node);
		if (d->d_inode) {
			/* This should never be called on a cgroup
			 * directory with child cgroups */
			BUG_ON(d->d_inode->i_mode & S_IFDIR);
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	spin_unlock(&dcache_lock);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	cgroup_clear_directory(dentry);

	spin_lock(&dcache_lock);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}

static int rebind_subsystems(struct cgroupfs_root *root,
			     unsigned long final_bits)
{
	unsigned long added_bits, removed_bits;
	struct cgroup *cont = &root->top_cgroup;
	int i;

	removed_bits = root->actual_subsys_bits & ~final_bits;
	added_bits = final_bits & ~root->actual_subsys_bits;
	/* Check that any added subsystems are currently free */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long long bit = 1ull << i;
		struct cgroup_subsys *ss = subsys[i];
		if (!(bit & added_bits))
			continue;
		if (ss->root != &rootnode) {
			/* Subsystem isn't free */
			return -EBUSY;
		}
	}

	/* Currently we don't handle adding/removing subsystems when
	 * any child cgroups exist. This is theoretically supportable
	 * but involves complex error handling, so it's being left until
	 * later */
	if (!list_empty(&cont->children))
		return -EBUSY;

	/* Process each subsystem */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		unsigned long bit = 1UL << i;
		if (bit & added_bits) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(cont->subsys[i]);
			BUG_ON(!dummytop->subsys[i]);
			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
			cont->subsys[i] = dummytop->subsys[i];
			cont->subsys[i]->cgroup = cont;
			list_add(&ss->sibling, &root->subsys_list);
			rcu_assign_pointer(ss->root, root);
			if (ss->bind)
				ss->bind(ss, cont);

		} else if (bit & removed_bits) {
			/* We're removing this subsystem */
			BUG_ON(cont->subsys[i] != dummytop->subsys[i]);
			BUG_ON(cont->subsys[i]->cgroup != cont);
			if (ss->bind)
				ss->bind(ss, dummytop);
			dummytop->subsys[i]->cgroup = dummytop;
			cont->subsys[i] = NULL;
			rcu_assign_pointer(subsys[i]->root, &rootnode);
			list_del(&ss->sibling);
		} else if (bit & final_bits) {
			/* Subsystem state should already exist */
			BUG_ON(!cont->subsys[i]);
		} else {
			/* Subsystem state shouldn't exist */
			BUG_ON(cont->subsys[i]);
		}
	}
	root->subsys_bits = root->actual_subsys_bits = final_bits;
	synchronize_rcu();

	return 0;
}

static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	for_each_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (test_bit(ROOT_NOPREFIX, &root->flags))
		seq_puts(seq, ",noprefix");
	mutex_unlock(&cgroup_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_bits;
	unsigned long flags;
};

/* Convert a hierarchy specifier into a bitmask of subsystems and
 * flags. */
static int parse_cgroupfs_options(char *data,
				  struct cgroup_sb_opts *opts)
{
	char *token, *o = data ?: "all";

	opts->subsys_bits = 0;
	opts->flags = 0;

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "all")) {
			opts->subsys_bits = (1 << CGROUP_SUBSYS_COUNT) - 1;
		} else if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
		} else {
			struct cgroup_subsys *ss;
			int i;
			for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
				ss = subsys[i];
				if (!strcmp(token, ss->name)) {
					set_bit(i, &opts->subsys_bits);
					break;
				}
			}
			if (i == CGROUP_SUBSYS_COUNT)
				return -ENOENT;
		}
	}

	/* We can't have an empty hierarchy */
	if (!opts->subsys_bits)
		return -EINVAL;

	return 0;
}
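
/*
 * Parsing examples (illustrative only; "cpuset" stands in for whatever
 * subsystem names are actually compiled in):
 *
 *	data			resulting opts
 *	----			--------------
 *	"cpuset"		subsys_bits has only the cpuset bit set
 *	"cpuset,noprefix"	cpuset bit set, ROOT_NOPREFIX flag set
 *	"all" or NULL		every registered subsystem selected
 *	"" / "bogus"		-EINVAL / -ENOENT respectively
 */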

static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cont = &root->top_cgroup;
	struct cgroup_sb_opts opts;

	mutex_lock(&cont->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* Don't allow flags to change at remount */
	if (opts.flags != root->flags) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, opts.subsys_bits);

	/* (re)populate subsystem files */
	if (!ret)
		cgroup_populate_dir(cont);

out_unlock:
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cont->dentry->d_inode->i_mutex);
	return ret;
}

static struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cont = &root->top_cgroup;
	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cont->root = root;
	cont->top_cgroup = cont;
	INIT_LIST_HEAD(&cont->sibling);
	INIT_LIST_HEAD(&cont->children);
}

static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroupfs_root *new = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* First check subsystems */
	if (new->subsys_bits != root->subsys_bits)
		return 0;

	/* Next check flags */
	if (new->flags != root->flags)
		return 0;

	return 1;
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroupfs_root *root = data;

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = root;
	root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	dentry = d_alloc_root(inode);
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = dentry;
	return 0;
}

static int cgroup_get_sb(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data, struct vfsmount *mnt)
{
	struct cgroup_sb_opts opts;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *root;

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		return ret;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	init_cgroup_root(root);
	root->subsys_bits = opts.subsys_bits;
	root->flags = opts.flags;

	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root);

	if (IS_ERR(sb)) {
		kfree(root);
		return PTR_ERR(sb);
	}

	if (sb->s_fs_info != root) {
		/* Reusing an existing superblock */
		BUG_ON(sb->s_root == NULL);
		kfree(root);
		root = NULL;
	} else {
		/* New superblock */
		struct cgroup *cont = &root->top_cgroup;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;

		mutex_lock(&cgroup_mutex);

		ret = rebind_subsystems(root, root->subsys_bits);
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			goto drop_new_super;
		}

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);

		sb->s_root->d_fsdata = &root->top_cgroup;
		root->top_cgroup.dentry = sb->s_root;

		BUG_ON(!list_empty(&cont->sibling));
		BUG_ON(!list_empty(&cont->children));
		BUG_ON(root->number_of_cgroups != 1);

		/*
		 * I believe that it's safe to nest i_mutex inside
		 * cgroup_mutex in this case, since no-one else can
		 * be accessing this directory yet. But we still need
		 * to teach lockdep that this is the case - currently
		 * a cgroupfs remount triggers a lockdep warning
		 */
		mutex_lock(&cont->dentry->d_inode->i_mutex);
		cgroup_populate_dir(cont);
		mutex_unlock(&cont->dentry->d_inode->i_mutex);
		mutex_unlock(&cgroup_mutex);
	}

	return simple_set_mnt(mnt, sb);

drop_new_super:
	up_write(&sb->s_umount);
	deactivate_super(sb);
	return ret;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cont = &root->top_cgroup;
	int ret;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cont->children));
	BUG_ON(!list_empty(&cont->sibling));

	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	if (!list_empty(&root->root_list))
		list_del(&root->root_list);
	mutex_unlock(&cgroup_mutex);

	kfree(root);
	kill_litter_super(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.get_sb = cgroup_get_sb,
	.kill_sb = cgroup_kill_sb,
};
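
/*
 * Mount-time behaviour, sketched with an assumed "cpuset" subsystem
 * (any compiled-in subsystem name works the same way):
 *
 *	# mount -t cgroup -o cpuset cgroup /dev/cgroup
 *
 * arrives in cgroup_get_sb() with data == "cpuset".  Mounting the same
 * subsystem set again reuses the existing superblock via
 * cgroup_test_super(), so both mount points show one shared hierarchy;
 * a disjoint subsystem set creates a new hierarchy instead.
 */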

static inline struct cgroup *__d_cont(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/*
 * Called with cgroup_mutex held. Writes path of cgroup into buf.
 * Returns 0 on success, -errno on error.
 */
int cgroup_path(const struct cgroup *cont, char *buf, int buflen)
{
	char *start;

	if (cont == dummytop) {
		/*
		 * Inactive subsystems have no dentry for their root
		 * cgroup
		 */
		strcpy(buf, "/");
		return 0;
	}

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = cont->dentry->d_name.len;
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, cont->dentry->d_name.name, len);
		cont = cont->parent;
		if (!cont)
			break;
		if (!cont->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
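
/*
 * Caller sketch (hypothetical; not part of this file).  The buffer and
 * the locking shown are the caller's responsibility:
 *
 *	char buf[256];
 *
 *	mutex_lock(&cgroup_mutex);
 *	if (!cgroup_path(cont, buf, sizeof(buf)))
 *		printk(KERN_DEBUG "cgroup path: %s\n", buf);
 *	mutex_unlock(&cgroup_mutex);
 *
 * proc_cgroup_show() below follows the same pattern with a kmalloc'd
 * PAGE_SIZE buffer.
 */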

/*
 * Return the first subsystem attached to a cgroup's hierarchy, and
 * its subsystem id.
 */

static void get_first_subsys(const struct cgroup *cont,
			     struct cgroup_subsys_state **css, int *subsys_id)
{
	const struct cgroupfs_root *root = cont->root;
	const struct cgroup_subsys *test_ss;
	BUG_ON(list_empty(&root->subsys_list));
	test_ss = list_entry(root->subsys_list.next,
			     struct cgroup_subsys, sibling);
	if (css) {
		*css = cont->subsys[test_ss->subsys_id];
		BUG_ON(!*css);
	}
	if (subsys_id)
		*subsys_id = test_ss->subsys_id;
}
/*
 * Attach task 'tsk' to cgroup 'cont'
 *
 * Call holding cgroup_mutex. May take task_lock of
 * the task 'tsk' during the call.
 */
static int attach_task(struct cgroup *cont, struct task_struct *tsk)
{
	int retval = 0;
	struct cgroup_subsys *ss;
	struct cgroup *oldcont;
	struct css_set *cg = &tsk->cgroups;
	struct cgroupfs_root *root = cont->root;
	int i;
	int subsys_id;

	get_first_subsys(cont, NULL, &subsys_id);

	/* Nothing to do if the task is already in that cgroup */
	oldcont = task_cgroup(tsk, subsys_id);
	if (cont == oldcont)
		return 0;

	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cont, tsk);
			if (retval) {
				return retval;
			}
		}
	}

	task_lock(tsk);
	if (tsk->flags & PF_EXITING) {
		task_unlock(tsk);
		return -ESRCH;
	}
	/* Update the css_set pointers for the subsystems in this
	 * hierarchy */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (root->subsys_bits & (1ull << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup. Transfer the refcount from the
			 * old to the new */
			atomic_inc(&cont->count);
			atomic_dec(&cg->subsys[i]->cgroup->count);
			rcu_assign_pointer(cg->subsys[i], cont->subsys[i]);
		}
	}
	task_unlock(tsk);

	for_each_subsys(root, ss) {
		if (ss->attach) {
			ss->attach(ss, cont, oldcont, tsk);
		}
	}

	synchronize_rcu();
	return 0;
}

/*
 * Attach task with pid 'pid' to cgroup 'cont'. Call with
 * cgroup_mutex, may take task_lock of task
 */
static int attach_task_by_pid(struct cgroup *cont, char *pidbuf)
{
	pid_t pid;
	struct task_struct *tsk;
	int ret;

	if (sscanf(pidbuf, "%d", &pid) != 1)
		return -EIO;

	if (pid) {
		rcu_read_lock();
		tsk = find_task_by_pid(pid);
		if (!tsk || tsk->flags & PF_EXITING) {
			rcu_read_unlock();
			return -ESRCH;
		}
		get_task_struct(tsk);
		rcu_read_unlock();

		if ((current->euid) && (current->euid != tsk->uid)
		    && (current->euid != tsk->suid)) {
			put_task_struct(tsk);
			return -EACCES;
		}
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	ret = attach_task(cont, tsk);
	put_task_struct(tsk);
	return ret;
}
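
/*
 * The function above backs writes to a cgroup's "tasks" file (see
 * cgroup_common_file_write()), so from userspace a migration is simply
 * (illustrative mount point and group name):
 *
 *	# echo 1234 > /dev/cgroup/mygroup/tasks		(move pid 1234)
 *	# echo 0 > /dev/cgroup/mygroup/tasks		(move the writer)
 *
 * Writing 0 attaches the calling task itself, per the pid == 0 case
 * handled above.
 */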

/* The various types of files and directories in a cgroup file system */

enum cgroup_filetype {
	FILE_ROOT,
	FILE_DIR,
	FILE_TASKLIST,
};

static ssize_t cgroup_write_uint(struct cgroup *cont, struct cftype *cft,
				 struct file *file,
				 const char __user *userbuf,
				 size_t nbytes, loff_t *unused_ppos)
{
	char buffer[64];
	int retval = 0;
	u64 val;
	char *end;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buffer))
		return -E2BIG;
	if (copy_from_user(buffer, userbuf, nbytes))
		return -EFAULT;

	buffer[nbytes] = 0;	/* nul-terminate */

	/* strip newline if necessary */
	if (nbytes && (buffer[nbytes-1] == '\n'))
		buffer[nbytes-1] = 0;
	val = simple_strtoull(buffer, &end, 0);
	if (*end)
		return -EINVAL;

	/* Pass to subsystem */
	retval = cft->write_uint(cont, cft, val);
	if (!retval)
		retval = nbytes;
	return retval;
}

static ssize_t cgroup_common_file_write(struct cgroup *cont,
					struct cftype *cft,
					struct file *file,
					const char __user *userbuf,
					size_t nbytes, loff_t *unused_ppos)
{
	enum cgroup_filetype type = cft->private;
	char *buffer;
	int retval = 0;

	if (nbytes >= PATH_MAX)
		return -E2BIG;

	/* +1 for nul-terminator */
	buffer = kmalloc(nbytes + 1, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	if (copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out1;
	}
	buffer[nbytes] = 0;	/* nul-terminate */

	mutex_lock(&cgroup_mutex);

	if (cgroup_is_removed(cont)) {
		retval = -ENODEV;
		goto out2;
	}

	switch (type) {
	case FILE_TASKLIST:
		retval = attach_task_by_pid(cont, buffer);
		break;
	default:
		retval = -EINVAL;
		goto out2;
	}

	if (retval == 0)
		retval = nbytes;
out2:
	mutex_unlock(&cgroup_mutex);
out1:
	kfree(buffer);
	return retval;
}

static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cont = __d_cont(file->f_dentry->d_parent);

	if (!cft)
		return -ENODEV;
	if (cft->write)
		return cft->write(cont, cft, file, buf, nbytes, ppos);
	if (cft->write_uint)
		return cgroup_write_uint(cont, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}

static ssize_t cgroup_read_uint(struct cgroup *cont, struct cftype *cft,
				struct file *file,
				char __user *buf, size_t nbytes,
				loff_t *ppos)
{
	char tmp[64];
	u64 val = cft->read_uint(cont, cft);
	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cont = __d_cont(file->f_dentry->d_parent);

	if (!cft)
		return -ENODEV;

	if (cft->read)
		return cft->read(cont, cft, file, buf, nbytes, ppos);
	if (cft->read_uint)
		return cgroup_read_uint(cont, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}

static int cgroup_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	cft = __d_cft(file->f_dentry);
	if (!cft)
		return -ENODEV;
	if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	if (cft->release)
		return cft->release(inode, file);
	return 0;
}

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry)
{
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;
	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}

static struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};

static struct inode_operations cgroup_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
};

static int cgroup_create_file(struct dentry *dentry, int mode,
			      struct super_block *sb)
{
	static struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
	};

	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);

		/* start with the directory inode held, so that we can
		 * populate it without racing with another mkdir */
		mutex_lock(&inode->i_mutex);
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
	}
	dentry->d_op = &cgroup_dops;
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/*
 * cgroup_create_dir - create a directory for an object.
 * cont: the cgroup we create the directory for.
 * It must have a valid ->parent field
 * And we are going to fill its ->dentry field.
 * dentry: dentry of the new cgroup
 * mode: mode to set on new directory.
 */
static int cgroup_create_dir(struct cgroup *cont, struct dentry *dentry,
			     int mode)
{
	struct dentry *parent;
	int error = 0;

	parent = cont->parent->dentry;
	error = cgroup_create_file(dentry, S_IFDIR | mode, cont->root->sb);
	if (!error) {
		dentry->d_fsdata = cont;
		inc_nlink(parent->d_inode);
		cont->dentry = dentry;
		dget(dentry);
	}
	dput(dentry);

	return error;
}

int cgroup_add_file(struct cgroup *cont,
		    struct cgroup_subsys *subsys,
		    const struct cftype *cft)
{
	struct dentry *dir = cont->dentry;
	struct dentry *dentry;
	int error;

	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
	if (subsys && !test_bit(ROOT_NOPREFIX, &cont->root->flags)) {
		strcpy(name, subsys->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);
	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
	dentry = lookup_one_len(name, dir, strlen(name));
	if (!IS_ERR(dentry)) {
		error = cgroup_create_file(dentry, 0644 | S_IFREG,
					   cont->root->sb);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	return error;
}

int cgroup_add_files(struct cgroup *cont,
		     struct cgroup_subsys *subsys,
		     const struct cftype cft[],
		     int count)
{
	int i, err;
	for (i = 0; i < count; i++) {
		err = cgroup_add_file(cont, subsys, &cft[i]);
		if (err)
			return err;
	}
	return 0;
}
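
/*
 * Illustrative subsystem usage sketch (the "foo" subsystem and its
 * handlers are hypothetical, shown only to demonstrate the cftype /
 * populate interface):
 *
 *	static u64 foo_read_uint(struct cgroup *cont, struct cftype *cft)
 *	{
 *		return 42;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "limit",
 *			.read_uint = foo_read_uint,
 *		},
 *	};
 *
 *	static int foo_populate(struct cgroup_subsys *ss,
 *				struct cgroup *cont)
 *	{
 *		return cgroup_add_files(cont, ss, foo_files,
 *					ARRAY_SIZE(foo_files));
 *	}
 *
 * With ROOT_NOPREFIX clear this creates a control file named
 * "foo.limit" in each cgroup directory of the hierarchy.
 */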

/* Count the number of tasks in a cgroup. Could be made more
 * time-efficient but less space-efficient with more linked lists
 * running through each cgroup and the css_set structures that
 * referenced it. Must be called with tasklist_lock held for read or
 * write or in an rcu critical section.
 */
int __cgroup_task_count(const struct cgroup *cont)
{
	int count = 0;
	struct task_struct *g, *p;
	struct cgroup_subsys_state *css;
	int subsys_id;

	get_first_subsys(cont, &css, &subsys_id);
	do_each_thread(g, p) {
		if (task_subsys_state(p, subsys_id) == css)
			count++;
	} while_each_thread(g, p);
	return count;
}

/*
 * Stuff for reading the 'tasks' file.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 * Upon tasks file open(), a struct ctr_struct is allocated, that
 * will have a pointer to an array (also allocated here). The struct
 * ctr_struct * is stored in file->private_data. Its resources will
 * be freed by release() when the file is closed. The array is used
 * to sprintf the PIDs and then used by read().
 */
struct ctr_struct {
	char *buf;
	int bufsz;
};

/*
 * Load into 'pidarray' up to 'npids' of the tasks using cgroup
 * 'cont'. Return actual number of pids loaded. No need to
 * task_lock(p) when reading out p->cgroup, since we're in an RCU
 * read section, so the css_set can't go away, and is
 * immutable after creation.
 */
static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cont)
{
	int n = 0;
	struct task_struct *g, *p;
	struct cgroup_subsys_state *css;
	int subsys_id;

	get_first_subsys(cont, &css, &subsys_id);
	rcu_read_lock();
	do_each_thread(g, p) {
		if (task_subsys_state(p, subsys_id) == css) {
			pidarray[n++] = pid_nr(task_pid(p));
			if (unlikely(n == npids))
				goto array_full;
		}
	} while_each_thread(g, p);

array_full:
	rcu_read_unlock();
	return n;
}

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

/*
 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
 * decimal pids in 'buf'. Don't write more than 'sz' chars, but return
 * count 'cnt' of how many chars would be written if buf were large enough.
 */
static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
{
	int cnt = 0;
	int i;

	for (i = 0; i < npids; i++)
		cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
	return cnt;
}

/*
 * Handle an open on 'tasks' file. Prepare a buffer listing the
 * process id's of tasks currently attached to the cgroup being opened.
 *
 * Does not require any specific cgroup mutexes, and does not take any.
 */
static int cgroup_tasks_open(struct inode *unused, struct file *file)
{
	struct cgroup *cont = __d_cont(file->f_dentry->d_parent);
	struct ctr_struct *ctr;
	pid_t *pidarray;
	int npids;
	char c;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		goto err0;

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	npids = cgroup_task_count(cont);
	if (npids) {
		pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
		if (!pidarray)
			goto err1;

		npids = pid_array_load(pidarray, npids, cont);
		sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);

		/* Call pid_array_to_buf() twice, first just to get bufsz */
		ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
		ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
		if (!ctr->buf)
			goto err2;
		ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);

		kfree(pidarray);
	} else {
		ctr->buf = NULL;
		ctr->bufsz = 0;
	}
	file->private_data = ctr;
	return 0;

err2:
	kfree(pidarray);
err1:
	kfree(ctr);
err0:
	return -ENOMEM;
}

static ssize_t cgroup_tasks_read(struct cgroup *cont,
				 struct cftype *cft,
				 struct file *file, char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct ctr_struct *ctr = file->private_data;

	return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
}

static int cgroup_tasks_release(struct inode *unused_inode,
				struct file *file)
{
	struct ctr_struct *ctr;

	if (file->f_mode & FMODE_READ) {
		ctr = file->private_data;
		kfree(ctr->buf);
		kfree(ctr);
	}
	return 0;
}

/*
 * for the common functions, 'private' gives the type of file
 */
static struct cftype cft_tasks = {
	.name = "tasks",
	.open = cgroup_tasks_open,
	.read = cgroup_tasks_read,
	.write = cgroup_common_file_write,
	.release = cgroup_tasks_release,
	.private = FILE_TASKLIST,
};

static int cgroup_populate_dir(struct cgroup *cont)
{
	int err;
	struct cgroup_subsys *ss;

	/* First clear out any existing files */
	cgroup_clear_directory(cont->dentry);

	err = cgroup_add_file(cont, NULL, &cft_tasks);
	if (err < 0)
		return err;

	for_each_subsys(cont->root, ss) {
		if (ss->populate && (err = ss->populate(ss, cont)) < 0)
			return err;
	}

	return 0;
}

static void init_cgroup_css(struct cgroup_subsys_state *css,
			    struct cgroup_subsys *ss,
			    struct cgroup *cont)
{
	css->cgroup = cont;
	atomic_set(&css->refcnt, 0);
	css->flags = 0;
	if (cont == dummytop)
		set_bit(CSS_ROOT, &css->flags);
	BUG_ON(cont->subsys[ss->subsys_id]);
	cont->subsys[ss->subsys_id] = css;
}

/*
 * cgroup_create - create a cgroup
 * parent: cgroup that will be parent of the new cgroup.
 * name: name of the new cgroup. Will be strcpy'ed.
 * mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */

static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			  int mode)
{
	struct cgroup *cont;
	struct cgroupfs_root *root = parent->root;
	int err = 0;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;

	cont = kzalloc(sizeof(*cont), GFP_KERNEL);
	if (!cont)
		return -ENOMEM;

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups. This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	atomic_inc(&sb->s_active);

	mutex_lock(&cgroup_mutex);

	cont->flags = 0;
	INIT_LIST_HEAD(&cont->sibling);
	INIT_LIST_HEAD(&cont->children);

	cont->parent = parent;
	cont->root = parent->root;
	cont->top_cgroup = parent->top_cgroup;

	for_each_subsys(root, ss) {
		struct cgroup_subsys_state *css = ss->create(ss, cont);
		if (IS_ERR(css)) {
			err = PTR_ERR(css);
			goto err_destroy;
		}
		init_cgroup_css(css, ss, cont);
	}

	list_add(&cont->sibling, &cont->parent->children);
	root->number_of_cgroups++;

	err = cgroup_create_dir(cont, dentry, mode);
	if (err < 0)
		goto err_remove;

	/* The cgroup directory was pre-locked for us */
	BUG_ON(!mutex_is_locked(&cont->dentry->d_inode->i_mutex));

	err = cgroup_populate_dir(cont);
	/* If err < 0, we have a half-filled directory - oh well ;) */

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cont->dentry->d_inode->i_mutex);

	return 0;

err_remove:

	list_del(&cont->sibling);
	root->number_of_cgroups--;

err_destroy:

	for_each_subsys(root, ss) {
		if (cont->subsys[ss->subsys_id])
			ss->destroy(ss, cont);
	}

	mutex_unlock(&cgroup_mutex);

	/* Release the reference count that we took on the superblock */
	deactivate_super(sb);

	kfree(cont);
	return err;
}

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct cgroup *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}

static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	struct cgroup *cont = dentry->d_fsdata;
	struct dentry *d;
	struct cgroup *parent;
	struct cgroup_subsys *ss;
	struct super_block *sb;
	struct cgroupfs_root *root;
	int css_busy = 0;

	/* the vfs holds both inode->i_mutex already */

	mutex_lock(&cgroup_mutex);
	if (atomic_read(&cont->count) != 0) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	if (!list_empty(&cont->children)) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}

	parent = cont->parent;
	root = cont->root;
	sb = root->sb;

	/* Check the reference count on each subsystem. Since we
	 * already established that there are no tasks in the
	 * cgroup, if the css refcount is also 0, then there should
	 * be no outstanding references, so the subsystem is safe to
	 * destroy */
	for_each_subsys(root, ss) {
		struct cgroup_subsys_state *css;
		css = cont->subsys[ss->subsys_id];
		if (atomic_read(&css->refcnt)) {
			css_busy = 1;
			break;
		}
	}
	if (css_busy) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}

	for_each_subsys(root, ss) {
		if (cont->subsys[ss->subsys_id])
			ss->destroy(ss, cont);
	}

	set_bit(CONT_REMOVED, &cont->flags);
	/* delete my sibling from parent->children */
	list_del(&cont->sibling);
	spin_lock(&cont->dentry->d_lock);
	d = dget(cont->dentry);
	cont->dentry = NULL;
	spin_unlock(&d->d_lock);

	cgroup_d_remove_dir(d);
	dput(d);
	root->number_of_cgroups--;

	mutex_unlock(&cgroup_mutex);
	/* Drop the active superblock reference that we took when we
	 * created the cgroup */
	deactivate_super(sb);
	return 0;
}

static void cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct task_struct *g, *p;
	struct cgroup_subsys_state *css;
	printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name);

	/* Create the top cgroup state for this subsystem */
	ss->root = &rootnode;
	css = ss->create(ss, dummytop);
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_cgroup_css(css, ss, dummytop);

	/* Update all tasks to contain a subsys pointer to this state
	 * - since the subsystem is newly registered, all tasks are in
	 * the subsystem's top cgroup. */

	/* If this subsystem requested that it be notified with fork
	 * events, we should send it one now for every process in the
	 * system */

	read_lock(&tasklist_lock);
	init_task.cgroups.subsys[ss->subsys_id] = css;
	if (ss->fork)
		ss->fork(ss, &init_task);

	do_each_thread(g, p) {
		printk(KERN_INFO "Setting task %p css to %p (%d)\n", p, css, p->pid);
		p->cgroups.subsys[ss->subsys_id] = css;
		if (ss->fork)
			ss->fork(ss, p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	need_forkexit_callback |= ss->fork || ss->exit;

	ss->active = 1;
}

/**
 * cgroup_init_early - initialize cgroups at system boot, and
 * initialize any subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	int i;
	init_cgroup_root(&rootnode);
	list_add(&rootnode.root_list, &roots);

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];

		BUG_ON(!ss->name);
		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
		BUG_ON(!ss->create);
		BUG_ON(!ss->destroy);
		if (ss->subsys_id != i) {
			printk(KERN_ERR "Subsys %s id == %d\n",
			       ss->name, ss->subsys_id);
			BUG();
		}

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}

/**
 * cgroup_init - register cgroup filesystem and /proc file, and
 * initialize any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	int err;
	int i;
	struct proc_dir_entry *entry;

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (!ss->early_init)
			cgroup_init_subsys(ss);
	}

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0)
		goto out;

	entry = create_proc_entry("cgroups", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_cgroupstats_operations;

out:
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

	return err;
}

Paul Menage | a424316 | 2007-10-18 23:39:35 -0700 | [diff] [blame^] | 1633 | /* |
| 1634 | * proc_cgroup_show() |
| 1635 | * - Print task's cgroup paths into seq_file, one line for each hierarchy |
| 1636 | * - Used for /proc/<pid>/cgroup. |
| 1637 | * - No need to task_lock(tsk) on this tsk->cgroup reference, as it |
| 1638 | * doesn't really matter if tsk->cgroup changes after we read it, |
| 1639 | * and we take cgroup_mutex, keeping attach_task() from changing it |
| 1640 | * anyway. No need to check that tsk->cgroup != NULL, thanks to |
| 1641 | * the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's |
| 1642 | * cgroup to top_cgroup. |
| 1643 | */ |
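| | /* Illustrative output, one line per hierarchy (subsystem names and |
| | * paths below are examples only): |
| | * cpuset:/batch/job1 |
| | * cpu,memory:/interactive |
| | */ |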
| 1644 | |
| 1645 | /* TODO: Use a proper seq_file iterator */ |
| 1646 | static int proc_cgroup_show(struct seq_file *m, void *v) |
| 1647 | { |
| 1648 | struct pid *pid; |
| 1649 | struct task_struct *tsk; |
| 1650 | char *buf; |
| 1651 | int retval; |
| 1652 | struct cgroupfs_root *root; |
| 1653 | |
| 1654 | retval = -ENOMEM; |
| 1655 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
| 1656 | if (!buf) |
| 1657 | goto out; |
| 1658 | |
| 1659 | retval = -ESRCH; |
| 1660 | pid = m->private; |
| 1661 | tsk = get_pid_task(pid, PIDTYPE_PID); |
| 1662 | if (!tsk) |
| 1663 | goto out_free; |
| 1664 | |
| 1665 | retval = 0; |
| 1666 | |
| 1667 | mutex_lock(&cgroup_mutex); |
| 1668 | |
| 1669 | for_each_root(root) { |
| 1670 | struct cgroup_subsys *ss; |
| 1671 | struct cgroup *cont; |
| 1672 | int subsys_id; |
| 1673 | int count = 0; |
| 1674 | |
| 1675 | /* Skip this hierarchy if it has no active subsystems */ |
| 1676 | if (!root->actual_subsys_bits) |
| 1677 | continue; |
| 1678 | for_each_subsys(root, ss) |
| 1679 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); |
| 1680 | seq_putc(m, ':'); |
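| | /* All subsystems bound to this hierarchy see the same cgroup for |
| | * the task, so the path via the first subsystem is representative. */ |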
| 1681 | get_first_subsys(&root->top_cgroup, NULL, &subsys_id); |
| 1682 | cont = task_cgroup(tsk, subsys_id); |
| 1683 | retval = cgroup_path(cont, buf, PAGE_SIZE); |
| 1684 | if (retval < 0) |
| 1685 | goto out_unlock; |
| 1686 | seq_puts(m, buf); |
| 1687 | seq_putc(m, '\n'); |
| 1688 | } |
| 1689 | |
| 1690 | out_unlock: |
| 1691 | mutex_unlock(&cgroup_mutex); |
| 1692 | put_task_struct(tsk); |
| 1693 | out_free: |
| 1694 | kfree(buf); |
| 1695 | out: |
| 1696 | return retval; |
| 1697 | } |
| 1698 | |
| 1699 | static int cgroup_open(struct inode *inode, struct file *file) |
| 1700 | { |
| 1701 | struct pid *pid = PROC_I(inode)->pid; |
| 1702 | return single_open(file, proc_cgroup_show, pid); |
| 1703 | } |
| 1704 | |
| 1705 | struct file_operations proc_cgroup_operations = { |
| 1706 | .open = cgroup_open, |
| 1707 | .read = seq_read, |
| 1708 | .llseek = seq_lseek, |
| 1709 | .release = single_release, |
| 1710 | }; |
| 1711 | |
| 1712 | /* Display information about each subsystem and each hierarchy */ |
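| | /* Illustrative output (pointer values, counts and names are |
| | * examples only): |
| | * Hierarchies: |
| | * ffff810001234000: bits=1 cgroups=3 (cpuset) s_active=2 |
| | * Subsystems: |
| | * 0: name=cpuset hierarchy=ffff810001234000 |
| | */ |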
| 1713 | static int proc_cgroupstats_show(struct seq_file *m, void *v) |
| 1714 | { |
| 1715 | int i; |
| 1716 | struct cgroupfs_root *root; |
| 1717 | |
| 1718 | mutex_lock(&cgroup_mutex); |
| 1719 | seq_puts(m, "Hierarchies:\n"); |
| 1720 | for_each_root(root) { |
| 1721 | struct cgroup_subsys *ss; |
| 1722 | int first = 1; |
| 1723 | seq_printf(m, "%p: bits=%lx cgroups=%d (", root, |
| 1724 | root->subsys_bits, root->number_of_cgroups); |
| 1725 | for_each_subsys(root, ss) { |
| 1726 | seq_printf(m, "%s%s", first ? "" : ", ", ss->name); |
| 1727 | first = 0; |
| 1728 | } |
| 1729 | seq_putc(m, ')'); |
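| | /* s_active is the superblock's active reference count, i.e. how |
| | * many users currently pin this mounted hierarchy. */ |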
| 1730 | if (root->sb) { |
| 1731 | seq_printf(m, " s_active=%d", |
| 1732 | atomic_read(&root->sb->s_active)); |
| 1733 | } |
| 1734 | seq_putc(m, '\n'); |
| 1735 | } |
| 1736 | seq_puts(m, "Subsystems:\n"); |
| 1737 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
| 1738 | struct cgroup_subsys *ss = subsys[i]; |
| 1739 | seq_printf(m, "%d: name=%s hierarchy=%p\n", |
| 1740 | i, ss->name, ss->root); |
| 1741 | } |
| 1742 | mutex_unlock(&cgroup_mutex); |
| 1743 | return 0; |
| 1744 | } |
| 1745 | |
| 1746 | static int cgroupstats_open(struct inode *inode, struct file *file) |
| 1747 | { |
| 1748 | return single_open(file, proc_cgroupstats_show, NULL); |
| 1749 | } |
| 1750 | |
| 1751 | static struct file_operations proc_cgroupstats_operations = { |
| 1752 | .open = cgroupstats_open, |
| 1753 | .read = seq_read, |
| 1754 | .llseek = seq_lseek, |
| 1755 | .release = single_release, |
| 1756 | }; |
| 1757 | |
Paul Menage | b4f48b6 | 2007-10-18 23:39:33 -0700 | [diff] [blame] | 1758 | /** |
| 1759 | * cgroup_fork - attach newly forked task to its parent's cgroup. |
| 1760 | * @child: pointer to task_struct of the newly forked child process. |
| 1761 | * |
| 1762 | * Description: A task inherits its parent's cgroup at fork(). |
| 1763 | * |
| 1764 | * A pointer to the shared css_set was automatically copied in |
| 1765 | * fork.c by dup_task_struct(). However, we ignore that copy, since |
| 1766 | * it was not made under the protection of RCU or cgroup_mutex, so |
| 1767 | * might no longer be a valid cgroup pointer. attach_task() might |
| 1768 | * have already changed current->cgroup, allowing the previously |
| 1769 | * referenced cgroup to be removed and freed. |
| 1770 | * |
| 1771 | * At the point that cgroup_fork() is called, 'current' is the parent |
| 1772 | * task, and the passed argument 'child' points to the child task. |
| 1773 | */ |
| 1774 | void cgroup_fork(struct task_struct *child) |
| 1775 | { |
| 1776 | rcu_read_lock(); |
| 1777 | child->cgroups = rcu_dereference(current->cgroups); |
| 1778 | get_css_set(&child->cgroups); |
| 1779 | rcu_read_unlock(); |
| 1780 | } |
| 1781 | |
| 1782 | /** |
| 1783 | * cgroup_fork_callbacks - called on a new task very soon before |
| 1784 | * adding it to the tasklist. No need to take any locks since no-one |
| 1785 | * can be operating on this task |
| 1786 | */ |
| 1787 | void cgroup_fork_callbacks(struct task_struct *child) |
| 1788 | { |
| 1789 | if (need_forkexit_callback) { |
| 1790 | int i; |
| 1791 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
| 1792 | struct cgroup_subsys *ss = subsys[i]; |
| 1793 | if (ss->fork) |
| 1794 | ss->fork(ss, child); |
| 1795 | } |
| 1796 | } |
| 1797 | } |
| 1798 | |
| 1799 | /** |
| 1800 | * cgroup_exit - detach cgroup from exiting task |
| 1801 | * @tsk: pointer to task_struct of exiting process |
| 1802 | * |
| 1803 | * Description: Detach cgroup from @tsk and release it. |
| 1804 | * |
| 1805 | * Note that cgroups marked notify_on_release force every task in |
| 1806 | * them to take the global cgroup_mutex when exiting. |
| 1807 | * This could impact scaling on very large systems. Be reluctant to |
| 1808 | * use notify_on_release cgroups where very high task exit scaling |
| 1809 | * is required on large systems. |
| 1810 | * |
| 1811 | * the_top_cgroup_hack: |
| 1812 | * |
| 1813 | * Set the exiting task's cgroup to the root cgroup (top_cgroup). |
| 1814 | * |
| 1815 | * We call cgroup_exit() while the task is still competent to |
| 1816 | * handle notify_on_release(), then leave the task attached to the |
| 1817 | * root cgroup in each hierarchy for the remainder of its exit. |
| 1818 | * |
| 1819 | * To do this properly, we would increment the reference count on |
| 1820 | * top_cgroup, and near the very end of the kernel/exit.c do_exit() |
| 1821 | * code we would add a second cgroup function call, to drop that |
| 1822 | * reference. This would just create an unnecessary hot spot on |
| 1823 | * the top_cgroup reference count, to no avail. |
| 1824 | * |
| 1825 | * Normally, holding a reference to a cgroup without bumping its |
| 1826 | * count is unsafe. The cgroup could go away, or someone could |
| 1827 | * attach us to a different cgroup, decrementing the count on |
| 1828 | * the first cgroup that we never incremented. But in this case, |
| 1829 | * top_cgroup isn't going away, and either task has PF_EXITING set, |
| 1830 | * which wards off any attach_task() attempts, or the task is a failed |
| 1831 | * fork, never visible to attach_task(). |
| 1832 | * |
| 1833 | */ |
| 1834 | void cgroup_exit(struct task_struct *tsk, int run_callbacks) |
| 1835 | { |
| 1836 | int i; |
| 1837 | |
| 1838 | if (run_callbacks && need_forkexit_callback) { |
| 1839 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
| 1840 | struct cgroup_subsys *ss = subsys[i]; |
| 1841 | if (ss->exit) |
| 1842 | ss->exit(ss, tsk); |
| 1843 | } |
| 1844 | } |
| 1845 | /* Reassign the task to the init_css_set. */ |
| 1846 | task_lock(tsk); |
| 1847 | put_css_set(&tsk->cgroups); |
| 1848 | tsk->cgroups = init_task.cgroups; |
| 1849 | task_unlock(tsk); |
| 1850 | } |
Paul Menage | 697f416 | 2007-10-18 23:39:34 -0700 | [diff] [blame] | 1851 | |
| 1852 | /** |
| 1853 | * cgroup_clone - duplicate the current cgroup in the hierarchy |
| 1854 | * that the given subsystem is attached to, and move this task into |
| 1855 | * the new child |
| 1856 | */ |
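| | /* For example (path and pid hypothetical): a task in /parent in the |
| | * subsystem's hierarchy ends up attached to a newly created child |
| | * /parent/node_<pid>, named after the task's pid. */ |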
| 1857 | int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) |
| 1858 | { |
| 1859 | struct dentry *dentry; |
| 1860 | int ret = 0; |
| 1861 | char nodename[MAX_CGROUP_TYPE_NAMELEN]; |
| 1862 | struct cgroup *parent, *child; |
| 1863 | struct inode *inode; |
| 1864 | struct css_set *cg; |
| 1865 | struct cgroupfs_root *root; |
| 1866 | struct cgroup_subsys *ss; |
| 1867 | |
| 1868 | /* We shouldn't be called by an unregistered subsystem */ |
| 1869 | BUG_ON(!subsys->active); |
| 1870 | |
| 1871 | /* First figure out what hierarchy and cgroup we're dealing |
| 1872 | * with, and pin them so we can drop cgroup_mutex */ |
| 1873 | mutex_lock(&cgroup_mutex); |
| 1874 | again: |
| 1875 | root = subsys->root; |
| 1876 | if (root == &rootnode) { |
| 1877 | printk(KERN_INFO |
| 1878 | "Not cloning cgroup for unused subsystem %s\n", |
| 1879 | subsys->name); |
| 1880 | mutex_unlock(&cgroup_mutex); |
| 1881 | return 0; |
| 1882 | } |
| 1883 | cg = &tsk->cgroups; |
| 1884 | parent = task_cgroup(tsk, subsys->subsys_id); |
| 1885 | |
| 1886 | snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid); |
| 1887 | |
| 1888 | /* Pin the hierarchy */ |
| 1889 | atomic_inc(&parent->root->sb->s_active); |
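| | /* The extra active reference keeps the superblock (and hence the |
| | * hierarchy) alive while cgroup_mutex is dropped; it is released by |
| | * deactivate_super() on the paths below. */ |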
| 1890 | |
| 1891 | mutex_unlock(&cgroup_mutex); |
| 1892 | |
| 1893 | /* Now do the VFS work to create a cgroup */ |
| 1894 | inode = parent->dentry->d_inode; |
| 1895 | |
| 1896 | /* Hold the parent directory mutex across this operation to |
| 1897 | * stop anyone else deleting the new cgroup */ |
| 1898 | mutex_lock(&inode->i_mutex); |
| 1899 | dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename)); |
| 1900 | if (IS_ERR(dentry)) { |
| 1901 | printk(KERN_INFO |
| 1902 | "Couldn't allocate dentry for %s: %ld\n", nodename, |
| 1903 | PTR_ERR(dentry)); |
| 1904 | ret = PTR_ERR(dentry); |
| 1905 | goto out_release; |
| 1906 | } |
| 1907 | |
| 1908 | /* Create the cgroup directory, which also creates the cgroup */ |
| 1909 | ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755); |
| 1910 | child = __d_cont(dentry); |
| 1911 | dput(dentry); |
| 1912 | if (ret) { |
| 1913 | printk(KERN_INFO |
| 1914 | "Failed to create cgroup %s: %d\n", nodename, |
| 1915 | ret); |
| 1916 | goto out_release; |
| 1917 | } |
| 1918 | |
| 1919 | if (!child) { |
| 1920 | printk(KERN_INFO |
| 1921 | "Couldn't find new cgroup %s\n", nodename); |
| 1922 | ret = -ENOMEM; |
| 1923 | goto out_release; |
| 1924 | } |
| 1925 | |
| 1926 | /* The cgroup now exists. Retake cgroup_mutex and check |
| 1927 | * that we're still in the same state that we thought we |
| 1928 | * were. */ |
| 1929 | mutex_lock(&cgroup_mutex); |
| 1930 | if ((root != subsys->root) || |
| 1931 | (parent != task_cgroup(tsk, subsys->subsys_id))) { |
| 1932 | /* Aargh, we raced ... */ |
| 1933 | mutex_unlock(&inode->i_mutex); |
| 1934 | |
| 1935 | deactivate_super(parent->root->sb); |
| 1936 | /* The cgroup is still accessible in the VFS, but |
| 1937 | * we're not going to try to rmdir() it at this |
| 1938 | * point. */ |
| 1939 | printk(KERN_INFO |
| 1940 | "Race in cgroup_clone() - leaking cgroup %s\n", |
| 1941 | nodename); |
| 1942 | goto again; |
| 1943 | } |
| 1944 | |
| 1945 | /* do any required auto-setup */ |
| 1946 | for_each_subsys(root, ss) { |
| 1947 | if (ss->post_clone) |
| 1948 | ss->post_clone(ss, child); |
| 1949 | } |
| 1950 | |
| 1951 | /* All seems fine. Finish by moving the task into the new cgroup */ |
| 1952 | ret = attach_task(child, tsk); |
| 1953 | mutex_unlock(&cgroup_mutex); |
| 1954 | |
| 1955 | out_release: |
| 1956 | mutex_unlock(&inode->i_mutex); |
| 1957 | deactivate_super(parent->root->sb); |
| 1958 | return ret; |
| 1959 | } |
| 1960 | |
| 1961 | /* |
| 1962 | * See if "cont" is a descendant of the current task's cgroup in |
| 1963 | * the appropriate hierarchy |
| 1964 | * |
| 1965 | * If we are sending in dummytop, then presumably we are creating |
| 1966 | * the top cgroup in the subsystem. |
| 1967 | * |
| 1968 | * Called only by the ns (nsproxy) cgroup. |
| 1969 | */ |
| 1970 | int cgroup_is_descendant(const struct cgroup *cont) |
| 1971 | { |
| 1972 | int ret; |
| 1973 | struct cgroup *target; |
| 1974 | int subsys_id; |
| 1975 | |
| 1976 | if (cont == dummytop) |
| 1977 | return 1; |
| 1978 | |
| 1979 | get_first_subsys(cont, NULL, &subsys_id); |
| 1980 | target = task_cgroup(current, subsys_id); |
| 1981 | while (cont != target && cont != cont->top_cgroup) |
| 1982 | cont = cont->parent; |
| 1983 | ret = (cont == target); |
| 1984 | return ret; |
| 1985 | } |