/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>

#include <asm/atomic.h>

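/*
 * cgroup_mutex is the master lock; see the detailed locking comment
 * above cgroup_lock(), further down in this file.
 */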
static DEFINE_MUTEX(cgroup_mutex);

/* Generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) &_x ## _subsys,

static struct cgroup_subsys *subsys[] = {
#include <linux/cgroup_subsys.h>
};

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy,
 * and may be associated with a superblock to form an active
 * hierarchy
 */
struct cgroupfs_root {
	struct super_block *sb;

	/*
	 * The bitmask of subsystems intended to be attached to this
	 * hierarchy
	 */
	unsigned long subsys_bits;

	/* The bitmask of subsystems currently attached to this hierarchy */
	unsigned long actual_subsys_bits;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy.*/
	int number_of_cgroups;

	/* A list running through the mounted hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned long flags;

	/* The path to use for release notifications. No locking
	 * between setting and use - so if userspace updates this
	 * while child cgroups exist, you could miss a
	 * notification. We ensure that it's always a valid
	 * NUL-terminated string */
	char release_agent_path[PATH_MAX];
};


/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/* The list of hierarchy roots */

static LIST_HEAD(roots);
static int root_count;

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback;

/* bits in struct cgroup flags field */
enum {
	/* Control Group is dead */
	CGRP_REMOVED,
	/* Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
};

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
	return test_bit(CGRP_REMOVED, &cgrp->flags);
}

/* bits in struct cgroupfs_root flags field */
enum {
	ROOT_NOPREFIX,	/* mounted subsystems have no named prefix */
};

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy
 */
#define for_each_subsys(_root, _ss) \
list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_root() allows you to iterate across the active hierarchies */
#define for_each_root(_root) \
list_for_each_entry(_root, &roots, root_list)

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
	/*
	 * List running through cg_cgroup_links associated with a
	 * cgroup, anchored on cgroup->css_sets
	 */
	struct list_head cgrp_link_list;
	/*
	 * List running through cg_cgroup_links pointing at a
	 * single css_set object, anchored on css_set->cg_links
	 */
	struct list_head cg_link_list;
	struct css_set *cg;
};

/* The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */

static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;

/* css_set_lock protects the list of css_set objects, and the
 * chain of tasks off each css_set.  Nests outside task->alloc_lock
 * due to cgroup_iter_start() */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/* We don't maintain the lists running through each css_set to its
 * task until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use */
static int use_task_css_set_links;

/* When we create or destroy a css_set, the operation simply
 * takes/releases a reference count on all the cgroups referenced
 * by subsystems in this css_set. This can end up multiple-counting
 * some cgroups, but that's OK - the ref-count is just a
 * busy/not-busy indicator; ensuring that we only count each cgroup
 * once would require taking a global lock to ensure that no
 * subsystems moved between hierarchies while we were doing so.
 *
 * Possible TODO: decide at boot time based on the number of
 * registered subsystems and the number of CPUs or NUMA nodes whether
 * it's better for performance to ref-count every subsystem, or to
 * take a global lock and only add one ref count to each hierarchy.
 */

/*
 * unlink a css_set from the list and free it
 */
static void unlink_css_set(struct css_set *cg)
{
	write_lock(&css_set_lock);
	list_del(&cg->list);
	css_set_count--;
	while (!list_empty(&cg->cg_links)) {
		struct cg_cgroup_link *link;
		link = list_entry(cg->cg_links.next,
				  struct cg_cgroup_link, cg_link_list);
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);
}

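/*
 * Drop a css_set once its refcount hits zero: unlink it, drop the
 * reference counts it holds on each cgroup, and kick off release-agent
 * handling where needed. @taskexit is true when called from the task
 * exit path, in which case the cgroup is also marked releasable.
 */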
static void __release_css_set(struct kref *k, int taskexit)
{
	int i;
	struct css_set *cg = container_of(k, struct css_set, ref);

	unlink_css_set(cg);

	rcu_read_lock();
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup *cgrp = cg->subsys[i]->cgroup;
		if (atomic_dec_and_test(&cgrp->count) &&
		    notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
	}
	rcu_read_unlock();
	kfree(cg);
}

static void release_css_set(struct kref *k)
{
	__release_css_set(k, 0);
}

static void release_css_set_taskexit(struct kref *k)
{
	__release_css_set(k, 1);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
	kref_get(&cg->ref);
}

static inline void put_css_set(struct css_set *cg)
{
	kref_put(&cg->ref, release_css_set);
}

static inline void put_css_set_taskexit(struct css_set *cg)
{
	kref_put(&cg->ref, release_css_set_taskexit);
}

/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable. This currently walks a linked-list for
 * simplicity; a later patch will use a hash table for better
 * performance
 *
 * oldcg: the css_set that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new css_set
 */
static struct css_set *find_existing_css_set(
	struct css_set *oldcg,
	struct cgroup *cgrp,
	struct cgroup_subsys_state *template[])
{
	int i;
	struct cgroupfs_root *root = cgrp->root;
	struct list_head *l = &init_css_set.list;

	/* Build the set of subsystem state objects that we want to
	 * see in the new css_set */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (root->subsys_bits & (1ull << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgrp->subsys[i];
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = oldcg->subsys[i];
		}
	}

	/* Look through existing css_sets to find one to reuse */
	do {
		struct css_set *cg =
			list_entry(l, struct css_set, list);

		if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
			/* All subsystems matched */
			return cg;
		}
		/* Try the next css_set */
		l = l->next;
	} while (l != &init_css_set.list);

	/* No existing css_set matched */
	return NULL;
}

/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	int i;
	INIT_LIST_HEAD(tmp);
	for (i = 0; i < count; i++) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			while (!list_empty(tmp)) {
				link = list_entry(tmp->next,
						  struct cg_cgroup_link,
						  cgrp_link_list);
				list_del(&link->cgrp_link_list);
				kfree(link);
			}
			return -ENOMEM;
		}
		list_add(&link->cgrp_link_list, tmp);
	}
	return 0;
}

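/*
 * free_cg_links() releases any cg_cgroup_link structures still chained
 * on the temporary list built by allocate_cg_links().
 */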
static void free_cg_links(struct list_head *tmp)
{
	while (!list_empty(tmp)) {
		struct cg_cgroup_link *link;
		link = list_entry(tmp->next,
				  struct cg_cgroup_link,
				  cgrp_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
}

/*
 * find_css_set() takes an existing css_set and a
 * cgroup object, and returns a css_set object that's
 * equivalent to the old set, but with the given cgroup
 * substituted into the appropriate hierarchy. Must be called with
 * cgroup_mutex held
 */
static struct css_set *find_css_set(
	struct css_set *oldcg, struct cgroup *cgrp)
{
	struct css_set *res;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
	int i;

	struct list_head tmp_cg_links;
	struct cg_cgroup_link *link;

	/* First see if we already have a css_set that matches
	 * the desired set */
	write_lock(&css_set_lock);
	res = find_existing_css_set(oldcg, cgrp, template);
	if (res)
		get_css_set(res);
	write_unlock(&css_set_lock);

	if (res)
		return res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	/* Allocate all the cg_cgroup_link objects that we'll need */
	if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
		kfree(res);
		return NULL;
	}

	kref_init(&res->ref);
	INIT_LIST_HEAD(&res->cg_links);
	INIT_LIST_HEAD(&res->tasks);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(res->subsys, template, sizeof(res->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup *cgrp = res->subsys[i]->cgroup;
		struct cgroup_subsys *ss = subsys[i];
		atomic_inc(&cgrp->count);
		/*
		 * We want to add a link once per cgroup, so we
		 * only do it for the first subsystem in each
		 * hierarchy
		 */
		if (ss->root->subsys_list.next == &ss->sibling) {
			BUG_ON(list_empty(&tmp_cg_links));
			link = list_entry(tmp_cg_links.next,
					  struct cg_cgroup_link,
					  cgrp_link_list);
			list_del(&link->cgrp_link_list);
			list_add(&link->cgrp_link_list, &cgrp->css_sets);
			link->cg = res;
			list_add(&link->cg_link_list, &res->cg_links);
		}
	}
	if (list_empty(&rootnode.subsys_list)) {
		link = list_entry(tmp_cg_links.next,
				  struct cg_cgroup_link,
				  cgrp_link_list);
		list_del(&link->cgrp_link_list);
		list_add(&link->cgrp_link_list, &dummytop->css_sets);
		link->cg = res;
		list_add(&link->cg_link_list, &res->cg_links);
	}

	BUG_ON(!list_empty(&tmp_cg_links));

	/* Link this css_set into the list */
	list_add(&res->list, &init_css_set.list);
	css_set_count++;
	INIT_LIST_HEAD(&res->tasks);
	write_unlock(&css_set_lock);

	return res;
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing. However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again. Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count). So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The cgroup_common_file_write handler for operations that modify
 * the cgroup hierarchy holds cgroup_mutex across the entire operation,
 * single threading all such cgroup modifications across the system.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex. These are the two most performance
 * critical pieces of code here. The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of the cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty. Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks. So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 * The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another. It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex. Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S. One more locking exception. RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

/**
 * cgroup_lock - lock out any changes to cgroup structures
 *
 */
void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}

/**
 * cgroup_unlock - release lock on cgroup changes
 *
 * Undo the lock taken in a previous cgroup_lock() call.
 */
void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
static struct inode_operations cgroup_dir_inode_operations;
static struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};

static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

/*
 * Call subsys's pre_destroy handler.
 * This is called before css refcnt check.
 */
static void cgroup_call_pre_destroy(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	for_each_subsys(cgrp->root, ss)
		if (ss->pre_destroy && cgrp->subsys[ss->subsys_id])
			ss->pre_destroy(ss, cgrp);
	return;
}

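/*
 * Called when the last dentry reference is dropped. For a cgroup
 * directory this releases the subsystem state objects and the cgroup
 * itself, and drops the superblock reference taken at creation time.
 */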
static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;
		struct cgroup_subsys *ss;
		BUG_ON(!(cgroup_is_removed(cgrp)));
		/* It's possible for external users to be holding css
		 * reference counts on a cgroup; css_put() needs to
		 * be able to access the cgroup after decrementing
		 * the reference count in order to know if it needs to
		 * queue the cgroup to be handled by the release
		 * agent */
		synchronize_rcu();

		mutex_lock(&cgroup_mutex);
		/*
		 * Release the subsystem state objects.
		 */
		for_each_subsys(cgrp->root, ss) {
			if (cgrp->subsys[ss->subsys_id])
				ss->destroy(ss, cgrp);
		}

		cgrp->root->number_of_cgroups--;
		mutex_unlock(&cgroup_mutex);

		/* Drop the active superblock reference that we took when we
		 * created the cgroup */
		deactivate_super(cgrp->root->sb);

		kfree(cgrp);
	}
	iput(inode);
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_clear_directory(struct dentry *dentry)
{
	struct list_head *node;

	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
		list_del_init(node);
		if (d->d_inode) {
			/* This should never be called on a cgroup
			 * directory with child cgroups */
			BUG_ON(d->d_inode->i_mode & S_IFDIR);
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	spin_unlock(&dcache_lock);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	cgroup_clear_directory(dentry);

	spin_lock(&dcache_lock);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}

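/*
 * rebind_subsystems() rebinds the set of subsystems attached to @root
 * to match @final_bits. Fails with -EBUSY if a newly-requested
 * subsystem is already attached to a different hierarchy, or if this
 * hierarchy already has child cgroups. Called with cgroup_mutex held.
 */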
static int rebind_subsystems(struct cgroupfs_root *root,
			      unsigned long final_bits)
{
	unsigned long added_bits, removed_bits;
	struct cgroup *cgrp = &root->top_cgroup;
	int i;

	removed_bits = root->actual_subsys_bits & ~final_bits;
	added_bits = final_bits & ~root->actual_subsys_bits;
	/* Check that any added subsystems are currently free */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long long bit = 1ull << i;
		struct cgroup_subsys *ss = subsys[i];
		if (!(bit & added_bits))
			continue;
		if (ss->root != &rootnode) {
			/* Subsystem isn't free */
			return -EBUSY;
		}
	}

	/* Currently we don't handle adding/removing subsystems when
	 * any child cgroups exist. This is theoretically supportable
	 * but involves complex error handling, so it's being left until
	 * later */
	if (!list_empty(&cgrp->children))
		return -EBUSY;

	/* Process each subsystem */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		unsigned long bit = 1UL << i;
		if (bit & added_bits) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(cgrp->subsys[i]);
			BUG_ON(!dummytop->subsys[i]);
			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
			cgrp->subsys[i] = dummytop->subsys[i];
			cgrp->subsys[i]->cgroup = cgrp;
			list_add(&ss->sibling, &root->subsys_list);
			rcu_assign_pointer(ss->root, root);
			if (ss->bind)
				ss->bind(ss, cgrp);

		} else if (bit & removed_bits) {
			/* We're removing this subsystem */
			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
			if (ss->bind)
				ss->bind(ss, dummytop);
			dummytop->subsys[i]->cgroup = dummytop;
			cgrp->subsys[i] = NULL;
			rcu_assign_pointer(subsys[i]->root, &rootnode);
			list_del(&ss->sibling);
		} else if (bit & final_bits) {
			/* Subsystem state should already exist */
			BUG_ON(!cgrp->subsys[i]);
		} else {
			/* Subsystem state shouldn't exist */
			BUG_ON(cgrp->subsys[i]);
		}
	}
	root->subsys_bits = root->actual_subsys_bits = final_bits;
	synchronize_rcu();

	return 0;
}

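/*
 * Show mount options in /proc/mounts: the attached subsystem names,
 * "noprefix" if set, and any configured release agent path.
 */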
static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	for_each_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (test_bit(ROOT_NOPREFIX, &root->flags))
		seq_puts(seq, ",noprefix");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	mutex_unlock(&cgroup_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_bits;
	unsigned long flags;
	char *release_agent;
};

/* Convert a hierarchy specifier into a bitmask of subsystems and
 * flags. */
static int parse_cgroupfs_options(char *data,
				     struct cgroup_sb_opts *opts)
{
	char *token, *o = data ?: "all";

	opts->subsys_bits = 0;
	opts->flags = 0;
	opts->release_agent = NULL;

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "all")) {
			opts->subsys_bits = (1 << CGROUP_SUBSYS_COUNT) - 1;
		} else if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
		} else if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			strncpy(opts->release_agent, token + 14, PATH_MAX - 1);
			opts->release_agent[PATH_MAX - 1] = 0;
		} else {
			struct cgroup_subsys *ss;
			int i;
			for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
				ss = subsys[i];
				if (!strcmp(token, ss->name)) {
					set_bit(i, &opts->subsys_bits);
					break;
				}
			}
			if (i == CGROUP_SUBSYS_COUNT)
				return -ENOENT;
		}
	}

	/* We can't have an empty hierarchy */
	if (!opts->subsys_bits)
		return -EINVAL;

	return 0;
}

static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* Don't allow flags to change at remount */
	if (opts.flags != root->flags) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, opts.subsys_bits);

	/* (re)populate subsystem files */
	if (!ret)
		cgroup_populate_dir(cgrp);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	if (opts.release_agent)
		kfree(opts.release_agent);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}

static struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

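/*
 * Initialize a freshly-allocated cgroupfs_root and its embedded
 * top-level cgroup.
 */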
static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;
	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->top_cgroup = cgrp;
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->release_list);
}

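/*
 * sget() callbacks: an existing cgroup superblock can only be reused
 * if both the requested subsystem set and the flags match exactly.
 */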
static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroupfs_root *new = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* First check subsystems */
	if (new->subsys_bits != root->subsys_bits)
		return 0;

	/* Next check flags */
	if (new->flags != root->flags)
		return 0;

	return 1;
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroupfs_root *root = data;

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = root;
	root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	dentry = d_alloc_root(inode);
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = dentry;
	return 0;
}

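/*
 * Mount entry point: parse the options, then either reuse an existing
 * superblock with the same subsystem set and flags, or create a new
 * hierarchy, bind its subsystems and link the top cgroup into every
 * existing css_set.
 */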
static int cgroup_get_sb(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data, struct vfsmount *mnt)
{
	struct cgroup_sb_opts opts;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *root;
	struct list_head tmp_cg_links, *l;
	INIT_LIST_HEAD(&tmp_cg_links);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret) {
		if (opts.release_agent)
			kfree(opts.release_agent);
		return ret;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	init_cgroup_root(root);
	root->subsys_bits = opts.subsys_bits;
	root->flags = opts.flags;
	if (opts.release_agent) {
		strcpy(root->release_agent_path, opts.release_agent);
		kfree(opts.release_agent);
	}

	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root);

	if (IS_ERR(sb)) {
		kfree(root);
		return PTR_ERR(sb);
	}

	if (sb->s_fs_info != root) {
		/* Reusing an existing superblock */
		BUG_ON(sb->s_root == NULL);
		kfree(root);
		root = NULL;
	} else {
		/* New superblock */
		struct cgroup *cgrp = &root->top_cgroup;
		struct inode *inode;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto drop_new_super;
		}

		ret = rebind_subsystems(root, root->subsys_bits);
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto drop_new_super;
		}

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = &root->top_cgroup;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		l = &init_css_set.list;
		do {
			struct css_set *cg;
			struct cg_cgroup_link *link;
			cg = list_entry(l, struct css_set, list);
			BUG_ON(list_empty(&tmp_cg_links));
			link = list_entry(tmp_cg_links.next,
					  struct cg_cgroup_link,
					  cgrp_link_list);
			list_del(&link->cgrp_link_list);
			link->cg = cg;
			list_add(&link->cgrp_link_list,
				 &root->top_cgroup.css_sets);
			list_add(&link->cg_link_list, &cg->cg_links);
			l = l->next;
		} while (l != &init_css_set.list);
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&cgrp->sibling));
		BUG_ON(!list_empty(&cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		cgroup_populate_dir(cgrp);
		mutex_unlock(&inode->i_mutex);
		mutex_unlock(&cgroup_mutex);
	}

	return simple_set_mnt(mnt, sb);

 drop_new_super:
	up_write(&sb->s_umount);
	deactivate_super(sb);
	free_cg_links(&tmp_cg_links);
	return ret;
}

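/*
 * Unmount a hierarchy: rebind all subsystems back to the dummy
 * hierarchy, release the css_set links to this hierarchy's root
 * cgroup, and drop the root from the list of mounted hierarchies.
 */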
static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));
	BUG_ON(!list_empty(&cgrp->sibling));

	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);
	while (!list_empty(&cgrp->css_sets)) {
		struct cg_cgroup_link *link;
		link = list_entry(cgrp->css_sets.next,
				  struct cg_cgroup_link, cgrp_link_list);
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}
	mutex_unlock(&cgroup_mutex);

	kfree(root);
	kill_litter_super(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.get_sb = cgroup_get_sb,
	.kill_sb = cgroup_kill_sb,
};

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Called with cgroup_mutex held. Writes path of cgroup into buf.
 * Returns 0 on success, -errno on error.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	char *start;

	if (cgrp == dummytop) {
		/*
		 * Inactive subsystems have no dentry for their root
		 * cgroup
		 */
		strcpy(buf, "/");
		return 0;
	}

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = cgrp->dentry->d_name.len;
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, cgrp->dentry->d_name.name, len);
		cgrp = cgrp->parent;
		if (!cgrp)
			break;
		if (!cgrp->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}

/*
 * Return the first subsystem attached to a cgroup's hierarchy, and
 * its subsystem id.
 */
static void get_first_subsys(const struct cgroup *cgrp,
			struct cgroup_subsys_state **css, int *subsys_id)
{
	const struct cgroupfs_root *root = cgrp->root;
	const struct cgroup_subsys *test_ss;
	BUG_ON(list_empty(&root->subsys_list));
	test_ss = list_entry(root->subsys_list.next,
			     struct cgroup_subsys, sibling);
	if (css) {
		*css = cgrp->subsys[test_ss->subsys_id];
		BUG_ON(!*css);
	}
	if (subsys_id)
		*subsys_id = test_ss->subsys_id;
}

/**
 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
 * @cgrp: the cgroup the task is attaching to
 * @tsk: the task to be attached
 *
 * Call holding cgroup_mutex. May take task_lock of
 * the task 'tsk' during call.
 */
int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	int retval = 0;
	struct cgroup_subsys *ss;
	struct cgroup *oldcgrp;
	struct css_set *cg = tsk->cgroups;
	struct css_set *newcg;
	struct cgroupfs_root *root = cgrp->root;
	int subsys_id;

	get_first_subsys(cgrp, NULL, &subsys_id);

	/* Nothing to do if the task is already in that cgroup */
	oldcgrp = task_cgroup(tsk, subsys_id);
	if (cgrp == oldcgrp)
		return 0;

	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, tsk);
			if (retval)
				return retval;
		}
	}

	/*
	 * Locate or allocate a new css_set for this task,
	 * based on its final set of cgroups
	 */
	newcg = find_css_set(cg, cgrp);
	if (!newcg)
		return -ENOMEM;

	task_lock(tsk);
	if (tsk->flags & PF_EXITING) {
		task_unlock(tsk);
		put_css_set(newcg);
		return -ESRCH;
	}
	rcu_assign_pointer(tsk->cgroups, newcg);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list)) {
		list_del(&tsk->cg_list);
		list_add(&tsk->cg_list, &newcg->tasks);
	}
	write_unlock(&css_set_lock);

	for_each_subsys(root, ss) {
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, tsk);
	}
	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
	synchronize_rcu();
	put_css_set(cg);
	return 0;
}

/*
 * Attach task with pid 'pid' to cgroup 'cgrp'. Called with
 * cgroup_mutex held; may take task_lock of task.
 */
static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
{
	pid_t pid;
	struct task_struct *tsk;
	int ret;

	if (sscanf(pidbuf, "%d", &pid) != 1)
		return -EIO;

	if (pid) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (!tsk || tsk->flags & PF_EXITING) {
			rcu_read_unlock();
			return -ESRCH;
		}
		get_task_struct(tsk);
		rcu_read_unlock();

		if ((current->euid) && (current->euid != tsk->uid)
		    && (current->euid != tsk->suid)) {
			put_task_struct(tsk);
			return -EACCES;
		}
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	ret = cgroup_attach_task(cgrp, tsk);
	put_task_struct(tsk);
	return ret;
}

/* The various types of files and directories in a cgroup file system */
enum cgroup_filetype {
	FILE_ROOT,
	FILE_DIR,
	FILE_TASKLIST,
	FILE_NOTIFY_ON_RELEASE,
	FILE_RELEASABLE,
	FILE_RELEASE_AGENT,
};

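/*
 * Handle a write to a cftype with a write_uint() handler: copy the
 * buffer from userspace, parse it as a u64 (stripping any trailing
 * newline), and pass the value to the subsystem.
 */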
Paul Menagebd89aab2007-10-18 23:40:44 -07001305static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft,
Paul Menage355e0c42007-10-18 23:39:33 -07001306 struct file *file,
1307 const char __user *userbuf,
1308 size_t nbytes, loff_t *unused_ppos)
1309{
1310 char buffer[64];
1311 int retval = 0;
1312 u64 val;
1313 char *end;
1314
1315 if (!nbytes)
1316 return -EINVAL;
1317 if (nbytes >= sizeof(buffer))
1318 return -E2BIG;
1319 if (copy_from_user(buffer, userbuf, nbytes))
1320 return -EFAULT;
1321
1322 buffer[nbytes] = 0; /* nul-terminate */
1323
1324 /* strip newline if necessary */
1325 if (nbytes && (buffer[nbytes-1] == '\n'))
1326 buffer[nbytes-1] = 0;
1327 val = simple_strtoull(buffer, &end, 0);
1328 if (*end)
1329 return -EINVAL;
1330
1331 /* Pass to subsystem */
Paul Menagebd89aab2007-10-18 23:40:44 -07001332 retval = cft->write_uint(cgrp, cft, val);
Paul Menage355e0c42007-10-18 23:39:33 -07001333 if (!retval)
1334 retval = nbytes;
1335 return retval;
1336}
1337
Paul Menagebd89aab2007-10-18 23:40:44 -07001338static ssize_t cgroup_common_file_write(struct cgroup *cgrp,
Paul Menagebbcb81d2007-10-18 23:39:32 -07001339 struct cftype *cft,
1340 struct file *file,
1341 const char __user *userbuf,
1342 size_t nbytes, loff_t *unused_ppos)
1343{
1344 enum cgroup_filetype type = cft->private;
1345 char *buffer;
1346 int retval = 0;
1347
1348 if (nbytes >= PATH_MAX)
1349 return -E2BIG;
1350
1351 /* +1 for nul-terminator */
1352 buffer = kmalloc(nbytes + 1, GFP_KERNEL);
1353 if (buffer == NULL)
1354 return -ENOMEM;
1355
1356 if (copy_from_user(buffer, userbuf, nbytes)) {
1357 retval = -EFAULT;
1358 goto out1;
1359 }
1360 buffer[nbytes] = 0; /* nul-terminate */
Paul Jackson622d42c2008-02-07 00:13:44 -08001361 strstrip(buffer); /* strip -just- trailing whitespace */
Paul Menagebbcb81d2007-10-18 23:39:32 -07001362
1363 mutex_lock(&cgroup_mutex);
1364
Paul Menage8dc4f3e2008-02-07 00:13:45 -08001365 /*
1366 * This was already checked for in cgroup_file_write(), but
1367 * check again now we're holding cgroup_mutex.
1368 */
Paul Menagebd89aab2007-10-18 23:40:44 -07001369 if (cgroup_is_removed(cgrp)) {
Paul Menagebbcb81d2007-10-18 23:39:32 -07001370 retval = -ENODEV;
1371 goto out2;
1372 }
1373
1374 switch (type) {
1375 case FILE_TASKLIST:
Paul Menagebd89aab2007-10-18 23:40:44 -07001376 retval = attach_task_by_pid(cgrp, buffer);
Paul Menagebbcb81d2007-10-18 23:39:32 -07001377 break;
Paul Menage81a6a5c2007-10-18 23:39:38 -07001378 case FILE_NOTIFY_ON_RELEASE:
Paul Menagebd89aab2007-10-18 23:40:44 -07001379 clear_bit(CGRP_RELEASABLE, &cgrp->flags);
Paul Menage81a6a5c2007-10-18 23:39:38 -07001380 if (simple_strtoul(buffer, NULL, 10) != 0)
Paul Menagebd89aab2007-10-18 23:40:44 -07001381 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
Paul Menage81a6a5c2007-10-18 23:39:38 -07001382 else
Paul Menagebd89aab2007-10-18 23:40:44 -07001383 clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
Paul Menage81a6a5c2007-10-18 23:39:38 -07001384 break;
1385 case FILE_RELEASE_AGENT:
Paul Jackson622d42c2008-02-07 00:13:44 -08001386 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
1387 strcpy(cgrp->root->release_agent_path, buffer);
Paul Menage81a6a5c2007-10-18 23:39:38 -07001388 break;
Paul Menagebbcb81d2007-10-18 23:39:32 -07001389 default:
1390 retval = -EINVAL;
1391 goto out2;
1392 }
1393
1394 if (retval == 0)
1395 retval = nbytes;
1396out2:
1397 mutex_unlock(&cgroup_mutex);
1398out1:
1399 kfree(buffer);
1400 return retval;
1401}
1402
Paul Menageddbcc7e2007-10-18 23:39:30 -07001403static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
1404 size_t nbytes, loff_t *ppos)
1405{
1406 struct cftype *cft = __d_cft(file->f_dentry);
Paul Menagebd89aab2007-10-18 23:40:44 -07001407 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001408
Paul Menage8dc4f3e2008-02-07 00:13:45 -08001409 if (!cft || cgroup_is_removed(cgrp))
Paul Menageddbcc7e2007-10-18 23:39:30 -07001410 return -ENODEV;
Paul Menage355e0c42007-10-18 23:39:33 -07001411 if (cft->write)
Paul Menagebd89aab2007-10-18 23:40:44 -07001412 return cft->write(cgrp, cft, file, buf, nbytes, ppos);
Paul Menage355e0c42007-10-18 23:39:33 -07001413 if (cft->write_uint)
Paul Menagebd89aab2007-10-18 23:40:44 -07001414 return cgroup_write_uint(cgrp, cft, file, buf, nbytes, ppos);
Paul Menage355e0c42007-10-18 23:39:33 -07001415 return -EINVAL;
Paul Menageddbcc7e2007-10-18 23:39:30 -07001416}
1417
Paul Menagebd89aab2007-10-18 23:40:44 -07001418static ssize_t cgroup_read_uint(struct cgroup *cgrp, struct cftype *cft,
Paul Menageddbcc7e2007-10-18 23:39:30 -07001419 struct file *file,
1420 char __user *buf, size_t nbytes,
1421 loff_t *ppos)
1422{
1423 char tmp[64];
Paul Menagebd89aab2007-10-18 23:40:44 -07001424 u64 val = cft->read_uint(cgrp, cft);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001425 int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
1426
1427 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
1428}
1429
Paul Menagebd89aab2007-10-18 23:40:44 -07001430static ssize_t cgroup_common_file_read(struct cgroup *cgrp,
Paul Menage81a6a5c2007-10-18 23:39:38 -07001431 struct cftype *cft,
1432 struct file *file,
1433 char __user *buf,
1434 size_t nbytes, loff_t *ppos)
1435{
1436 enum cgroup_filetype type = cft->private;
1437 char *page;
1438 ssize_t retval = 0;
1439 char *s;
1440
1441 if (!(page = (char *)__get_free_page(GFP_KERNEL)))
1442 return -ENOMEM;
1443
1444 s = page;
1445
1446 switch (type) {
1447 case FILE_RELEASE_AGENT:
1448 {
1449 struct cgroupfs_root *root;
1450 size_t n;
1451 mutex_lock(&cgroup_mutex);
Paul Menagebd89aab2007-10-18 23:40:44 -07001452 root = cgrp->root;
Paul Menage81a6a5c2007-10-18 23:39:38 -07001453 n = strnlen(root->release_agent_path,
1454 sizeof(root->release_agent_path));
1455 n = min(n, (size_t) PAGE_SIZE);
1456 strncpy(s, root->release_agent_path, n);
1457 mutex_unlock(&cgroup_mutex);
1458 s += n;
1459 break;
1460 }
1461 default:
1462 retval = -EINVAL;
1463 goto out;
1464 }
1465 *s++ = '\n';
1466
1467 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1468out:
1469 free_page((unsigned long)page);
1470 return retval;
1471}
1472
Paul Menageddbcc7e2007-10-18 23:39:30 -07001473static ssize_t cgroup_file_read(struct file *file, char __user *buf,
1474 size_t nbytes, loff_t *ppos)
1475{
1476 struct cftype *cft = __d_cft(file->f_dentry);
Paul Menagebd89aab2007-10-18 23:40:44 -07001477 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001478
Paul Menage8dc4f3e2008-02-07 00:13:45 -08001479 if (!cft || cgroup_is_removed(cgrp))
Paul Menageddbcc7e2007-10-18 23:39:30 -07001480 return -ENODEV;
1481
1482 if (cft->read)
Paul Menagebd89aab2007-10-18 23:40:44 -07001483 return cft->read(cgrp, cft, file, buf, nbytes, ppos);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001484 if (cft->read_uint)
Paul Menagebd89aab2007-10-18 23:40:44 -07001485 return cgroup_read_uint(cgrp, cft, file, buf, nbytes, ppos);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001486 return -EINVAL;
1487}
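
/*
 * Purely illustrative sketch of the handler side of this dispatch:
 * a subsystem exporting a single u64 value fills in ->read_uint and
 * lets cgroup_read_uint() do the formatting. example_read_limit()
 * and the "limit" file are made-up names, not kernel symbols.
 *
 *	static u64 example_read_limit(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return 4096;	(would report per-cgroup state)
 *	}
 *
 *	static struct cftype example_cft = {
 *		.name = "limit",
 *		.read_uint = example_read_limit,
 *	};
 */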
1488
1489static int cgroup_file_open(struct inode *inode, struct file *file)
1490{
1491 int err;
1492 struct cftype *cft;
1493
1494 err = generic_file_open(inode, file);
1495 if (err)
1496 return err;
1497
1498 cft = __d_cft(file->f_dentry);
1499 if (!cft)
1500 return -ENODEV;
1501 if (cft->open)
1502 err = cft->open(inode, file);
1503 else
1504 err = 0;
1505
1506 return err;
1507}
1508
1509static int cgroup_file_release(struct inode *inode, struct file *file)
1510{
1511 struct cftype *cft = __d_cft(file->f_dentry);
1512 if (cft->release)
1513 return cft->release(inode, file);
1514 return 0;
1515}
1516
1517/*
1518 * cgroup_rename - Only allow simple rename of directories in place.
1519 */
1520static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
1521 struct inode *new_dir, struct dentry *new_dentry)
1522{
1523 if (!S_ISDIR(old_dentry->d_inode->i_mode))
1524 return -ENOTDIR;
1525 if (new_dentry->d_inode)
1526 return -EEXIST;
1527 if (old_dir != new_dir)
1528 return -EIO;
1529 return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1530}
1531
1532static struct file_operations cgroup_file_operations = {
1533 .read = cgroup_file_read,
1534 .write = cgroup_file_write,
1535 .llseek = generic_file_llseek,
1536 .open = cgroup_file_open,
1537 .release = cgroup_file_release,
1538};
1539
1540static struct inode_operations cgroup_dir_inode_operations = {
1541 .lookup = simple_lookup,
1542 .mkdir = cgroup_mkdir,
1543 .rmdir = cgroup_rmdir,
1544 .rename = cgroup_rename,
1545};
1546
1547static int cgroup_create_file(struct dentry *dentry, int mode,
1548 struct super_block *sb)
1549{
1550 static struct dentry_operations cgroup_dops = {
1551 .d_iput = cgroup_diput,
1552 };
1553
1554 struct inode *inode;
1555
1556 if (!dentry)
1557 return -ENOENT;
1558 if (dentry->d_inode)
1559 return -EEXIST;
1560
1561 inode = cgroup_new_inode(mode, sb);
1562 if (!inode)
1563 return -ENOMEM;
1564
1565 if (S_ISDIR(mode)) {
1566 inode->i_op = &cgroup_dir_inode_operations;
1567 inode->i_fop = &simple_dir_operations;
1568
1569 /* start off with i_nlink == 2 (for "." entry) */
1570 inc_nlink(inode);
1571
1572 /* start with the directory inode held, so that we can
1573 * populate it without racing with another mkdir */
Paul Menage817929e2007-10-18 23:39:36 -07001574 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001575 } else if (S_ISREG(mode)) {
1576 inode->i_size = 0;
1577 inode->i_fop = &cgroup_file_operations;
1578 }
1579 dentry->d_op = &cgroup_dops;
1580 d_instantiate(dentry, inode);
1581 dget(dentry); /* Extra count - pin the dentry in core */
1582 return 0;
1583}
1584
1585/**
Li Zefana043e3b2008-02-23 15:24:09 -08001586 * cgroup_create_dir - create a directory for an object.
1587 * @cgrp: the cgroup we create the directory for; it must have a valid
1588 * ->parent field, and its ->dentry field is filled in here.
1589 * @dentry: dentry of the new cgroup
1590 * @mode: mode to set on new directory.
Paul Menageddbcc7e2007-10-18 23:39:30 -07001591 */
Paul Menagebd89aab2007-10-18 23:40:44 -07001592static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
Paul Menageddbcc7e2007-10-18 23:39:30 -07001593 int mode)
1594{
1595 struct dentry *parent;
1596 int error = 0;
1597
Paul Menagebd89aab2007-10-18 23:40:44 -07001598 parent = cgrp->parent->dentry;
1599 error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001600 if (!error) {
Paul Menagebd89aab2007-10-18 23:40:44 -07001601 dentry->d_fsdata = cgrp;
Paul Menageddbcc7e2007-10-18 23:39:30 -07001602 inc_nlink(parent->d_inode);
Paul Menagebd89aab2007-10-18 23:40:44 -07001603 cgrp->dentry = dentry;
Paul Menageddbcc7e2007-10-18 23:39:30 -07001604 dget(dentry);
1605 }
1606 dput(dentry);
1607
1608 return error;
1609}
1610
Paul Menagebd89aab2007-10-18 23:40:44 -07001611int cgroup_add_file(struct cgroup *cgrp,
Paul Menageddbcc7e2007-10-18 23:39:30 -07001612 struct cgroup_subsys *subsys,
1613 const struct cftype *cft)
1614{
Paul Menagebd89aab2007-10-18 23:40:44 -07001615 struct dentry *dir = cgrp->dentry;
Paul Menageddbcc7e2007-10-18 23:39:30 -07001616 struct dentry *dentry;
1617 int error;
1618
1619 char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
Paul Menagebd89aab2007-10-18 23:40:44 -07001620 if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
Paul Menageddbcc7e2007-10-18 23:39:30 -07001621 strcpy(name, subsys->name);
1622 strcat(name, ".");
1623 }
1624 strcat(name, cft->name);
1625 BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
1626 dentry = lookup_one_len(name, dir, strlen(name));
1627 if (!IS_ERR(dentry)) {
1628 error = cgroup_create_file(dentry, 0644 | S_IFREG,
Paul Menagebd89aab2007-10-18 23:40:44 -07001629 cgrp->root->sb);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001630 if (!error)
1631 dentry->d_fsdata = (void *)cft;
1632 dput(dentry);
1633 } else
1634 error = PTR_ERR(dentry);
1635 return error;
1636}
1637
Paul Menagebd89aab2007-10-18 23:40:44 -07001638int cgroup_add_files(struct cgroup *cgrp,
Paul Menageddbcc7e2007-10-18 23:39:30 -07001639 struct cgroup_subsys *subsys,
1640 const struct cftype cft[],
1641 int count)
1642{
1643 int i, err;
1644 for (i = 0; i < count; i++) {
Paul Menagebd89aab2007-10-18 23:40:44 -07001645 err = cgroup_add_file(cgrp, subsys, &cft[i]);
Paul Menageddbcc7e2007-10-18 23:39:30 -07001646 if (err)
1647 return err;
1648 }
1649 return 0;
1650}
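
/*
 * Sketch of how a subsystem's populate() callback would use the
 * helpers above; "example_files" and example_populate() are made-up
 * names for illustration only:
 *
 *	static int example_populate(struct cgroup_subsys *ss,
 *				    struct cgroup *cgrp)
 *	{
 *		return cgroup_add_files(cgrp, ss, example_files,
 *					ARRAY_SIZE(example_files));
 *	}
 *
 * For a subsystem named "example", each file then appears as
 * "example.<cft name>" unless the hierarchy was mounted with the
 * noprefix option (ROOT_NOPREFIX).
 */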
1651
Li Zefana043e3b2008-02-23 15:24:09 -08001652/**
1653 * cgroup_task_count - count the number of tasks in a cgroup.
1654 * @cgrp: the cgroup in question
1655 *
1656 * Return the number of tasks in the cgroup.
1657 */
Paul Menagebd89aab2007-10-18 23:40:44 -07001658int cgroup_task_count(const struct cgroup *cgrp)
Paul Menagebbcb81d2007-10-18 23:39:32 -07001659{
1660 int count = 0;
Paul Menage817929e2007-10-18 23:39:36 -07001661 struct list_head *l;
Paul Menagebbcb81d2007-10-18 23:39:32 -07001662
Paul Menage817929e2007-10-18 23:39:36 -07001663 read_lock(&css_set_lock);
Paul Menagebd89aab2007-10-18 23:40:44 -07001664 l = cgrp->css_sets.next;
1665 while (l != &cgrp->css_sets) {
Paul Menage817929e2007-10-18 23:39:36 -07001666 struct cg_cgroup_link *link =
Paul Menagebd89aab2007-10-18 23:40:44 -07001667 list_entry(l, struct cg_cgroup_link, cgrp_link_list);
Paul Menage817929e2007-10-18 23:39:36 -07001668 count += atomic_read(&link->cg->ref.refcount);
1669 l = l->next;
1670 }
1671 read_unlock(&css_set_lock);
Paul Menagebbcb81d2007-10-18 23:39:32 -07001672 return count;
1673}
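
/*
 * Worked example: a cgroup linked to two css_sets whose reference
 * counts are 3 and 2 reports a task count of 5, since each task
 * normally holds exactly one reference on the css_set it uses.
 * Transient references (e.g. taken during an attach) can make the
 * count slightly high, so treat the result as an estimate.
 */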
1674
1675/*
Paul Menage817929e2007-10-18 23:39:36 -07001676 * Advance a cgroup iterator past any empty css_sets, leaving it at
1677 * the first task of the next non-empty css_set. Caller holds css_set_lock.
1678 */
Paul Menagebd89aab2007-10-18 23:40:44 -07001679static void cgroup_advance_iter(struct cgroup *cgrp,
Paul Menage817929e2007-10-18 23:39:36 -07001680 struct cgroup_iter *it)
1681{
1682 struct list_head *l = it->cg_link;
1683 struct cg_cgroup_link *link;
1684 struct css_set *cg;
1685
1686 /* Advance to the next non-empty css_set */
1687 do {
1688 l = l->next;
Paul Menagebd89aab2007-10-18 23:40:44 -07001689 if (l == &cgrp->css_sets) {
Paul Menage817929e2007-10-18 23:39:36 -07001690 it->cg_link = NULL;
1691 return;
1692 }
Paul Menagebd89aab2007-10-18 23:40:44 -07001693 link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
Paul Menage817929e2007-10-18 23:39:36 -07001694 cg = link->cg;
1695 } while (list_empty(&cg->tasks));
1696 it->cg_link = l;
1697 it->task = cg->tasks.next;
1698}
1699
Cliff Wickman31a7df02008-02-07 00:14:42 -08001700/*
1701 * To reduce the fork() overhead for systems that are not actually
1702 * using their cgroups capability, we don't maintain the lists running
1703 * through each css_set to its tasks until we see the list actually
1704 * used - in other words after the first call to cgroup_iter_start().
1705 *
1706 * The tasklist_lock is not held here, as do_each_thread() and
1707 * while_each_thread() are protected by RCU.
1708 */
1709void cgroup_enable_task_cg_lists(void)
1710{
1711 struct task_struct *p, *g;
1712 write_lock(&css_set_lock);
1713 use_task_css_set_links = 1;
1714 do_each_thread(g, p) {
1715 task_lock(p);
1716 if (list_empty(&p->cg_list))
1717 list_add(&p->cg_list, &p->cgroups->tasks);
1718 task_unlock(p);
1719 } while_each_thread(g, p);
1720 write_unlock(&css_set_lock);
1721}
1722
Paul Menagebd89aab2007-10-18 23:40:44 -07001723void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
Paul Menage817929e2007-10-18 23:39:36 -07001724{
1725 /*
1726 * The first time anyone tries to iterate across a cgroup,
1727 * we need to enable the list linking each css_set to its
1728 * tasks, and fix up all existing tasks.
1729 */
Cliff Wickman31a7df02008-02-07 00:14:42 -08001730 if (!use_task_css_set_links)
1731 cgroup_enable_task_cg_lists();
1732
Paul Menage817929e2007-10-18 23:39:36 -07001733 read_lock(&css_set_lock);
Paul Menagebd89aab2007-10-18 23:40:44 -07001734 it->cg_link = &cgrp->css_sets;
1735 cgroup_advance_iter(cgrp, it);
Paul Menage817929e2007-10-18 23:39:36 -07001736}
1737
Paul Menagebd89aab2007-10-18 23:40:44 -07001738struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
Paul Menage817929e2007-10-18 23:39:36 -07001739 struct cgroup_iter *it)
1740{
1741 struct task_struct *res;
1742 struct list_head *l = it->task;
1743
1744	/* If the iterator's cg_link is NULL, we have no tasks */
1745 if (!it->cg_link)
1746 return NULL;
1747 res = list_entry(l, struct task_struct, cg_list);
1748 /* Advance iterator to find next entry */
1749 l = l->next;
1750 if (l == &res->cgroups->tasks) {
1751 /* We reached the end of this task list - move on to
1752 * the next cg_cgroup_link */
Paul Menagebd89aab2007-10-18 23:40:44 -07001753 cgroup_advance_iter(cgrp, it);
Paul Menage817929e2007-10-18 23:39:36 -07001754 } else {
1755 it->task = l;
1756 }
1757 return res;
1758}
1759
Paul Menagebd89aab2007-10-18 23:40:44 -07001760void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
Paul Menage817929e2007-10-18 23:39:36 -07001761{
1762 read_unlock(&css_set_lock);
1763}
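
/*
 * The three calls above are meant to be used together; a minimal
 * (illustrative) walk over every task in a cgroup looks like:
 *
 *	struct cgroup_iter it;
 *	struct task_struct *tsk;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((tsk = cgroup_iter_next(cgrp, &it)))
 *		visit(tsk);	(hypothetical helper; must not sleep)
 *	cgroup_iter_end(cgrp, &it);
 *
 * css_set_lock is read-held from cgroup_iter_start() until
 * cgroup_iter_end(), so the loop body must not block;
 * pid_array_load() below follows exactly this pattern.
 */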
1764
Cliff Wickman31a7df02008-02-07 00:14:42 -08001765static inline int started_after_time(struct task_struct *t1,
1766 struct timespec *time,
1767 struct task_struct *t2)
1768{
1769 int start_diff = timespec_compare(&t1->start_time, time);
1770 if (start_diff > 0) {
1771 return 1;
1772 } else if (start_diff < 0) {
1773 return 0;
1774 } else {
1775 /*
1776 * Arbitrarily, if two processes started at the same
1777 * time, we'll say that the lower pointer value
1778 * started first. Note that t2 may have exited by now
1779 * so this may not be a valid pointer any longer, but
1780 * that's fine - it still serves to distinguish
1781 * between two tasks started (effectively) simultaneously.
1782 */
1783 return t1 > t2;
1784 }
1785}
1786
1787/*
1788 * This function is a callback from heap_insert() and is used to order
1789 * the heap.
1790 * In this case we order the heap in descending task start time.
1791 */
1792static inline int started_after(void *p1, void *p2)
1793{
1794 struct task_struct *t1 = p1;
1795 struct task_struct *t2 = p2;
1796 return started_after_time(t1, &t2->start_time, t2);
1797}
1798
1799/**
1800 * cgroup_scan_tasks - iterate through all the tasks in a cgroup
1801 * @scan: struct cgroup_scanner containing arguments for the scan
1802 *
1803 * Arguments include pointers to callback functions test_task() and
1804 * process_task().
1805 * Iterate through all the tasks in a cgroup, calling test_task() for each,
1806 * and if it returns true, call process_task() for it also.
1807 * The test_task pointer may be NULL, meaning always true (select all tasks).
1808 * Effectively duplicates cgroup_iter_{start,next,end}()
1809 * but does not lock css_set_lock for the call to process_task().
1810 * The struct cgroup_scanner may be embedded in any structure of the caller's
1811 * creation.
1812 * It is guaranteed that process_task() will act on every task that
1813 * is a member of the cgroup for the duration of this call. This
1814 * function may or may not call process_task() for tasks that exit
1815 * or move to a different cgroup during the call, or are forked or
1816 * move into the cgroup during the call.
1817 *
1818 * Note that test_task() may be called with locks held, and may in some
1819 * situations be called multiple times for the same task, so it should
1820 * be cheap.
1821 * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
1822 * pre-allocated and will be used for heap operations (and its "gt" member will
1823 * be overwritten), else a temporary heap will be used (allocation of which
1824 * may cause this function to fail).
1825 */
1826int cgroup_scan_tasks(struct cgroup_scanner *scan)
1827{
1828 int retval, i;
1829 struct cgroup_iter it;
1830 struct task_struct *p, *dropped;
1831 /* Never dereference latest_task, since it's not refcounted */
1832 struct task_struct *latest_task = NULL;
1833 struct ptr_heap tmp_heap;
1834 struct ptr_heap *heap;
1835 struct timespec latest_time = { 0, 0 };
1836
1837 if (scan->heap) {
1838 /* The caller supplied our heap and pre-allocated its memory */
1839 heap = scan->heap;
1840 heap->gt = &started_after;
1841 } else {
1842 /* We need to allocate our own heap memory */
1843 heap = &tmp_heap;
1844 retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
1845 if (retval)
1846 /* cannot allocate the heap */
1847 return retval;
1848 }
1849
1850 again:
1851 /*
1852 * Scan tasks in the cgroup, using the scanner's "test_task" callback
1853 * to determine which are of interest, and using the scanner's
1854 * "process_task" callback to process any of them that need an update.
1855 * Since we don't want to hold any locks during the task updates,
1856 * gather tasks to be processed in a heap structure.
1857 * The heap is sorted by descending task start time.
1858 * If the statically-sized heap fills up, we overflow tasks that
1859 * started later, and in future iterations only consider tasks that
1860 * started after the latest task in the previous pass. This
1861 * guarantees forward progress and that we don't miss any tasks.
1862 */
1863 heap->size = 0;
1864 cgroup_iter_start(scan->cg, &it);
1865 while ((p = cgroup_iter_next(scan->cg, &it))) {
1866 /*
1867 * Only affect tasks that qualify per the caller's callback,
1868		 * if one was provided
1869 */
1870 if (scan->test_task && !scan->test_task(p, scan))
1871 continue;
1872 /*
1873 * Only process tasks that started after the last task
1874 * we processed
1875 */
1876 if (!started_after_time(p, &latest_time, latest_task))
1877 continue;
1878 dropped = heap_insert(heap, p);
1879 if (dropped == NULL) {
1880 /*
1881 * The new task was inserted; the heap wasn't
1882 * previously full
1883 */
1884 get_task_struct(p);
1885 } else if (dropped != p) {
1886 /*
1887 * The new task was inserted, and pushed out a
1888 * different task
1889 */
1890 get_task_struct(p);
1891 put_task_struct(dropped);
1892 }
1893 /*
1894 * Else the new task was newer than anything already in
1895 * the heap and wasn't inserted
1896 */
1897 }
1898 cgroup_iter_end(scan->cg, &it);
1899
1900 if (heap->size) {
1901 for (i = 0; i < heap->size; i++) {
1902 struct task_struct *p = heap->ptrs[i];
1903 if (i == 0) {
1904 latest_time = p->start_time;
1905 latest_task = p;
1906 }
1907 /* Process the task per the caller's callback */
1908 scan->process_task(p, scan);
1909 put_task_struct(p);
1910 }
1911 /*
1912 * If we had to process any tasks at all, scan again
1913 * in case some of them were in the middle of forking
1914 * children that didn't get processed.
1915 * Not the most efficient way to do it, but it avoids
1916		 * having to take extra locking in the fork path
1917 */
1918 goto again;
1919 }
1920 if (heap == &tmp_heap)
1921 heap_free(&tmp_heap);
1922 return 0;
1923}
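
/*
 * Illustrative caller of cgroup_scan_tasks(); the example_* names
 * are invented for this sketch, with the callback signatures
 * matching struct cgroup_scanner:
 *
 *	static int example_test(struct task_struct *p,
 *				struct cgroup_scanner *scan)
 *	{
 *		return !(p->flags & PF_EXITING);
 *	}
 *
 *	static void example_process(struct task_struct *p,
 *				    struct cgroup_scanner *scan)
 *	{
 *		... act on p; runs without css_set_lock held ...
 *	}
 *
 *	struct cgroup_scanner scan = {
 *		.cg		= cgrp,
 *		.test_task	= example_test,
 *		.process_task	= example_process,
 *		.heap		= NULL,	(cgroup_scan_tasks() allocates one)
 *	};
 *	retval = cgroup_scan_tasks(&scan);
 */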
1924
Paul Menage817929e2007-10-18 23:39:36 -07001925/*
Paul Menagebbcb81d2007-10-18 23:39:32 -07001926 * Stuff for reading the 'tasks' file.
1927 *
1928 * Reading this file can return large amounts of data if a cgroup has
1929 * *lots* of attached tasks. So it may need several calls to read(),
1930 * but we cannot guarantee that the information we produce is correct
1931 * unless we produce it entirely atomically.
1932 *
1933 * Upon tasks file open(), a struct ctr_struct is allocated, that
1934 * will have a pointer to an array (also allocated here). The struct
1935 * ctr_struct * is stored in file->private_data. Its resources will
1936 * be freed by release() when the file is closed. The array is used
1937 * to sprintf the PIDs and then used by read().
1938 */
1939struct ctr_struct {
1940 char *buf;
1941 int bufsz;
1942};
1943
1944/*
1945 * Load into 'pidarray' up to 'npids' of the tasks using cgroup
Paul Menagebd89aab2007-10-18 23:40:44 -07001946 * 'cgrp'. Return the actual number of pids loaded. No extra
Paul Menagebbcb81d2007-10-18 23:39:32 -07001947 * locking is needed: the cgroup iterator read-holds css_set_lock
Paul Menage817929e2007-10-18 23:39:36 -07001948 * across the walk, so the css_set can't go away, and it is
1949 * immutable after creation.
1950 */
Paul Menagebd89aab2007-10-18 23:40:44 -07001951static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
Paul Menagebbcb81d2007-10-18 23:39:32 -07001952{
1953 int n = 0;
Paul Menage817929e2007-10-18 23:39:36 -07001954 struct cgroup_iter it;
1955 struct task_struct *tsk;
Paul Menagebd89aab2007-10-18 23:40:44 -07001956 cgroup_iter_start(cgrp, &it);
1957 while ((tsk = cgroup_iter_next(cgrp, &it))) {
Paul Menage817929e2007-10-18 23:39:36 -07001958 if (unlikely(n == npids))
1959 break;
Pavel Emelyanov73507f32008-02-07 00:14:47 -08001960 pidarray[n++] = task_pid_vnr(tsk);
Paul Menage817929e2007-10-18 23:39:36 -07001961 }
Paul Menagebd89aab2007-10-18 23:40:44 -07001962 cgroup_iter_end(cgrp, &it);
Paul Menagebbcb81d2007-10-18 23:39:32 -07001963 return n;
1964}
1965
Balbir Singh846c7bb2007-10-18 23:39:44 -07001966/**
Li Zefana043e3b2008-02-23 15:24:09 -08001967 * cgroupstats_build - build and fill cgroupstats
Balbir Singh846c7bb2007-10-18 23:39:44 -07001968 * @stats: cgroupstats to fill information into
1969 * @dentry: A dentry entry belonging to the cgroup for which stats have
1970 * been requested.
Li Zefana043e3b2008-02-23 15:24:09 -08001971 *
1972 * Build and fill cgroupstats so that taskstats can export it to user
1973 * space.
Balbir Singh846c7bb2007-10-18 23:39:44 -07001974 */
1975int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
1976{
1977 int ret = -EINVAL;
Paul Menagebd89aab2007-10-18 23:40:44 -07001978 struct cgroup *cgrp;
Balbir Singh846c7bb2007-10-18 23:39:44 -07001979 struct cgroup_iter it;
1980 struct task_struct *tsk;
1981 /*
1982 * Validate dentry by checking the superblock operations
1983 */
1984 if (dentry->d_sb->s_op != &cgroup_ops)
1985 goto err;
1986
1987 ret = 0;
Paul Menagebd89aab2007-10-18 23:40:44 -07001988 cgrp = dentry->d_fsdata;
Balbir Singh846c7bb2007-10-18 23:39:44 -07001989 rcu_read_lock();
1990
Paul Menagebd89aab2007-10-18 23:40:44 -07001991 cgroup_iter_start(cgrp, &it);
1992 while ((tsk = cgroup_iter_next(cgrp, &it))) {
Balbir Singh846c7bb2007-10-18 23:39:44 -07001993 switch (tsk->state) {
1994 case TASK_RUNNING:
1995 stats->nr_running++;
1996 break;
1997 case TASK_INTERRUPTIBLE:
1998 stats->nr_sleeping++;
1999 break;
2000 case TASK_UNINTERRUPTIBLE:
2001 stats->nr_uninterruptible++;
2002 break;
2003 case TASK_STOPPED:
2004 stats->nr_stopped++;
2005 break;
2006 default:
2007 if (delayacct_is_task_waiting_on_io(tsk))
2008 stats->nr_io_wait++;
2009 break;
2010 }
2011 }
Paul Menagebd89aab2007-10-18 23:40:44 -07002012 cgroup_iter_end(cgrp, &it);
Balbir Singh846c7bb2007-10-18 23:39:44 -07002013
2014 rcu_read_unlock();
2015err:
2016 return ret;
2017}
2018
Paul Menagebbcb81d2007-10-18 23:39:32 -07002019static int cmppid(const void *a, const void *b)
2020{
2021 return *(pid_t *)a - *(pid_t *)b;
2022}
2023
2024/*
2025 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
2026 * decimal pids in 'buf'. Don't write more than 'sz' chars, but return
2027 * count 'cnt' of how many chars would be written if buf were large enough.
2028 */
2029static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
2030{
2031 int cnt = 0;
2032 int i;
2033
2034 for (i = 0; i < npids; i++)
2035 cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
2036 return cnt;
2037}
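
/*
 * Worked example of the two-pass sizing idiom used by
 * cgroup_tasks_open() below: for pids {3, 42} the first call (with a
 * 1-byte buffer) returns 5, the length of "3\n42\n", so 5 + 1 bytes
 * are allocated and the second call actually fills them in.
 */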
2038
2039/*
2040 * Handle an open on 'tasks' file. Prepare a buffer listing the
2041 * process id's of tasks currently attached to the cgroup being opened.
2042 *
2043 * Does not require any specific cgroup mutexes, and does not take any.
2044 */
2045static int cgroup_tasks_open(struct inode *unused, struct file *file)
2046{
Paul Menagebd89aab2007-10-18 23:40:44 -07002047 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
Paul Menagebbcb81d2007-10-18 23:39:32 -07002048 struct ctr_struct *ctr;
2049 pid_t *pidarray;
2050 int npids;
2051 char c;
2052
2053 if (!(file->f_mode & FMODE_READ))
2054 return 0;
2055
2056 ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
2057 if (!ctr)
2058 goto err0;
2059
2060 /*
2061 * If cgroup gets more users after we read count, we won't have
2062 * enough space - tough. This race is indistinguishable to the
2063 * caller from the case that the additional cgroup users didn't
2064 * show up until sometime later on.
2065 */
Paul Menagebd89aab2007-10-18 23:40:44 -07002066 npids = cgroup_task_count(cgrp);
Paul Menagebbcb81d2007-10-18 23:39:32 -07002067 if (npids) {
2068 pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
2069 if (!pidarray)
2070 goto err1;
2071
Paul Menagebd89aab2007-10-18 23:40:44 -07002072 npids = pid_array_load(pidarray, npids, cgrp);
Paul Menagebbcb81d2007-10-18 23:39:32 -07002073 sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
2074
2075 /* Call pid_array_to_buf() twice, first just to get bufsz */
2076 ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
2077 ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
2078 if (!ctr->buf)
2079 goto err2;
2080 ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
2081
2082 kfree(pidarray);
2083 } else {
2084		ctr->buf = NULL;
2085 ctr->bufsz = 0;
2086 }
2087 file->private_data = ctr;
2088 return 0;
2089
2090err2:
2091 kfree(pidarray);
2092err1:
2093 kfree(ctr);
2094err0:
2095 return -ENOMEM;
2096}
2097
Paul Menagebd89aab2007-10-18 23:40:44 -07002098static ssize_t cgroup_tasks_read(struct cgroup *cgrp,
Paul Menagebbcb81d2007-10-18 23:39:32 -07002099 struct cftype *cft,
2100 struct file *file, char __user *buf,
2101 size_t nbytes, loff_t *ppos)
2102{
2103 struct ctr_struct *ctr = file->private_data;
2104
2105 return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
2106}
2107
2108static int cgroup_tasks_release(struct inode *unused_inode,
2109 struct file *file)
2110{
2111 struct ctr_struct *ctr;
2112
2113 if (file->f_mode & FMODE_READ) {
2114 ctr = file->private_data;
2115 kfree(ctr->buf);
2116 kfree(ctr);
2117 }
2118 return 0;
2119}
2120
Paul Menagebd89aab2007-10-18 23:40:44 -07002121static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
Paul Menage81a6a5c2007-10-18 23:39:38 -07002122 struct cftype *cft)
2123{
Paul Menagebd89aab2007-10-18 23:40:44 -07002124 return notify_on_release(cgrp);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002125}
2126
Paul Menagebd89aab2007-10-18 23:40:44 -07002127static u64 cgroup_read_releasable(struct cgroup *cgrp, struct cftype *cft)
Paul Menage81a6a5c2007-10-18 23:39:38 -07002128{
Paul Menagebd89aab2007-10-18 23:40:44 -07002129 return test_bit(CGRP_RELEASABLE, &cgrp->flags);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002130}
2131
Paul Menagebbcb81d2007-10-18 23:39:32 -07002132/*
2133 * for the common functions, 'private' gives the type of file
2134 */
Paul Menage81a6a5c2007-10-18 23:39:38 -07002135static struct cftype files[] = {
2136 {
2137 .name = "tasks",
2138 .open = cgroup_tasks_open,
2139 .read = cgroup_tasks_read,
2140 .write = cgroup_common_file_write,
2141 .release = cgroup_tasks_release,
2142 .private = FILE_TASKLIST,
2143 },
2144
2145 {
2146 .name = "notify_on_release",
2147 .read_uint = cgroup_read_notify_on_release,
2148 .write = cgroup_common_file_write,
2149 .private = FILE_NOTIFY_ON_RELEASE,
2150 },
2151
2152 {
2153 .name = "releasable",
2154 .read_uint = cgroup_read_releasable,
2155 .private = FILE_RELEASABLE,
2156 }
2157};
2158
2159static struct cftype cft_release_agent = {
2160 .name = "release_agent",
2161 .read = cgroup_common_file_read,
Paul Menagebbcb81d2007-10-18 23:39:32 -07002162 .write = cgroup_common_file_write,
Paul Menage81a6a5c2007-10-18 23:39:38 -07002163 .private = FILE_RELEASE_AGENT,
Paul Menagebbcb81d2007-10-18 23:39:32 -07002164};
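
/*
 * From userspace these surface as ordinary control files; a possible
 * session (mount point and agent path are only examples):
 *
 *	# mount -t cgroup -o cpuset none /dev/cgroup
 *	# echo /sbin/my_release_agent > /dev/cgroup/release_agent
 *	# echo 1 > /dev/cgroup/mygroup/notify_on_release
 *
 * "tasks", "notify_on_release" and "releasable" exist in every
 * cgroup directory; "release_agent" exists only in the root (see
 * cgroup_populate_dir() below).
 */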
2165
Paul Menagebd89aab2007-10-18 23:40:44 -07002166static int cgroup_populate_dir(struct cgroup *cgrp)
Paul Menageddbcc7e2007-10-18 23:39:30 -07002167{
2168 int err;
2169 struct cgroup_subsys *ss;
2170
2171 /* First clear out any existing files */
Paul Menagebd89aab2007-10-18 23:40:44 -07002172 cgroup_clear_directory(cgrp->dentry);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002173
Paul Menagebd89aab2007-10-18 23:40:44 -07002174 err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files));
Paul Menagebbcb81d2007-10-18 23:39:32 -07002175 if (err < 0)
2176 return err;
2177
Paul Menagebd89aab2007-10-18 23:40:44 -07002178 if (cgrp == cgrp->top_cgroup) {
2179 if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0)
Paul Menage81a6a5c2007-10-18 23:39:38 -07002180 return err;
2181 }
2182
Paul Menagebd89aab2007-10-18 23:40:44 -07002183 for_each_subsys(cgrp->root, ss) {
2184 if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
Paul Menageddbcc7e2007-10-18 23:39:30 -07002185 return err;
2186 }
2187
2188 return 0;
2189}
2190
2191static void init_cgroup_css(struct cgroup_subsys_state *css,
2192 struct cgroup_subsys *ss,
Paul Menagebd89aab2007-10-18 23:40:44 -07002193 struct cgroup *cgrp)
Paul Menageddbcc7e2007-10-18 23:39:30 -07002194{
Paul Menagebd89aab2007-10-18 23:40:44 -07002195 css->cgroup = cgrp;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002196 atomic_set(&css->refcnt, 0);
2197 css->flags = 0;
Paul Menagebd89aab2007-10-18 23:40:44 -07002198 if (cgrp == dummytop)
Paul Menageddbcc7e2007-10-18 23:39:30 -07002199 set_bit(CSS_ROOT, &css->flags);
Paul Menagebd89aab2007-10-18 23:40:44 -07002200 BUG_ON(cgrp->subsys[ss->subsys_id]);
2201 cgrp->subsys[ss->subsys_id] = css;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002202}
2203
2204/**
Li Zefana043e3b2008-02-23 15:24:09 -08002205 * cgroup_create - create a cgroup
2206 * @parent: cgroup that will be parent of the new cgroup
2207 * @dentry: dentry of the new cgroup
2208 * @mode: mode to set on new inode
Paul Menageddbcc7e2007-10-18 23:39:30 -07002209 *
Li Zefana043e3b2008-02-23 15:24:09 -08002210 * Must be called with the mutex on the parent inode held
Paul Menageddbcc7e2007-10-18 23:39:30 -07002211 */
Paul Menageddbcc7e2007-10-18 23:39:30 -07002212static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
2213 int mode)
2214{
Paul Menagebd89aab2007-10-18 23:40:44 -07002215 struct cgroup *cgrp;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002216 struct cgroupfs_root *root = parent->root;
2217 int err = 0;
2218 struct cgroup_subsys *ss;
2219 struct super_block *sb = root->sb;
2220
Paul Menagebd89aab2007-10-18 23:40:44 -07002221 cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
2222 if (!cgrp)
Paul Menageddbcc7e2007-10-18 23:39:30 -07002223 return -ENOMEM;
2224
2225 /* Grab a reference on the superblock so the hierarchy doesn't
2226 * get deleted on unmount if there are child cgroups. This
2227 * can be done outside cgroup_mutex, since the sb can't
2228 * disappear while someone has an open control file on the
2229 * fs */
2230 atomic_inc(&sb->s_active);
2231
2232 mutex_lock(&cgroup_mutex);
2233
Paul Menagebd89aab2007-10-18 23:40:44 -07002234 cgrp->flags = 0;
2235 INIT_LIST_HEAD(&cgrp->sibling);
2236 INIT_LIST_HEAD(&cgrp->children);
2237 INIT_LIST_HEAD(&cgrp->css_sets);
2238 INIT_LIST_HEAD(&cgrp->release_list);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002239
Paul Menagebd89aab2007-10-18 23:40:44 -07002240 cgrp->parent = parent;
2241 cgrp->root = parent->root;
2242 cgrp->top_cgroup = parent->top_cgroup;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002243
2244 for_each_subsys(root, ss) {
Paul Menagebd89aab2007-10-18 23:40:44 -07002245 struct cgroup_subsys_state *css = ss->create(ss, cgrp);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002246 if (IS_ERR(css)) {
2247 err = PTR_ERR(css);
2248 goto err_destroy;
2249 }
Paul Menagebd89aab2007-10-18 23:40:44 -07002250 init_cgroup_css(css, ss, cgrp);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002251 }
2252
Paul Menagebd89aab2007-10-18 23:40:44 -07002253 list_add(&cgrp->sibling, &cgrp->parent->children);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002254 root->number_of_cgroups++;
2255
Paul Menagebd89aab2007-10-18 23:40:44 -07002256 err = cgroup_create_dir(cgrp, dentry, mode);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002257 if (err < 0)
2258 goto err_remove;
2259
2260 /* The cgroup directory was pre-locked for us */
Paul Menagebd89aab2007-10-18 23:40:44 -07002261 BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
Paul Menageddbcc7e2007-10-18 23:39:30 -07002262
Paul Menagebd89aab2007-10-18 23:40:44 -07002263 err = cgroup_populate_dir(cgrp);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002264 /* If err < 0, we have a half-filled directory - oh well ;) */
2265
2266 mutex_unlock(&cgroup_mutex);
Paul Menagebd89aab2007-10-18 23:40:44 -07002267 mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002268
2269 return 0;
2270
2271 err_remove:
2272
Paul Menagebd89aab2007-10-18 23:40:44 -07002273 list_del(&cgrp->sibling);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002274 root->number_of_cgroups--;
2275
2276 err_destroy:
2277
2278 for_each_subsys(root, ss) {
Paul Menagebd89aab2007-10-18 23:40:44 -07002279 if (cgrp->subsys[ss->subsys_id])
2280 ss->destroy(ss, cgrp);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002281 }
2282
2283 mutex_unlock(&cgroup_mutex);
2284
2285 /* Release the reference count that we took on the superblock */
2286 deactivate_super(sb);
2287
Paul Menagebd89aab2007-10-18 23:40:44 -07002288 kfree(cgrp);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002289 return err;
2290}
2291
2292static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2293{
2294 struct cgroup *c_parent = dentry->d_parent->d_fsdata;
2295
2296 /* the vfs holds inode->i_mutex already */
2297 return cgroup_create(c_parent, dentry, mode | S_IFDIR);
2298}
2299
Paul Menagebd89aab2007-10-18 23:40:44 -07002300static inline int cgroup_has_css_refs(struct cgroup *cgrp)
Paul Menage81a6a5c2007-10-18 23:39:38 -07002301{
2302 /* Check the reference count on each subsystem. Since we
2303 * already established that there are no tasks in the
2304 * cgroup, if the css refcount is also 0, then there should
2305 * be no outstanding references, so the subsystem is safe to
2306 * destroy. We scan across all subsystems rather than using
2307 * the per-hierarchy linked list of mounted subsystems since
2308 * we can be called via check_for_release() with no
2309 * synchronization other than RCU, and the subsystem linked
2310 * list isn't RCU-safe */
2311 int i;
2312 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2313 struct cgroup_subsys *ss = subsys[i];
2314 struct cgroup_subsys_state *css;
2315 /* Skip subsystems not in this hierarchy */
Paul Menagebd89aab2007-10-18 23:40:44 -07002316 if (ss->root != cgrp->root)
Paul Menage81a6a5c2007-10-18 23:39:38 -07002317 continue;
Paul Menagebd89aab2007-10-18 23:40:44 -07002318 css = cgrp->subsys[ss->subsys_id];
Paul Menage81a6a5c2007-10-18 23:39:38 -07002319 /* When called from check_for_release() it's possible
2320 * that by this point the cgroup has been removed
2321 * and the css deleted. But a false-positive doesn't
2322 * matter, since it can only happen if the cgroup
2323 * has been deleted and hence no longer needs the
2324 * release agent to be called anyway. */
Paul Jacksone18f6312008-02-07 00:13:44 -08002325 if (css && atomic_read(&css->refcnt))
Paul Menage81a6a5c2007-10-18 23:39:38 -07002326 return 1;
Paul Menage81a6a5c2007-10-18 23:39:38 -07002327 }
2328 return 0;
2329}
2330
Paul Menageddbcc7e2007-10-18 23:39:30 -07002331static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2332{
Paul Menagebd89aab2007-10-18 23:40:44 -07002333 struct cgroup *cgrp = dentry->d_fsdata;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002334 struct dentry *d;
2335 struct cgroup *parent;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002336 struct super_block *sb;
2337 struct cgroupfs_root *root;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002338
2339	/* the vfs already holds both the parent's and the victim's inode->i_mutex */
2340
2341 mutex_lock(&cgroup_mutex);
Paul Menagebd89aab2007-10-18 23:40:44 -07002342 if (atomic_read(&cgrp->count) != 0) {
Paul Menageddbcc7e2007-10-18 23:39:30 -07002343 mutex_unlock(&cgroup_mutex);
2344 return -EBUSY;
2345 }
Paul Menagebd89aab2007-10-18 23:40:44 -07002346 if (!list_empty(&cgrp->children)) {
Paul Menageddbcc7e2007-10-18 23:39:30 -07002347 mutex_unlock(&cgroup_mutex);
2348 return -EBUSY;
2349 }
2350
Paul Menagebd89aab2007-10-18 23:40:44 -07002351 parent = cgrp->parent;
2352 root = cgrp->root;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002353 sb = root->sb;
Li Zefana043e3b2008-02-23 15:24:09 -08002354
KAMEZAWA Hiroyuki4fca88c2008-02-07 00:14:27 -08002355 /*
Li Zefana043e3b2008-02-23 15:24:09 -08002356	 * Call the pre_destroy handlers of each subsystem, notifying
2357	 * them that an rmdir() request has arrived.
KAMEZAWA Hiroyuki4fca88c2008-02-07 00:14:27 -08002358 */
2359 cgroup_call_pre_destroy(cgrp);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002360
Paul Menagebd89aab2007-10-18 23:40:44 -07002361 if (cgroup_has_css_refs(cgrp)) {
Paul Menageddbcc7e2007-10-18 23:39:30 -07002362 mutex_unlock(&cgroup_mutex);
2363 return -EBUSY;
2364 }
2365
Paul Menage81a6a5c2007-10-18 23:39:38 -07002366 spin_lock(&release_list_lock);
Paul Menagebd89aab2007-10-18 23:40:44 -07002367 set_bit(CGRP_REMOVED, &cgrp->flags);
2368 if (!list_empty(&cgrp->release_list))
2369 list_del(&cgrp->release_list);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002370 spin_unlock(&release_list_lock);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002371 /* delete my sibling from parent->children */
Paul Menagebd89aab2007-10-18 23:40:44 -07002372 list_del(&cgrp->sibling);
2373 spin_lock(&cgrp->dentry->d_lock);
2374 d = dget(cgrp->dentry);
2375 cgrp->dentry = NULL;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002376 spin_unlock(&d->d_lock);
2377
2378 cgroup_d_remove_dir(d);
2379 dput(d);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002380
Paul Menagebd89aab2007-10-18 23:40:44 -07002381 set_bit(CGRP_RELEASABLE, &parent->flags);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002382 check_for_release(parent);
2383
Paul Menageddbcc7e2007-10-18 23:39:30 -07002384 mutex_unlock(&cgroup_mutex);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002385 return 0;
2386}
2387
2388static void cgroup_init_subsys(struct cgroup_subsys *ss)
2389{
Paul Menageddbcc7e2007-10-18 23:39:30 -07002390 struct cgroup_subsys_state *css;
Paul Menage817929e2007-10-18 23:39:36 -07002391 struct list_head *l;
Diego Callejacfe36bd2007-11-14 16:58:54 -08002392
2393 printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002394
2395 /* Create the top cgroup state for this subsystem */
2396 ss->root = &rootnode;
2397 css = ss->create(ss, dummytop);
2398 /* We don't handle early failures gracefully */
2399 BUG_ON(IS_ERR(css));
2400 init_cgroup_css(css, ss, dummytop);
2401
Paul Menage817929e2007-10-18 23:39:36 -07002402	/* Update all css_sets to contain a subsys
2403	 * pointer to this state - since the subsystem is
2404	 * newly registered, all tasks and hence all css_sets
2405	 * are in the subsystem's top cgroup. */
2406 write_lock(&css_set_lock);
2407 l = &init_css_set.list;
2408 do {
2409 struct css_set *cg =
2410 list_entry(l, struct css_set, list);
2411 cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
2412 l = l->next;
2413 } while (l != &init_css_set.list);
2414 write_unlock(&css_set_lock);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002415
2416 /* If this subsystem requested that it be notified with fork
2417 * events, we should send it one now for every process in the
2418 * system */
Paul Menage81a6a5c2007-10-18 23:39:38 -07002419 if (ss->fork) {
2420 struct task_struct *g, *p;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002421
Paul Menage81a6a5c2007-10-18 23:39:38 -07002422 read_lock(&tasklist_lock);
2423 do_each_thread(g, p) {
2424 ss->fork(ss, p);
2425 } while_each_thread(g, p);
2426 read_unlock(&tasklist_lock);
2427 }
Paul Menageddbcc7e2007-10-18 23:39:30 -07002428
2429 need_forkexit_callback |= ss->fork || ss->exit;
2430
2431 ss->active = 1;
2432}
2433
2434/**
Li Zefana043e3b2008-02-23 15:24:09 -08002435 * cgroup_init_early - cgroup initialization at system boot
2436 *
2437 * Initialize cgroups at system boot, and initialize any
2438 * subsystems that request early init.
Paul Menageddbcc7e2007-10-18 23:39:30 -07002439 */
2440int __init cgroup_init_early(void)
2441{
2442 int i;
Paul Menage817929e2007-10-18 23:39:36 -07002443 kref_init(&init_css_set.ref);
2444 kref_get(&init_css_set.ref);
2445 INIT_LIST_HEAD(&init_css_set.list);
2446 INIT_LIST_HEAD(&init_css_set.cg_links);
2447 INIT_LIST_HEAD(&init_css_set.tasks);
2448 css_set_count = 1;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002449 init_cgroup_root(&rootnode);
2450 list_add(&rootnode.root_list, &roots);
Paul Menage817929e2007-10-18 23:39:36 -07002451 root_count = 1;
2452 init_task.cgroups = &init_css_set;
2453
2454 init_css_set_link.cg = &init_css_set;
Paul Menagebd89aab2007-10-18 23:40:44 -07002455 list_add(&init_css_set_link.cgrp_link_list,
Paul Menage817929e2007-10-18 23:39:36 -07002456 &rootnode.top_cgroup.css_sets);
2457 list_add(&init_css_set_link.cg_link_list,
2458 &init_css_set.cg_links);
Paul Menageddbcc7e2007-10-18 23:39:30 -07002459
2460 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2461 struct cgroup_subsys *ss = subsys[i];
2462
2463 BUG_ON(!ss->name);
2464 BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
2465 BUG_ON(!ss->create);
2466 BUG_ON(!ss->destroy);
2467 if (ss->subsys_id != i) {
Diego Callejacfe36bd2007-11-14 16:58:54 -08002468 printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
Paul Menageddbcc7e2007-10-18 23:39:30 -07002469 ss->name, ss->subsys_id);
2470 BUG();
2471 }
2472
2473 if (ss->early_init)
2474 cgroup_init_subsys(ss);
2475 }
2476 return 0;
2477}
2478
2479/**
Li Zefana043e3b2008-02-23 15:24:09 -08002480 * cgroup_init - cgroup initialization
2481 *
2482 * Register cgroup filesystem and /proc file, and initialize
2483 * any subsystems that didn't request early init.
Paul Menageddbcc7e2007-10-18 23:39:30 -07002484 */
2485int __init cgroup_init(void)
2486{
2487 int err;
2488 int i;
Paul Menagea4243162007-10-18 23:39:35 -07002489 struct proc_dir_entry *entry;
2490
2491 err = bdi_init(&cgroup_backing_dev_info);
2492 if (err)
2493 return err;
Paul Menageddbcc7e2007-10-18 23:39:30 -07002494
2495 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2496 struct cgroup_subsys *ss = subsys[i];
2497 if (!ss->early_init)
2498 cgroup_init_subsys(ss);
2499 }
2500
2501 err = register_filesystem(&cgroup_fs_type);
2502 if (err < 0)
2503 goto out;
2504
Paul Menagea4243162007-10-18 23:39:35 -07002505 entry = create_proc_entry("cgroups", 0, NULL);
2506 if (entry)
2507 entry->proc_fops = &proc_cgroupstats_operations;
2508
Paul Menageddbcc7e2007-10-18 23:39:30 -07002509out:
Paul Menagea4243162007-10-18 23:39:35 -07002510 if (err)
2511 bdi_destroy(&cgroup_backing_dev_info);
2512
Paul Menageddbcc7e2007-10-18 23:39:30 -07002513 return err;
2514}
Paul Menageb4f48b62007-10-18 23:39:33 -07002515
Paul Menagea4243162007-10-18 23:39:35 -07002516/*
2517 * proc_cgroup_show()
2518 * - Print task's cgroup paths into seq_file, one line for each hierarchy
2519 * - Used for /proc/<pid>/cgroup.
2520 * - No need to task_lock(tsk) on this tsk->cgroup reference, as it
2521 * doesn't really matter if tsk->cgroup changes after we read it,
Cliff Wickman956db3c2008-02-07 00:14:43 -08002522 * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
Paul Menagea4243162007-10-18 23:39:35 -07002523 * anyway. No need to check that tsk->cgroup != NULL, thanks to
2524 * the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks
2525 * cgroup to top_cgroup.
2526 */
2527
2528/* TODO: Use a proper seq_file iterator */
2529static int proc_cgroup_show(struct seq_file *m, void *v)
2530{
2531 struct pid *pid;
2532 struct task_struct *tsk;
2533 char *buf;
2534 int retval;
2535 struct cgroupfs_root *root;
2536
2537 retval = -ENOMEM;
2538 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2539 if (!buf)
2540 goto out;
2541
2542 retval = -ESRCH;
2543 pid = m->private;
2544 tsk = get_pid_task(pid, PIDTYPE_PID);
2545 if (!tsk)
2546 goto out_free;
2547
2548 retval = 0;
2549
2550 mutex_lock(&cgroup_mutex);
2551
2552 for_each_root(root) {
2553 struct cgroup_subsys *ss;
Paul Menagebd89aab2007-10-18 23:40:44 -07002554 struct cgroup *cgrp;
Paul Menagea4243162007-10-18 23:39:35 -07002555 int subsys_id;
2556 int count = 0;
2557
2558 /* Skip this hierarchy if it has no active subsystems */
2559 if (!root->actual_subsys_bits)
2560 continue;
2561 for_each_subsys(root, ss)
2562 seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
2563 seq_putc(m, ':');
2564 get_first_subsys(&root->top_cgroup, NULL, &subsys_id);
Paul Menagebd89aab2007-10-18 23:40:44 -07002565 cgrp = task_cgroup(tsk, subsys_id);
2566 retval = cgroup_path(cgrp, buf, PAGE_SIZE);
Paul Menagea4243162007-10-18 23:39:35 -07002567 if (retval < 0)
2568 goto out_unlock;
2569 seq_puts(m, buf);
2570 seq_putc(m, '\n');
2571 }
2572
2573out_unlock:
2574 mutex_unlock(&cgroup_mutex);
2575 put_task_struct(tsk);
2576out_free:
2577 kfree(buf);
2578out:
2579 return retval;
2580}
2581
2582static int cgroup_open(struct inode *inode, struct file *file)
2583{
2584 struct pid *pid = PROC_I(inode)->pid;
2585 return single_open(file, proc_cgroup_show, pid);
2586}
2587
2588struct file_operations proc_cgroup_operations = {
2589 .open = cgroup_open,
2590 .read = seq_read,
2591 .llseek = seq_lseek,
2592 .release = single_release,
2593};
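
/*
 * Sample /proc/<pid>/cgroup contents in the format produced above,
 * one line per active hierarchy; here a single hierarchy with only
 * cpuset attached and the task living in /mygroup (illustrative):
 *
 *	cpuset:/mygroup
 */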
2594
2595/* Display information about each subsystem and each hierarchy */
2596static int proc_cgroupstats_show(struct seq_file *m, void *v)
2597{
2598 int i;
Paul Menagea4243162007-10-18 23:39:35 -07002599
Paul Menage817929e2007-10-18 23:39:36 -07002600 seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\n");
Paul Menagea4243162007-10-18 23:39:35 -07002601 mutex_lock(&cgroup_mutex);
Paul Menagea4243162007-10-18 23:39:35 -07002602 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2603 struct cgroup_subsys *ss = subsys[i];
Paul Menage817929e2007-10-18 23:39:36 -07002604 seq_printf(m, "%s\t%lu\t%d\n",
2605 ss->name, ss->root->subsys_bits,
2606 ss->root->number_of_cgroups);
Paul Menagea4243162007-10-18 23:39:35 -07002607 }
2608 mutex_unlock(&cgroup_mutex);
2609 return 0;
2610}
2611
2612static int cgroupstats_open(struct inode *inode, struct file *file)
2613{
2614	return single_open(file, proc_cgroupstats_show, NULL);
2615}
2616
2617static struct file_operations proc_cgroupstats_operations = {
2618 .open = cgroupstats_open,
2619 .read = seq_read,
2620 .llseek = seq_lseek,
2621 .release = single_release,
2622};
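
/*
 * Sample /proc/cgroups output in the format produced above (values
 * are illustrative): the second column is the root's subsys_bits
 * mask, the third the number of cgroups in that hierarchy.
 *
 *	#subsys_name	hierarchy	num_cgroups
 *	cpuset	1	4
 *	debug	0	1
 */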
2623
Paul Menageb4f48b62007-10-18 23:39:33 -07002624/**
2625 * cgroup_fork - attach newly forked task to its parent's cgroup.
Li Zefana043e3b2008-02-23 15:24:09 -08002626 * @child: pointer to task_struct of the newly forked child process.
Paul Menageb4f48b62007-10-18 23:39:33 -07002627 *
2628 * Description: A task inherits its parent's cgroup at fork().
2629 *
2630 * A pointer to the shared css_set was automatically copied in
2631 * fork.c by dup_task_struct(). However, we ignore that copy, since
2632 * it was not made under the protection of RCU or cgroup_mutex, so
Cliff Wickman956db3c2008-02-07 00:14:43 -08002633 * might no longer be a valid cgroup pointer. cgroup_attach_task() might
Paul Menage817929e2007-10-18 23:39:36 -07002634 * have already changed current->cgroups, allowing the previously
2635 * referenced css_set to be removed and freed.
Paul Menageb4f48b62007-10-18 23:39:33 -07002636 *
2637 * At the point that cgroup_fork() is called, 'current' is the parent
2638 * task, and the passed argument 'child' points to the child task.
2639 */
2640void cgroup_fork(struct task_struct *child)
2641{
Paul Menage817929e2007-10-18 23:39:36 -07002642 task_lock(current);
2643 child->cgroups = current->cgroups;
2644 get_css_set(child->cgroups);
2645 task_unlock(current);
2646 INIT_LIST_HEAD(&child->cg_list);
Paul Menageb4f48b62007-10-18 23:39:33 -07002647}
2648
2649/**
Li Zefana043e3b2008-02-23 15:24:09 -08002650 * cgroup_fork_callbacks - run fork callbacks
2651 * @child: the new task
2652 *
2653 * Called on a new task very soon before adding it to the
2654 * tasklist. No need to take any locks since no-one can
2655 * be operating on this task.
Paul Menageb4f48b62007-10-18 23:39:33 -07002656 */
2657void cgroup_fork_callbacks(struct task_struct *child)
2658{
2659 if (need_forkexit_callback) {
2660 int i;
2661 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2662 struct cgroup_subsys *ss = subsys[i];
2663 if (ss->fork)
2664 ss->fork(ss, child);
2665 }
2666 }
2667}
2668
2669/**
Li Zefana043e3b2008-02-23 15:24:09 -08002670 * cgroup_post_fork - called on a new task after adding it to the task list
2671 * @child: the task in question
2672 *
2673 * Adds the task to the list running through its css_set if necessary.
2674 * Has to be after the task is visible on the task list in case we race
2675 * with the first call to cgroup_iter_start() - to guarantee that the
2676 * new task ends up on its list.
2677 */
Paul Menage817929e2007-10-18 23:39:36 -07002678void cgroup_post_fork(struct task_struct *child)
2679{
2680 if (use_task_css_set_links) {
2681 write_lock(&css_set_lock);
2682 if (list_empty(&child->cg_list))
2683 list_add(&child->cg_list, &child->cgroups->tasks);
2684 write_unlock(&css_set_lock);
2685 }
2686}
2687/**
Paul Menageb4f48b62007-10-18 23:39:33 -07002688 * cgroup_exit - detach cgroup from exiting task
2689 * @tsk: pointer to task_struct of exiting process
Li Zefana043e3b2008-02-23 15:24:09 -08002690 * @run_callbacks: run exit callbacks?
Paul Menageb4f48b62007-10-18 23:39:33 -07002691 *
2692 * Description: Detach cgroup from @tsk and release it.
2693 *
2694 * Note that cgroups marked notify_on_release force every task in
2695 * them to take the global cgroup_mutex mutex when exiting.
2696 * This could impact scaling on very large systems. Be reluctant to
2697 * use notify_on_release cgroups where very high task exit scaling
2698 * is required on large systems.
2699 *
2700 * the_top_cgroup_hack:
2701 *
2702 * Set the exiting task's cgroup to the root cgroup (top_cgroup).
2703 *
2704 * We call cgroup_exit() while the task is still competent to
2705 * handle notify_on_release(), then leave the task attached to the
2706 * root cgroup in each hierarchy for the remainder of its exit.
2707 *
2708 * To do this properly, we would increment the reference count on
2709 * top_cgroup, and near the very end of the kernel/exit.c do_exit()
2710 * code we would add a second cgroup function call, to drop that
2711 * reference. This would just create an unnecessary hot spot on
2712 * the top_cgroup reference count, to no avail.
2713 *
2714 * Normally, holding a reference to a cgroup without bumping its
2715 * count is unsafe. The cgroup could go away, or someone could
2716 * attach us to a different cgroup, decrementing the count on
2717 * the first cgroup that we never incremented. But in this case,
2718 * top_cgroup isn't going away, and either task has PF_EXITING set,
Cliff Wickman956db3c2008-02-07 00:14:43 -08002719 * which wards off any cgroup_attach_task() attempts, or task is a failed
2720 * fork, never visible to cgroup_attach_task.
Paul Menageb4f48b62007-10-18 23:39:33 -07002721 */
2722void cgroup_exit(struct task_struct *tsk, int run_callbacks)
2723{
2724 int i;
Paul Menage817929e2007-10-18 23:39:36 -07002725 struct css_set *cg;
Paul Menageb4f48b62007-10-18 23:39:33 -07002726
2727 if (run_callbacks && need_forkexit_callback) {
2728 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2729 struct cgroup_subsys *ss = subsys[i];
2730 if (ss->exit)
2731 ss->exit(ss, tsk);
2732 }
2733 }
Paul Menage817929e2007-10-18 23:39:36 -07002734
2735 /*
2736 * Unlink from the css_set task list if necessary.
2737 * Optimistically check cg_list before taking
2738 * css_set_lock
2739 */
2740 if (!list_empty(&tsk->cg_list)) {
2741 write_lock(&css_set_lock);
2742 if (!list_empty(&tsk->cg_list))
2743 list_del(&tsk->cg_list);
2744 write_unlock(&css_set_lock);
2745 }
2746
Paul Menageb4f48b62007-10-18 23:39:33 -07002747 /* Reassign the task to the init_css_set. */
2748 task_lock(tsk);
Paul Menage817929e2007-10-18 23:39:36 -07002749 cg = tsk->cgroups;
2750 tsk->cgroups = &init_css_set;
Paul Menageb4f48b62007-10-18 23:39:33 -07002751 task_unlock(tsk);
Paul Menage817929e2007-10-18 23:39:36 -07002752 if (cg)
Paul Menage81a6a5c2007-10-18 23:39:38 -07002753 put_css_set_taskexit(cg);
Paul Menageb4f48b62007-10-18 23:39:33 -07002754}
Paul Menage697f4162007-10-18 23:39:34 -07002755
2756/**
Li Zefana043e3b2008-02-23 15:24:09 -08002757 * cgroup_clone - clone the cgroup the given subsystem is attached to
2758 * @tsk: the task to be moved
2759 * @subsys: the given subsystem
2760 *
2761 * Duplicate the current cgroup in the hierarchy that the given
2762 * subsystem is attached to, and move this task into the new
2763 * child.
Paul Menage697f4162007-10-18 23:39:34 -07002764 */
2765int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
2766{
2767 struct dentry *dentry;
2768 int ret = 0;
2769 char nodename[MAX_CGROUP_TYPE_NAMELEN];
2770 struct cgroup *parent, *child;
2771 struct inode *inode;
2772 struct css_set *cg;
2773 struct cgroupfs_root *root;
2774 struct cgroup_subsys *ss;
2775
2776 /* We shouldn't be called by an unregistered subsystem */
2777 BUG_ON(!subsys->active);
2778
2779 /* First figure out what hierarchy and cgroup we're dealing
2780 * with, and pin them so we can drop cgroup_mutex */
2781 mutex_lock(&cgroup_mutex);
2782 again:
2783 root = subsys->root;
2784 if (root == &rootnode) {
2785 printk(KERN_INFO
2786 "Not cloning cgroup for unused subsystem %s\n",
2787 subsys->name);
2788 mutex_unlock(&cgroup_mutex);
2789 return 0;
2790 }
Paul Menage817929e2007-10-18 23:39:36 -07002791 cg = tsk->cgroups;
Paul Menage697f4162007-10-18 23:39:34 -07002792 parent = task_cgroup(tsk, subsys->subsys_id);
2793
2794 snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
2795
2796 /* Pin the hierarchy */
2797 atomic_inc(&parent->root->sb->s_active);
2798
Paul Menage817929e2007-10-18 23:39:36 -07002799	/* Pin the css_set so the cgroup stays alive */
2800 get_css_set(cg);
Paul Menage697f4162007-10-18 23:39:34 -07002801 mutex_unlock(&cgroup_mutex);
2802
2803 /* Now do the VFS work to create a cgroup */
2804 inode = parent->dentry->d_inode;
2805
2806 /* Hold the parent directory mutex across this operation to
2807 * stop anyone else deleting the new cgroup */
2808 mutex_lock(&inode->i_mutex);
2809 dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
2810 if (IS_ERR(dentry)) {
2811 printk(KERN_INFO
Diego Callejacfe36bd2007-11-14 16:58:54 -08002812 "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
Paul Menage697f4162007-10-18 23:39:34 -07002813 PTR_ERR(dentry));
2814 ret = PTR_ERR(dentry);
2815 goto out_release;
2816 }
2817
2818 /* Create the cgroup directory, which also creates the cgroup */
2819 ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755);
Paul Menagebd89aab2007-10-18 23:40:44 -07002820 child = __d_cgrp(dentry);
Paul Menage697f4162007-10-18 23:39:34 -07002821 dput(dentry);
2822 if (ret) {
2823 printk(KERN_INFO
2824 "Failed to create cgroup %s: %d\n", nodename,
2825 ret);
2826 goto out_release;
2827 }
2828
2829 if (!child) {
2830 printk(KERN_INFO
2831 "Couldn't find new cgroup %s\n", nodename);
2832 ret = -ENOMEM;
2833 goto out_release;
2834 }
2835
2836 /* The cgroup now exists. Retake cgroup_mutex and check
2837 * that we're still in the same state that we thought we
2838 * were. */
2839 mutex_lock(&cgroup_mutex);
2840 if ((root != subsys->root) ||
2841 (parent != task_cgroup(tsk, subsys->subsys_id))) {
2842 /* Aargh, we raced ... */
2843 mutex_unlock(&inode->i_mutex);
Paul Menage817929e2007-10-18 23:39:36 -07002844 put_css_set(cg);
Paul Menage697f4162007-10-18 23:39:34 -07002845
2846 deactivate_super(parent->root->sb);
2847 /* The cgroup is still accessible in the VFS, but
2848 * we're not going to try to rmdir() it at this
2849 * point. */
2850 printk(KERN_INFO
2851 "Race in cgroup_clone() - leaking cgroup %s\n",
2852 nodename);
2853 goto again;
2854 }
2855
2856 /* do any required auto-setup */
2857 for_each_subsys(root, ss) {
2858 if (ss->post_clone)
2859 ss->post_clone(ss, child);
2860 }
2861
2862 /* All seems fine. Finish by moving the task into the new cgroup */
Cliff Wickman956db3c2008-02-07 00:14:43 -08002863 ret = cgroup_attach_task(child, tsk);
Paul Menage697f4162007-10-18 23:39:34 -07002864 mutex_unlock(&cgroup_mutex);
2865
2866 out_release:
2867 mutex_unlock(&inode->i_mutex);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002868
2869 mutex_lock(&cgroup_mutex);
Paul Menage817929e2007-10-18 23:39:36 -07002870 put_css_set(cg);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002871 mutex_unlock(&cgroup_mutex);
Paul Menage697f4162007-10-18 23:39:34 -07002872 deactivate_super(parent->root->sb);
2873 return ret;
2874}
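
/*
 * The expected caller is the ns subsystem: when a task unshares a
 * namespace, something along the lines of
 *
 *	ret = cgroup_clone(tsk, &ns_subsys);
 *
 * creates "node_<pid>" under the task's current cgroup in whatever
 * hierarchy ns_subsys is attached to, and moves the task into it.
 */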
2875
Li Zefana043e3b2008-02-23 15:24:09 -08002876/**
2877 * cgroup_is_descendant - see if @cgrp is a descendant of current task's cgrp
2878 * @cgrp: the cgroup in question
2879 *
2880 * See if @cgrp is a descendant of the current task's cgroup in
2881 * the appropriate hierarchy.
Paul Menage697f4162007-10-18 23:39:34 -07002882 *
2883 * If we are sending in dummytop, then presumably we are creating
2884 * the top cgroup in the subsystem.
2885 *
2886 * Called only by the ns (nsproxy) cgroup.
2887 */
Paul Menagebd89aab2007-10-18 23:40:44 -07002888int cgroup_is_descendant(const struct cgroup *cgrp)
Paul Menage697f4162007-10-18 23:39:34 -07002889{
2890 int ret;
2891 struct cgroup *target;
2892 int subsys_id;
2893
Paul Menagebd89aab2007-10-18 23:40:44 -07002894 if (cgrp == dummytop)
Paul Menage697f4162007-10-18 23:39:34 -07002895 return 1;
2896
Paul Menagebd89aab2007-10-18 23:40:44 -07002897 get_first_subsys(cgrp, NULL, &subsys_id);
Paul Menage697f4162007-10-18 23:39:34 -07002898 target = task_cgroup(current, subsys_id);
Paul Menagebd89aab2007-10-18 23:40:44 -07002899	while (cgrp != target && cgrp != cgrp->top_cgroup)
2900 cgrp = cgrp->parent;
2901 ret = (cgrp == target);
Paul Menage697f4162007-10-18 23:39:34 -07002902 return ret;
2903}
Paul Menage81a6a5c2007-10-18 23:39:38 -07002904
Paul Menagebd89aab2007-10-18 23:40:44 -07002905static void check_for_release(struct cgroup *cgrp)
Paul Menage81a6a5c2007-10-18 23:39:38 -07002906{
2907 /* All of these checks rely on RCU to keep the cgroup
2908 * structure alive */
Paul Menagebd89aab2007-10-18 23:40:44 -07002909 if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
2910 && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
Paul Menage81a6a5c2007-10-18 23:39:38 -07002911		/* Control Group is currently removable. If it's not
2912 * already queued for a userspace notification, queue
2913 * it now */
2914 int need_schedule_work = 0;
2915 spin_lock(&release_list_lock);
Paul Menagebd89aab2007-10-18 23:40:44 -07002916 if (!cgroup_is_removed(cgrp) &&
2917 list_empty(&cgrp->release_list)) {
2918 list_add(&cgrp->release_list, &release_list);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002919 need_schedule_work = 1;
2920 }
2921 spin_unlock(&release_list_lock);
2922 if (need_schedule_work)
2923 schedule_work(&release_agent_work);
2924 }
2925}
2926
2927void __css_put(struct cgroup_subsys_state *css)
2928{
Paul Menagebd89aab2007-10-18 23:40:44 -07002929 struct cgroup *cgrp = css->cgroup;
Paul Menage81a6a5c2007-10-18 23:39:38 -07002930 rcu_read_lock();
Paul Menagebd89aab2007-10-18 23:40:44 -07002931 if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cgrp)) {
2932 set_bit(CGRP_RELEASABLE, &cgrp->flags);
2933 check_for_release(cgrp);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002934 }
2935 rcu_read_unlock();
2936}
2937
2938/*
2939 * Notify userspace when a cgroup is released, by running the
2940 * configured release agent with the name of the cgroup (path
2941 * relative to the root of cgroup file system) as the argument.
2942 *
2943 * Most likely, this user command will try to rmdir this cgroup.
2944 *
2945 * This races with the possibility that some other task will be
2946 * attached to this cgroup before it is removed, or that some other
2947 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
2948 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
2949 * unused, and this cgroup will be reprieved from its death sentence,
2950 * to continue to serve a useful existence. Next time it's released,
2951 * we will get notified again, if it still has 'notify_on_release' set.
2952 *
2953 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
2954 * means only wait until the task is successfully execve()'d. The
2955 * separate release agent task is forked by call_usermodehelper(),
2956 * then control in this thread returns here, without waiting for the
2957 * release agent task. We don't bother to wait because the caller of
2958 * this routine has no use for the exit status of the release agent
2959 * task, so no sense holding our caller up for that.
Paul Menage81a6a5c2007-10-18 23:39:38 -07002960 */
Paul Menage81a6a5c2007-10-18 23:39:38 -07002961static void cgroup_release_agent(struct work_struct *work)
2962{
2963 BUG_ON(work != &release_agent_work);
2964 mutex_lock(&cgroup_mutex);
2965 spin_lock(&release_list_lock);
2966 while (!list_empty(&release_list)) {
2967 char *argv[3], *envp[3];
2968 int i;
2969 char *pathbuf;
Paul Menagebd89aab2007-10-18 23:40:44 -07002970 struct cgroup *cgrp = list_entry(release_list.next,
Paul Menage81a6a5c2007-10-18 23:39:38 -07002971 struct cgroup,
2972 release_list);
Paul Menagebd89aab2007-10-18 23:40:44 -07002973 list_del_init(&cgrp->release_list);
Paul Menage81a6a5c2007-10-18 23:39:38 -07002974 spin_unlock(&release_list_lock);
2975 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2976 if (!pathbuf) {
2977 spin_lock(&release_list_lock);
2978 continue;
2979 }
2980
Paul Menagebd89aab2007-10-18 23:40:44 -07002981 if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) {
Paul Menage81a6a5c2007-10-18 23:39:38 -07002982 kfree(pathbuf);
2983 spin_lock(&release_list_lock);
2984 continue;
2985 }
2986
2987 i = 0;
Paul Menagebd89aab2007-10-18 23:40:44 -07002988 argv[i++] = cgrp->root->release_agent_path;
Paul Menage81a6a5c2007-10-18 23:39:38 -07002989 argv[i++] = (char *)pathbuf;
2990 argv[i] = NULL;
2991
2992 i = 0;
2993 /* minimal command environment */
2994 envp[i++] = "HOME=/";
2995 envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
2996 envp[i] = NULL;
2997
2998 /* Drop the lock while we invoke the usermode helper,
2999 * since the exec could involve hitting disk and hence
3000 * be a slow process */
3001 mutex_unlock(&cgroup_mutex);
3002 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
3003 kfree(pathbuf);
3004 mutex_lock(&cgroup_mutex);
3005 spin_lock(&release_list_lock);
3006 }
3007 spin_unlock(&release_list_lock);
3008 mutex_unlock(&cgroup_mutex);
3009}