blob: dfb1e75b3d3c9830d2af895226f2da583f2ec05d [file] [log] [blame]
Matt Helsleydc52ddc2008-10-18 20:27:21 -07001/*
2 * cgroup_freezer.c - control group freezer subsystem
3 *
4 * Copyright IBM Corporation, 2007
5 *
6 * Author : Cedric Le Goater <clg@fr.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2.1 of the GNU Lesser General Public License
10 * as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it would be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090018#include <linux/slab.h>
Matt Helsleydc52ddc2008-10-18 20:27:21 -070019#include <linux/cgroup.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/freezer.h>
23#include <linux/seq_file.h>
24
/* Per-cgroup freezer states; order matches freezer_state_strs[]. */
enum freezer_state {
	CGROUP_THAWED = 0,	/* tasks run normally */
	CGROUP_FREEZING,	/* freeze requested; not all tasks frozen yet */
	CGROUP_FROZEN,		/* all tasks in the cgroup are frozen */
};

/* State kept for each freezer cgroup. */
struct freezer {
	struct cgroup_subsys_state css;	/* embedded cgroup subsystem state */
	enum freezer_state state;	/* current state, see enum above */
	spinlock_t lock; /* protects _writes_ to state */
};
36
37static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
39{
40 return container_of(
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
42 struct freezer, css);
43}
44
45static inline struct freezer *task_freezer(struct task_struct *task)
46{
47 return container_of(task_subsys_state(task, freezer_subsys_id),
48 struct freezer, css);
49}
50
Tomasz Buchertd5de4dd2010-10-27 15:33:32 -070051static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
52{
53 enum freezer_state state = task_freezer(task)->state;
54 return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
55}
56
/* Locked wrapper around __cgroup_freezing_or_frozen(). */
int cgroup_freezing_or_frozen(struct task_struct *task)
{
	int ret;

	task_lock(task);
	ret = __cgroup_freezing_or_frozen(task);
	task_unlock(task);

	return ret;
}
65
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
/* Human-readable names, indexed by enum freezer_state. */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
75
76/*
77 * State diagram
78 * Transitions are caused by userspace writes to the freezer.state file.
79 * The values in parenthesis are state labels. The rest are edge labels.
80 *
Matt Helsley81dcf332008-10-18 20:27:23 -070081 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
82 * ^ ^ | |
83 * | \_______THAWED_______/ |
84 * \__________________________THAWED____________/
Matt Helsleydc52ddc2008-10-18 20:27:21 -070085 */
86
87struct cgroup_subsys freezer_subsys;
88
89/* Locks taken and their ordering
90 * ------------------------------
Matt Helsleydc52ddc2008-10-18 20:27:21 -070091 * cgroup_mutex (AKA cgroup_lock)
Matt Helsleydc52ddc2008-10-18 20:27:21 -070092 * freezer->lock
Matt Helsley8f775782010-05-10 23:18:47 +020093 * css_set_lock
94 * task->alloc_lock (AKA task_lock)
Matt Helsleydc52ddc2008-10-18 20:27:21 -070095 * task->sighand->siglock
96 *
97 * cgroup code forces css_set_lock to be taken before task->alloc_lock
98 *
99 * freezer_create(), freezer_destroy():
100 * cgroup_mutex [ by cgroup core ]
101 *
Matt Helsley8f775782010-05-10 23:18:47 +0200102 * freezer_can_attach():
103 * cgroup_mutex (held by caller of can_attach)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700104 *
Matt Helsley8f775782010-05-10 23:18:47 +0200105 * cgroup_freezing_or_frozen():
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700106 * task->alloc_lock (to get task's cgroup)
107 *
108 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700109 * freezer->lock
110 * sighand->siglock (if the cgroup is freezing)
111 *
112 * freezer_read():
113 * cgroup_mutex
114 * freezer->lock
Matt Helsley8f775782010-05-10 23:18:47 +0200115 * write_lock css_set_lock (cgroup iterator start)
116 * task->alloc_lock
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700117 * read_lock css_set_lock (cgroup iterator start)
118 *
119 * freezer_write() (freeze):
120 * cgroup_mutex
121 * freezer->lock
Matt Helsley8f775782010-05-10 23:18:47 +0200122 * write_lock css_set_lock (cgroup iterator start)
123 * task->alloc_lock
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700124 * read_lock css_set_lock (cgroup iterator start)
Matt Helsley8f775782010-05-10 23:18:47 +0200125 * sighand->siglock (fake signal delivery inside freeze_task())
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700126 *
127 * freezer_write() (unfreeze):
128 * cgroup_mutex
129 * freezer->lock
Matt Helsley8f775782010-05-10 23:18:47 +0200130 * write_lock css_set_lock (cgroup iterator start)
131 * task->alloc_lock
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700132 * read_lock css_set_lock (cgroup iterator start)
Matt Helsley8f775782010-05-10 23:18:47 +0200133 * task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700134 * sighand->siglock
135 */
136static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
137 struct cgroup *cgroup)
138{
139 struct freezer *freezer;
140
141 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
142 if (!freezer)
143 return ERR_PTR(-ENOMEM);
144
145 spin_lock_init(&freezer->lock);
Matt Helsley81dcf332008-10-18 20:27:23 -0700146 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700147 return &freezer->css;
148}
149
/* Free the freezer of a dying cgroup (cgroup core ->destroy callback). */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
155
Michal Hocko953d0c82011-11-22 07:44:47 -0800156/* task is frozen or will freeze immediately when next it gets woken */
157static bool is_task_frozen_enough(struct task_struct *task)
158{
159 return frozen(task) ||
160 (task_is_stopped_or_traced(task) && freezing(task));
161}
162
Matt Helsley957a4ee2008-10-18 20:27:22 -0700163/*
164 * The call to cgroup_lock() in the freezer.state write method prevents
165 * a write to that file racing against an attach, and hence the
166 * can_attach() result will remain valid until the attach completes.
167 */
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700168static int freezer_can_attach(struct cgroup_subsys *ss,
169 struct cgroup *new_cgroup,
Ben Blumf780bdb2011-05-26 16:25:19 -0700170 struct task_struct *task)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700171{
172 struct freezer *freezer;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700173
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174 if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
175 const struct cred *cred = current_cred(), *tcred;
176
177 tcred = __task_cred(task);
178 if (cred->euid != tcred->uid && cred->euid != tcred->suid)
179 return -EPERM;
180 }
181
Li Zefan80a6a2c2008-10-29 14:00:52 -0700182 /*
183 * Anything frozen can't move or be moved to/from.
Li Zefan80a6a2c2008-10-29 14:00:52 -0700184 */
Matt Helsley957a4ee2008-10-18 20:27:22 -0700185
Tomasz Buchert0bdba582010-10-27 15:33:33 -0700186 freezer = cgroup_freezer(new_cgroup);
187 if (freezer->state != CGROUP_THAWED)
Matt Helsley957a4ee2008-10-18 20:27:22 -0700188 return -EBUSY;
189
Ben Blumf780bdb2011-05-26 16:25:19 -0700190 return 0;
191}
192
193static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
194{
Tomasz Buchert0bdba582010-10-27 15:33:33 -0700195 rcu_read_lock();
Ben Blumf780bdb2011-05-26 16:25:19 -0700196 if (__cgroup_freezing_or_frozen(tsk)) {
Tomasz Buchert0bdba582010-10-27 15:33:33 -0700197 rcu_read_unlock();
Matt Helsley957a4ee2008-10-18 20:27:22 -0700198 return -EBUSY;
Tomasz Buchert0bdba582010-10-27 15:33:33 -0700199 }
200 rcu_read_unlock();
Li Zefan80a6a2c2008-10-29 14:00:52 -0700201 return 0;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700202}
203
/*
 * Make a freshly forked task catch up with its cgroup's freezer state:
 * if the cgroup is FREEZING, ask the new task to freeze too.
 */
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
	 * function call. Nevertheless, apply RCU read-side critical
	 * section to suppress RCU lockdep false positives.
	 */
	rcu_read_lock();
	freezer = task_freezer(task);
	rcu_read_unlock();

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		return;

	spin_lock_irq(&freezer->lock);
	/* A FROZEN cgroup cannot gain a running task that could fork. */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task, true);
	spin_unlock_irq(&freezer->lock);
}
234
235/*
236 * caller must hold freezer->lock
237 */
Tomasz Buchert2d3cbf82010-10-27 15:33:34 -0700238static void update_if_frozen(struct cgroup *cgroup,
Matt Helsley1aece342008-10-18 20:27:24 -0700239 struct freezer *freezer)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700240{
241 struct cgroup_iter it;
242 struct task_struct *task;
243 unsigned int nfrozen = 0, ntotal = 0;
Tomasz Buchert2d3cbf82010-10-27 15:33:34 -0700244 enum freezer_state old_state = freezer->state;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700245
246 cgroup_iter_start(cgroup, &it);
247 while ((task = cgroup_iter_next(cgroup, &it))) {
248 ntotal++;
Michal Hocko953d0c82011-11-22 07:44:47 -0800249 if (is_task_frozen_enough(task))
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700250 nfrozen++;
251 }
252
Tomasz Buchert2d3cbf82010-10-27 15:33:34 -0700253 if (old_state == CGROUP_THAWED) {
254 BUG_ON(nfrozen > 0);
255 } else if (old_state == CGROUP_FREEZING) {
256 if (nfrozen == ntotal)
257 freezer->state = CGROUP_FROZEN;
258 } else { /* old_state == CGROUP_FROZEN */
259 BUG_ON(nfrozen != ntotal);
260 }
261
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700262 cgroup_iter_end(cgroup, &it);
263}
264
/*
 * Report the cgroup's freezer state through the freezer.state control
 * file.  Lock order: cgroup_mutex, then freezer->lock (see the lock
 * ordering comment near the top of this file).
 */
static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
			struct seq_file *m)
{
	struct freezer *freezer;
	enum freezer_state state;

	if (!cgroup_lock_live_group(cgroup))
		return -ENODEV;

	freezer = cgroup_freezer(cgroup);
	spin_lock_irq(&freezer->lock);
	state = freezer->state;
	if (state == CGROUP_FREEZING) {
		/* We change from FREEZING to FROZEN lazily if the cgroup was
		 * only partially frozen when we exited write. */
		update_if_frozen(cgroup, freezer);
		state = freezer->state;
	}
	spin_unlock_irq(&freezer->lock);
	cgroup_unlock();

	seq_puts(m, freezer_state_strs[state]);
	seq_putc(m, '\n');
	return 0;
}
290
291static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
292{
293 struct cgroup_iter it;
294 struct task_struct *task;
295 unsigned int num_cant_freeze_now = 0;
296
Matt Helsley81dcf332008-10-18 20:27:23 -0700297 freezer->state = CGROUP_FREEZING;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700298 cgroup_iter_start(cgroup, &it);
299 while ((task = cgroup_iter_next(cgroup, &it))) {
300 if (!freeze_task(task, true))
301 continue;
Michal Hocko953d0c82011-11-22 07:44:47 -0800302 if (is_task_frozen_enough(task))
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700303 continue;
304 if (!freezing(task) && !freezer_should_skip(task))
305 num_cant_freeze_now++;
306 }
307 cgroup_iter_end(cgroup, &it);
308
309 return num_cant_freeze_now ? -EBUSY : 0;
310}
311
Li Zefan00c2e632008-10-29 14:00:53 -0700312static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700313{
314 struct cgroup_iter it;
315 struct task_struct *task;
316
317 cgroup_iter_start(cgroup, &it);
318 while ((task = cgroup_iter_next(cgroup, &it))) {
Li Zefan00c2e632008-10-29 14:00:53 -0700319 thaw_process(task);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700320 }
321 cgroup_iter_end(cgroup, &it);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700322
Li Zefan00c2e632008-10-29 14:00:53 -0700323 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700324}
325
/*
 * Drive the cgroup's freezer toward @goal_state (THAWED or FROZEN).
 * Returns 0 on success, -EBUSY if some tasks could not be frozen yet.
 * Called with cgroup_mutex held (via freezer_write()).
 */
static int freezer_change_state(struct cgroup *cgroup,
				enum freezer_state goal_state)
{
	struct freezer *freezer;
	int retval = 0;

	freezer = cgroup_freezer(cgroup);

	spin_lock_irq(&freezer->lock);

	/* Pick up any lazy FREEZING -> FROZEN completion first. */
	update_if_frozen(cgroup, freezer);
	if (goal_state == freezer->state)
		goto out;

	switch (goal_state) {
	case CGROUP_THAWED:
		unfreeze_cgroup(cgroup, freezer);
		break;
	case CGROUP_FROZEN:
		retval = try_to_freeze_cgroup(cgroup, freezer);
		break;
	default:
		/* Userspace can never request FREEZING directly. */
		BUG();
	}
out:
	spin_unlock_irq(&freezer->lock);

	return retval;
}
355
356static int freezer_write(struct cgroup *cgroup,
357 struct cftype *cft,
358 const char *buffer)
359{
360 int retval;
361 enum freezer_state goal_state;
362
Matt Helsley81dcf332008-10-18 20:27:23 -0700363 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
364 goal_state = CGROUP_THAWED;
365 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
366 goal_state = CGROUP_FROZEN;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700367 else
Li Zefan3b1b3f62008-11-12 13:26:50 -0800368 return -EINVAL;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700369
370 if (!cgroup_lock_live_group(cgroup))
371 return -ENODEV;
372 retval = freezer_change_state(cgroup, goal_state);
373 cgroup_unlock();
374 return retval;
375}
376
/* Control files exported by the freezer subsystem (freezer.state). */
static struct cftype files[] = {
	{
		.name = "state",
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
};
384
385static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
386{
Li Zefan3b1b3f62008-11-12 13:26:50 -0800387 if (!cgroup->parent)
388 return 0;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700389 return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
390}
391
/* Freezer subsystem descriptor registered with the cgroup core. */
struct cgroup_subsys freezer_subsys = {
	.name		= "freezer",
	.create		= freezer_create,
	.destroy	= freezer_destroy,
	.populate	= freezer_populate,
	.subsys_id	= freezer_subsys_id,
	.can_attach	= freezer_can_attach,
	.can_attach_task = freezer_can_attach_task,
	.pre_attach	= NULL,
	.attach_task	= NULL,
	.attach		= NULL,
	.fork		= freezer_fork,
	.exit		= NULL,
};
405};