/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

/*
 * HACK: prevent sleeping-while-atomic warnings due to ARM signal handling
 * disabling irqs
 */
static inline bool try_to_freeze_nowarn(void)
{
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze() causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	/*
	 * This causes problems for ARM targets and is a known
	 * problem upstream.
	 * might_sleep();
	 */
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();
	return try_to_freeze_unsafe();
}

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);

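/*
 * Illustrative only: a minimal sketch of how a freezable kernel thread might
 * use set_freezable() and try_to_freeze().  The loop body and the do_work()
 * helper are hypothetical, not part of this interface:
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		try_to_freeze();
 *		do_work();
 *		schedule_timeout_interruptible(HZ);
 *	}
 */
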
#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed.  Fortunately, in the ____call_usermodehelper() case
 * the parent won't really block freeze_processes(), since
 * ____call_usermodehelper() (the child) does a little before exec/exit and
 * it can't be frozen before waking up the parent.
 */
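
/*
 * A minimal sketch of the vfork-parent pattern described above (illustrative
 * only, not a caller in this header; the real users live in the vfork wait
 * and usermodehelper paths):
 *
 *	current->flags |= PF_FREEZER_SKIP;
 *	wait_for_completion(&vfork);
 *	current->flags &= ~PF_FREEZER_SKIP;
 *	try_to_freeze();
 */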

/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, a freezer_do_not_count() /
 * freezer_count() pair wraps a scheduling operation and little else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	try_to_freeze();
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	smp_mb();
	try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining if the
 * frozen state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining whether the target
 * frozen state is reached.  IOW, if this function returns %true, @p is
 * considered frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
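
/*
 * For illustration only: the freezer uses the helpers above roughly like
 * this when counting tasks that still need to freeze (a sketch of the logic
 * in kernel/power/process.c, not a verbatim copy; "todo" is the count of
 * not-yet-frozen tasks):
 *
 *	if (p == current || !freeze_task(p))
 *		continue;
 *	if (!freezer_should_skip(p))
 *		todo++;
 */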

/*
 * These functions are intended to be used whenever you want to allow a task
 * that's sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen.
 * Note that none of them returns any clear indication of whether a freeze
 * event happened while inside the function.
 */

/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count();
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count_unsafe();
}

/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout(timeout);
	freezer_count();
	return __retval;
}

/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.
 * Do not call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_interruptible(timeout);
	freezer_count();
	return __retval;
}

/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count();
	return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count_unsafe();
	return __retval;
}

/*
 * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
		unsigned long delta, const enum hrtimer_mode mode)
{
	int __retval;
	freezer_do_not_count();
	__retval = schedule_hrtimeout_range(expires, delta, mode);
	freezer_count();
	return __retval;
}

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

#define wait_event_freezekillable(wq, condition)		\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_killable(wq, (condition));	\
	freezer_count();					\
	__retval;						\
})

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)		\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_killable(wq, (condition));	\
	freezer_count_unsafe();					\
	__retval;						\
})

#define wait_event_freezable(wq, condition)			\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_interruptible(wq, (condition));	\
	freezer_count();					\
	__retval;						\
})

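/*
 * Illustrative caller-side sketch for wait_event_freezable() (the wait queue
 * "my_waitq" and the data_ready/process_data() helpers are hypothetical, not
 * part of this interface):
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		wait_event_freezable(my_waitq,
 *				     data_ready || kthread_should_stop());
 *		if (data_ready)
 *			process_data();
 *	}
 */
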
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __retval = timeout;					\
	freezer_do_not_count();						\
	__retval = wait_event_interruptible_timeout(wq, (condition),	\
						    __retval);		\
	freezer_count();						\
	__retval;							\
})

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_interruptible_exclusive(wq, condition);	\
	freezer_count();						\
	__retval;							\
})

#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline bool freezer_should_skip(struct task_struct *p) { return false; }
static inline void set_freezable(void) {}

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezable(wq, condition)				\
	wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout)		\
	wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezable_exclusive(wq, condition)			\
	wait_event_interruptible_exclusive(wq, condition)

#define wait_event_freezekillable(wq, condition)			\
	wait_event_killable(wq, condition)

#define wait_event_freezekillable_unsafe(wq, condition)			\
	wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */