blob: 04d3a5a9d56310d2b29d821bd5349ba3c9f56be2 [file] [log] [blame]
Nicholas Flintham1e3d3112013-04-10 10:48:38 +01001
2#ifndef _LINUX_WORKQUEUE_H
3#define _LINUX_WORKQUEUE_H
4
5#include <linux/timer.h>
6#include <linux/linkage.h>
7#include <linux/bitops.h>
8#include <linux/lockdep.h>
9#include <linux/threads.h>
10#include <linux/atomic.h>
11
/* Opaque; defined in kernel/workqueue.c. */
struct workqueue_struct;

struct work_struct;
/* Signature every work item callback must have. */
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The low bits of work_struct->data double as flag bits (see the
 * WORK_STRUCT_* enum below); this gives bit-op access to that word.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
18
enum {
	/* flag bits stored in the low bits of work_struct->data */
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color bits start here */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color bits start here */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,	/* width of the flush-color field */

	/* mask forms of the flag bits above */
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,	/* bit unused without debugobjects */
#endif

	/* one color (the all-ones value) is reserved for WORK_NO_COLOR */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special CPU IDs, just past the range of real CPUs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/* total number of flag+color bits at the bottom of ->data */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};
62
struct work_struct {
	atomic_long_t data;		/* flags in low bits, wq data above (see masks) */
	struct list_head entry;		/* node on a workqueue's list */
	work_func_t func;		/* callback invoked for this item */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;	/* lock dependency tracking */
#endif
};

/* Initial ->data: no CPU association, all flag bits clear. */
#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
/* Same, but marked as statically initialized for debugobjects. */
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

/* A work item whose queueing is deferred by a timer. */
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};
80
81static inline struct delayed_work *to_delayed_work(struct work_struct *work)
82{
83 return container_of(work, struct delayed_work, work);
84}
85
/* Work wrapper used by execute_in_process_context(). */
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * Expands to a lockdep_map designated initializer when lockdep is
 * configured, and to nothing otherwise.
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

/*
 * Static initializer for a work_struct @n with callback @f: flags
 * mark it statically allocated, and ->entry points at itself (an
 * empty list head).
 */
#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}
103
/* Static initializer for a delayed_work with a regular timer. */
#define __DELAYED_WORK_INITIALIZER(n, f) {				\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),				\
	}

/* As above, but with a deferrable timer. */
#define __DEFERRED_WORK_INITIALIZER(n, f) {				\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),		\
	}

/* Define and statically initialize a named work item. */
#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

#define DECLARE_DEFERRED_WORK(n, f)				\
	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)

/*
 * Swap the callback of an already-initialized work item.  Only
 * ->func is written; flags and list linkage are left untouched.
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
/* Nonzero iff @work carries the static-initializer flag. */
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
/* Debug-object hooks compile away when the option is disabled. */
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
143
#ifdef CONFIG_LOCKDEP
/*
 * Runtime initializer core: notify debugobjects, reset ->data, set
 * up a per-call-site lockdep class, empty the list entry and install
 * the callback.  @_onstack tells debugobjects whether the item lives
 * on the stack.
 */
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

/* Initialize a non-stack work item at runtime. */
#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

/* Variant for work items living on the stack. */
#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

/* Delayed work whose timer is deferrable. */
#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
192
/* True if @work has its PENDING bit set. */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/* True if the work embedded in delayed_work @w is pending. */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/* Atomically clear the PENDING bit of @work. */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
201
/* Flags and limits for alloc_workqueue() and friends. */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freezable (see CONFIG_FREEZER hooks) */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive work items */

	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* hard cap for max_active */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* per-cpu factor for unbound wqs */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* Cap for unbound workqueues, scaled by the number of possible CPUs. */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
220
/*
 * System-wide workqueues, instantiated in kernel/workqueue.c.  Names
 * suggest: default (system_wq), long-running items (system_long_wq),
 * non-reentrant (system_nrt_wq), unbound, freezable and non-reentrant
 * freezable variants — confirm exact guarantees against the .c file.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_nrt_freezable_wq;
227
/* Backend for the alloc_workqueue() wrappers below. */
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

#ifdef CONFIG_LOCKDEP
/*
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the workqueue name
 * @flags: WQ_* flags
 * @max_active: max in-flight work items
 *
 * With lockdep enabled, a static lock class key is created per call
 * site.  The lock name is @fmt itself when it is a compile-time
 * constant, otherwise the stringified @fmt expression.
 */
#define alloc_workqueue(fmt, flags, max_active, args...)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(fmt))				\
		__lock_name = (fmt);				\
	else							\
		__lock_name = #fmt;				\
								\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      &__key, __lock_name, ##args);	\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)	\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      NULL, NULL, ##args)
#endif

/*
 * alloc_ordered_workqueue - allocate an ordered workqueue
 *
 * Unbound with max_active == 1, i.e. at most one item executes at a
 * time, in queueing order.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)		\
	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

/* Legacy constructors kept for older callers. */
#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
261
extern void destroy_workqueue(struct workqueue_struct *wq);

/* Queueing onto a specific workqueue (optionally on a specific CPU). */
extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

/* Flushing and draining. */
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

/* schedule_*() variants taking no explicit workqueue argument. */
extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

/* Per-work-item flush/cancel; return value semantics in workqueue.c. */
extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

/* Introspection / tuning. */
extern void workqueue_set_max_active(struct workqueue_struct *wq,
					int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
/* NOTE(review): the next two are non-upstream additions — see their .c. */
extern int print_workqueue(void);
extern unsigned long get_work_func_of_task_struct(struct task_struct *tsk);
301static inline bool cancel_delayed_work(struct delayed_work *work)
302{
303 bool ret;
304
305 ret = del_timer_sync(&work->timer);
306 if (ret)
307 work_clear_pending(&work->work);
308 return ret;
309}
310
311static inline bool __cancel_delayed_work(struct delayed_work *work)
312{
313 bool ret;
314
315 ret = del_timer(&work->timer);
316 if (ret)
317 work_clear_pending(&work->work);
318 return ret;
319}
320
#ifndef CONFIG_SMP
/* On UP there is only one CPU: just run @fn(@arg) synchronously. */
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif
329
#ifdef CONFIG_FREEZER
/* Workqueue freezing hooks; only built when the freezer is configured. */
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#endif	/* _LINUX_WORKQUEUE_H */