/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/delay.h>

enum {
	/* global_cwq flags */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected. Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;

struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	unsigned int		flags;		/* L: flags */
	int			id;		/* I: worker id */
};

/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* L: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	unsigned long		single_cpu;	/* cpu for single cpu wq */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

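/*
 * Each work is tagged with a color when queued (packed into the upper
 * bits of work->data) and cwq->nr_in_flight[] is tracked per color;
 * flush_workqueue() uses this to tell works queued before a flush
 * from those queued after it. The helpers below pack, extract and
 * advance that color.
 */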
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * Work data points to the cwq while a work is on queue. Once
 * execution starts, it points to the cpu the work was last on. This
 * can be distinguished by comparing the data value against
 * PAGE_OFFSET.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data. These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work. gcwq is available once the work has been
 * queued anywhere after initialization. cwq is available only from
 * queueing until execution starts.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static inline unsigned long get_work_data(struct work_struct *work)
{
	return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = get_work_data(work);

	return data >= PAGE_OFFSET ? (void *)data : NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = get_work_data(work);
	unsigned int cpu;

	if (data >= PAGE_OFFSET)
		return ((struct cpu_workqueue_struct *)data)->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == NR_CPUS)
		return NULL;

	BUG_ON(cpu >= num_possible_cpus());
	return get_gcwq(cpu);
}

/* Return the first worker. Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}

/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq. @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq. This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}

/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up_worker(cwq->gcwq);
}

/**
 * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
 * @cwq: cwq to unbind
 *
 * Try to unbind @cwq from single cpu workqueue processing. If
 * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
{
	struct workqueue_struct *wq = cwq->wq;
	struct global_cwq *gcwq = cwq->gcwq;

	BUG_ON(wq->single_cpu != gcwq->cpu);
	/*
	 * Unbind from workqueue if @cwq is not frozen. If frozen,
	 * thaw_workqueues() will either restart processing on this
	 * cpu or unbind if empty. This keeps works queued while
	 * frozen fully ordered and flushable.
	 */
	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
		wq->single_cpu = NR_CPUS;
	}
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned long flags;
	bool arbitrate;

	debug_work_activate(work);

	/*
	 * Determine gcwq to use. SINGLE_CPU is inherently
	 * NON_REENTRANT, so test it first.
	 */
	if (!(wq->flags & WQ_SINGLE_CPU)) {
		struct global_cwq *last_gcwq;

		/*
		 * It's multi cpu. If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		unsigned int req_cpu = cpu;

		/*
		 * It's a bit more complex for single cpu workqueues.
		 * We first need to determine which cpu is going to be
		 * used. If no cpu is currently serving this
		 * workqueue, arbitrate using atomic accesses to
		 * wq->single_cpu; otherwise, use the current one.
		 */
	retry:
		cpu = wq->single_cpu;
		arbitrate = cpu == NR_CPUS;
		if (arbitrate)
			cpu = req_cpu;

		gcwq = get_gcwq(cpu);
		spin_lock_irqsave(&gcwq->lock, flags);

		/*
		 * The following cmpxchg() is a full barrier paired
		 * with smp_wmb() in cwq_unbind_single_cpu() and
		 * guarantees that all changes to wq->st_* fields are
		 * visible on the new cpu after this point.
		 */
		if (arbitrate)
			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);

		if (unlikely(wq->single_cpu != cpu)) {
			spin_unlock_irqrestore(&gcwq->lock, flags);
			goto retry;
		}
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &gcwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

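/*
 * Timer callback for queue_delayed_work_on(). The cwq was stashed in
 * work->data by set_work_cwq() when the timer was armed; once the
 * delay expires, requeue the work on the local CPU.
 */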
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		struct global_cwq *gcwq = get_work_gcwq(work);
		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);
		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		set_work_cwq(work, get_cwq(lcpu, wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state. Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (unlikely(worker->flags & WORKER_ROGUE))
		wake_up_all(&gcwq->trustee_wait);
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state. Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker->flags &= ~WORKER_IDLE;
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

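/*
 * Allocate a worker and initialize its list heads. create_worker()
 * fills in the gcwq, id and task fields afterwards.
 */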
static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
	}
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @gcwq: gcwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @gcwq. The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep. Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
{
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on. Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind)
		kthread_bind(worker->task, gcwq->cpu);
	else
		worker->task->flags |= PF_THREAD_BOUND;

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head. Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work. This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

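/*
 * Promote the first delayed work on @cwq to the gcwq worklist and
 * account it as active. Called with gcwq->lock held.
 */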
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->gcwq->worklist, NULL);
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	if (!list_empty(&cwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (cwq->nr_active < cwq->max_active)
			cwq_activate_first_delayed(cwq);
	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
		/* this was the last work, unbind from single cpu */
		cwq_unbind_single_cpu(cwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher. It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}

/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work. This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing. As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	work_func_t f = work->func;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too. To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu. Check whether anyone is
	 * already processing the work. If so, defer the work to the
	 * currently executing one.
	 */
	collision = __find_worker_executing_work(gcwq, bwh, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	/* record the current cpu number in the work data and dequeue */
	set_work_cpu(work, gcwq->cpu);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works. Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;

woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	while (!list_empty(&gcwq->worklist)) {
		struct work_struct *work =
			list_first_entry(&gcwq->worklist,
					 struct work_struct, entry);

		/*
		 * The following is a rather inefficient way to close
		 * race window against cpu hotplug operations. Will
		 * be replaced soon.
		 */
		if (unlikely(!(worker->flags & WORKER_ROGUE) &&
			     !cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(gcwq->cpu)))) {
			spin_unlock_irq(&gcwq->lock);
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(gcwq->cpu));
			cpu_relax();
			spin_lock_irq(&gcwq->lock);
			goto recheck;
		}

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	}

	/*
	 * gcwq->lock is held and there's no work to process, sleep.
	 * Workers are woken up only while holding gcwq->lock, so
	 * setting the current state before releasing gcwq->lock is
	 * enough to prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

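/*
 * A wq_barrier is queued behind a target work by insert_wq_barrier().
 * When the barrier work runs, wq_barrier_func() completes barr->done,
 * letting the flusher waiting on that completion proceed.
 */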
static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution. Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled. This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}

/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1. If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color. If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush. %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full. The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow. Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue. This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;
1468
1469 wq->work_color = work_next_color(wq->work_color);
1470
1471 list_splice_tail_init(&wq->flusher_overflow,
1472 &wq->flusher_queue);
1473 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
1474 }
1475
1476 if (list_empty(&wq->flusher_queue)) {
1477 BUG_ON(wq->flush_color != wq->work_color);
1478 break;
1479 }
1480
1481 /*
1482 * Need to flush more colors. Make the next flusher
1483 * the new first flusher and arm cwqs.
1484 */
1485 BUG_ON(wq->flush_color == wq->work_color);
1486 BUG_ON(wq->flush_color != next->flush_color);
1487
1488 list_del_init(&next->list);
1489 wq->first_flusher = next;
1490
1491 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
1492 break;
1493
1494 /*
1495 * Meh... this color is already done, clear first
1496 * flusher and repeat cascading.
1497 */
1498 wq->first_flusher = NULL;
1499 }
1500
1501out_unlock:
1502 mutex_unlock(&wq->flush_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503}
Dave Jonesae90dd52006-06-30 01:40:45 -04001504EXPORT_SYMBOL_GPL(flush_workqueue);
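/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * might rely on flush_workqueue() to wait for everything it has queued so
 * far.  The names my_demo_wq, my_demo_work and my_demo_sync() are
 * hypothetical.
 */
static struct workqueue_struct *my_demo_wq;
static struct work_struct my_demo_work;

static void my_demo_sync(void)
{
	queue_work(my_demo_wq, &my_demo_work);
	/*
	 * Returns only after my_demo_work and everything else queued on
	 * my_demo_wq before this call has finished executing.
	 */
	flush_workqueue(my_demo_wq);
}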
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
Oleg Nesterovdb700892008-07-25 01:47:49 -07001506/**
1507 * flush_work - block until a work_struct's callback has terminated
1508 * @work: the work which is to be flushed
1509 *
Oleg Nesterova67da702008-07-25 01:47:52 -07001510 * Returns false if @work has already terminated.
1511 *
Oleg Nesterovdb700892008-07-25 01:47:49 -07001512 * It is expected that, prior to calling flush_work(), the caller has
1513 * arranged for the work to not be requeued, otherwise it doesn't make
1514 * sense to use this function.
1515 */
1516int flush_work(struct work_struct *work)
1517{
Tejun Heoaffee4b2010-06-29 10:07:12 +02001518 struct worker *worker = NULL;
Tejun Heo8b03ae32010-06-29 10:07:12 +02001519 struct global_cwq *gcwq;
Tejun Heo7a22ad72010-06-29 10:07:13 +02001520 struct cpu_workqueue_struct *cwq;
Oleg Nesterovdb700892008-07-25 01:47:49 -07001521 struct wq_barrier barr;
1522
1523 might_sleep();
Tejun Heo7a22ad72010-06-29 10:07:13 +02001524 gcwq = get_work_gcwq(work);
1525 if (!gcwq)
Oleg Nesterovdb700892008-07-25 01:47:49 -07001526 return 0;
Oleg Nesterova67da702008-07-25 01:47:52 -07001527
Tejun Heo8b03ae32010-06-29 10:07:12 +02001528 spin_lock_irq(&gcwq->lock);
Oleg Nesterovdb700892008-07-25 01:47:49 -07001529 if (!list_empty(&work->entry)) {
1530 /*
1531 * See the comment near try_to_grab_pending()->smp_rmb().
Tejun Heo7a22ad72010-06-29 10:07:13 +02001532 * If it was re-queued to a different gcwq under us, we
1533 * are not going to wait.
Oleg Nesterovdb700892008-07-25 01:47:49 -07001534 */
1535 smp_rmb();
Tejun Heo7a22ad72010-06-29 10:07:13 +02001536 cwq = get_work_cwq(work);
1537 if (unlikely(!cwq || gcwq != cwq->gcwq))
Tejun Heo4690c4a2010-06-29 10:07:10 +02001538 goto already_gone;
Oleg Nesterovdb700892008-07-25 01:47:49 -07001539 } else {
Tejun Heo7a22ad72010-06-29 10:07:13 +02001540 worker = find_worker_executing_work(gcwq, work);
Tejun Heoaffee4b2010-06-29 10:07:12 +02001541 if (!worker)
Tejun Heo4690c4a2010-06-29 10:07:10 +02001542 goto already_gone;
Tejun Heo7a22ad72010-06-29 10:07:13 +02001543 cwq = worker->current_cwq;
Oleg Nesterovdb700892008-07-25 01:47:49 -07001544 }
Oleg Nesterovdb700892008-07-25 01:47:49 -07001545
Tejun Heoaffee4b2010-06-29 10:07:12 +02001546 insert_wq_barrier(cwq, &barr, work, worker);
Tejun Heo8b03ae32010-06-29 10:07:12 +02001547 spin_unlock_irq(&gcwq->lock);
Tejun Heo7a22ad72010-06-29 10:07:13 +02001548
1549 lock_map_acquire(&cwq->wq->lockdep_map);
1550 lock_map_release(&cwq->wq->lockdep_map);
1551
Oleg Nesterovdb700892008-07-25 01:47:49 -07001552 wait_for_completion(&barr.done);
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09001553 destroy_work_on_stack(&barr.work);
Oleg Nesterovdb700892008-07-25 01:47:49 -07001554 return 1;
Tejun Heo4690c4a2010-06-29 10:07:10 +02001555already_gone:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001556 spin_unlock_irq(&gcwq->lock);
Tejun Heo4690c4a2010-06-29 10:07:10 +02001557 return 0;
Oleg Nesterovdb700892008-07-25 01:47:49 -07001558}
1559EXPORT_SYMBOL_GPL(flush_work);
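/*
 * Illustrative sketch, not part of the original file: flush_work() only
 * makes sense once the caller has arranged for the work not to be
 * requeued, as noted above.  struct my_nic and my_nic_quiesce() are
 * hypothetical.
 */
struct my_nic {
	struct work_struct	rx_work;
	bool			stopping;
};

static void my_nic_quiesce(struct my_nic *nic)
{
	/* arrange for rx_work not to be requeued first ... */
	nic->stopping = true;
	/* ... then wait for a pending/running instance to finish */
	flush_work(&nic->rx_work);
}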
1560
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001561/*
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001562 * Upon a successful return (>= 0), the caller "owns" the
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001563 * WORK_STRUCT_PENDING bit, so this work can't be re-armed in any way.
1564 */
1565static int try_to_grab_pending(struct work_struct *work)
1566{
Tejun Heo8b03ae32010-06-29 10:07:12 +02001567 struct global_cwq *gcwq;
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001568 int ret = -1;
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001569
Tejun Heo22df02b2010-06-29 10:07:10 +02001570 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001571 return 0;
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001572
1573 /*
1574 * The queueing is in progress, or it is already queued. Try to
1575 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1576 */
Tejun Heo7a22ad72010-06-29 10:07:13 +02001577 gcwq = get_work_gcwq(work);
1578 if (!gcwq)
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001579 return ret;
1580
Tejun Heo8b03ae32010-06-29 10:07:12 +02001581 spin_lock_irq(&gcwq->lock);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001582 if (!list_empty(&work->entry)) {
1583 /*
Tejun Heo7a22ad72010-06-29 10:07:13 +02001584 * This work is queued, but perhaps we locked the wrong gcwq.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001585 * In that case we must see the new value after rmb(), see
1586 * insert_work()->wmb().
1587 */
1588 smp_rmb();
Tejun Heo7a22ad72010-06-29 10:07:13 +02001589 if (gcwq == get_work_gcwq(work)) {
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09001590 debug_work_deactivate(work);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001591 list_del_init(&work->entry);
Tejun Heo7a22ad72010-06-29 10:07:13 +02001592 cwq_dec_nr_in_flight(get_work_cwq(work),
1593 get_work_color(work));
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001594 ret = 1;
1595 }
1596 }
Tejun Heo8b03ae32010-06-29 10:07:12 +02001597 spin_unlock_irq(&gcwq->lock);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001598
1599 return ret;
1600}
1601
Tejun Heo7a22ad72010-06-29 10:07:13 +02001602static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001603{
1604 struct wq_barrier barr;
Tejun Heoaffee4b2010-06-29 10:07:12 +02001605 struct worker *worker;
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001606
Tejun Heo8b03ae32010-06-29 10:07:12 +02001607 spin_lock_irq(&gcwq->lock);
Tejun Heoaffee4b2010-06-29 10:07:12 +02001608
Tejun Heo7a22ad72010-06-29 10:07:13 +02001609 worker = find_worker_executing_work(gcwq, work);
1610 if (unlikely(worker))
1611 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
Tejun Heoaffee4b2010-06-29 10:07:12 +02001612
Tejun Heo8b03ae32010-06-29 10:07:12 +02001613 spin_unlock_irq(&gcwq->lock);
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001614
Tejun Heoaffee4b2010-06-29 10:07:12 +02001615 if (unlikely(worker)) {
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001616 wait_for_completion(&barr.done);
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09001617 destroy_work_on_stack(&barr.work);
1618 }
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001619}
1620
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001621static void wait_on_work(struct work_struct *work)
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001622{
Oleg Nesterovb1f4ec12007-05-09 02:34:12 -07001623 int cpu;
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001624
Oleg Nesterovf293ea92007-05-09 02:34:10 -07001625 might_sleep();
1626
Ingo Molnar3295f0e2008-08-11 10:30:30 +02001627 lock_map_acquire(&work->lockdep_map);
1628 lock_map_release(&work->lockdep_map);
Johannes Berg4e6045f2007-10-18 23:39:55 -07001629
Tejun Heo15376632010-06-29 10:07:11 +02001630 for_each_possible_cpu(cpu)
Tejun Heo7a22ad72010-06-29 10:07:13 +02001631 wait_on_cpu_work(get_gcwq(cpu), work);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001632}
1633
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001634static int __cancel_work_timer(struct work_struct *work,
1635 struct timer_list* timer)
1636{
1637 int ret;
1638
1639 do {
1640 ret = (timer && likely(del_timer(timer)));
1641 if (!ret)
1642 ret = try_to_grab_pending(work);
1643 wait_on_work(work);
1644 } while (unlikely(ret < 0));
1645
Tejun Heo7a22ad72010-06-29 10:07:13 +02001646 clear_work_data(work);
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001647 return ret;
1648}
1649
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001650/**
1651 * cancel_work_sync - block until a work_struct's callback has terminated
1652 * @work: the work which is to be flushed
1653 *
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001654 * Returns true if @work was pending.
1655 *
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001656 * cancel_work_sync() will cancel the work if it is queued. If the work's
1657 * callback appears to be running, cancel_work_sync() will block until it
1658 * has completed.
1659 *
1660 * It is possible to use this function if the work re-queues itself. It can
1661 * cancel the work even if it migrates to another workqueue, however in that
1662 * case it only guarantees that work->func() has completed on the last queued
1663 * workqueue.
1664 *
1665 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
1666 * pending, otherwise it goes into a busy-wait loop until the timer expires.
1667 *
1668 * The caller must ensure that workqueue_struct on which this work was last
1669 * queued can't be destroyed before this function returns.
1670 */
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001671int cancel_work_sync(struct work_struct *work)
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001672{
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001673 return __cancel_work_timer(work, NULL);
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001674}
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07001675EXPORT_SYMBOL_GPL(cancel_work_sync);
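/*
 * Illustrative sketch, not part of the original file: typical teardown
 * use of cancel_work_sync().  struct my_card and my_card_remove() are
 * hypothetical.
 */
struct my_card {
	struct work_struct	reset_work;
};

static void my_card_remove(struct my_card *card)
{
	/*
	 * Dequeue reset_work if it is pending and wait for it if its
	 * callback is currently running; afterwards it is guaranteed
	 * not to be executing anymore.
	 */
	cancel_work_sync(&card->reset_work);
}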
Oleg Nesterovb89deed2007-05-09 02:33:52 -07001676
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001677/**
Oleg Nesterovf5a421a2007-07-15 23:41:44 -07001678 * cancel_delayed_work_sync - reliably kill off a delayed work.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001679 * @dwork: the delayed work struct
1680 *
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001681 * Returns true if @dwork was pending.
1682 *
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001683 * It is possible to use this function if @dwork rearms itself via queue_work()
1684 * or queue_delayed_work(). See also the comment for cancel_work_sync().
1685 */
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001686int cancel_delayed_work_sync(struct delayed_work *dwork)
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001687{
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07001688 return __cancel_work_timer(&dwork->work, &dwork->timer);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001689}
Oleg Nesterovf5a421a2007-07-15 23:41:44 -07001690EXPORT_SYMBOL(cancel_delayed_work_sync);
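/*
 * Illustrative sketch, not part of the original file: a self-rearming
 * poller stopped with cancel_delayed_work_sync().  my_poll_dwork,
 * my_poll_fn() and my_poll_stop() are hypothetical.
 */
static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
	/* poll the hardware, then rearm for one second later */
	schedule_delayed_work(&my_poll_dwork, HZ);
}

static void my_poll_stop(void)
{
	/* kills the timer and the work even though the work rearms itself */
	cancel_delayed_work_sync(&my_poll_dwork);
}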
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001692static struct workqueue_struct *keventd_wq __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001694/**
1695 * schedule_work - put work task in global workqueue
1696 * @work: job to be done
1697 *
Bart Van Assche5b0f437d2009-07-30 19:00:53 +02001698 * Returns zero if @work was already on the kernel-global workqueue and
1699 * non-zero otherwise.
1700 *
1701 * This puts a job in the kernel-global workqueue if it was not already
1702 * queued and leaves it in the same position on the kernel-global
1703 * workqueue otherwise.
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001704 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001705int schedule_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706{
1707 return queue_work(keventd_wq, work);
1708}
Dave Jonesae90dd52006-06-30 01:40:45 -04001709EXPORT_SYMBOL(schedule_work);
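/*
 * Illustrative sketch, not part of the original file: deferring
 * non-atomic processing to the kernel-global workqueue.  my_event_work,
 * my_event_fn() and my_raise_event() are hypothetical.
 */
static void my_event_fn(struct work_struct *work)
{
	/* runs later in process context and may sleep */
}
static DECLARE_WORK(my_event_work, my_event_fn);

static void my_raise_event(void)
{
	/* cheap and safe from atomic context; work runs later */
	schedule_work(&my_event_work);
}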
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
Zhang Ruic1a220e2008-07-23 21:28:39 -07001711/**
 1712 * schedule_work_on - put a work item on a specific cpu
 1713 * @cpu: cpu to put the work item on
 1714 * @work: job to be done
 1715 *
 1716 * This puts a job on a specific cpu.
1717 */
1718int schedule_work_on(int cpu, struct work_struct *work)
1719{
1720 return queue_work_on(cpu, keventd_wq, work);
1721}
1722EXPORT_SYMBOL(schedule_work_on);
1723
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001724/**
1725 * schedule_delayed_work - put work task in global workqueue after delay
David Howells52bad642006-11-22 14:54:01 +00001726 * @dwork: job to be done
1727 * @delay: number of jiffies to wait or 0 for immediate execution
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001728 *
1729 * After waiting for a given time this puts a job in the kernel-global
1730 * workqueue.
1731 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001732int schedule_delayed_work(struct delayed_work *dwork,
Ingo Molnar82f67cd2007-02-16 01:28:13 -08001733 unsigned long delay)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734{
David Howells52bad642006-11-22 14:54:01 +00001735 return queue_delayed_work(keventd_wq, dwork, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736}
Dave Jonesae90dd52006-06-30 01:40:45 -04001737EXPORT_SYMBOL(schedule_delayed_work);
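/*
 * Illustrative sketch, not part of the original file: arming a one-shot
 * timeout handler on the kernel-global workqueue.  my_timeout_dwork,
 * my_timeout_fn() and my_arm_timeout() are hypothetical.
 */
static void my_timeout_fn(struct work_struct *work)
{
	/* runs roughly two seconds after my_arm_timeout(), may sleep */
}
static DECLARE_DELAYED_WORK(my_timeout_dwork, my_timeout_fn);

static void my_arm_timeout(void)
{
	schedule_delayed_work(&my_timeout_dwork, 2 * HZ);
}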
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001739/**
Linus Torvalds8c53e462009-10-14 09:16:42 -07001740 * flush_delayed_work - block until a delayed_work's callback has terminated
1741 * @dwork: the delayed work which is to be flushed
1742 *
1743 * Any timeout is cancelled, and any pending work is run immediately.
1744 */
1745void flush_delayed_work(struct delayed_work *dwork)
1746{
1747 if (del_timer_sync(&dwork->timer)) {
Tejun Heo7a22ad72010-06-29 10:07:13 +02001748 __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
Tejun Heo4690c4a2010-06-29 10:07:10 +02001749 &dwork->work);
Linus Torvalds8c53e462009-10-14 09:16:42 -07001750 put_cpu();
1751 }
1752 flush_work(&dwork->work);
1753}
1754EXPORT_SYMBOL(flush_delayed_work);
1755
1756/**
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001757 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
1758 * @cpu: cpu to use
David Howells52bad642006-11-22 14:54:01 +00001759 * @dwork: job to be done
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001760 * @delay: number of jiffies to wait
1761 *
1762 * After waiting for a given time this puts a job in the kernel-global
1763 * workqueue on the specified CPU.
1764 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765int schedule_delayed_work_on(int cpu,
David Howells52bad642006-11-22 14:54:01 +00001766 struct delayed_work *dwork, unsigned long delay)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767{
David Howells52bad642006-11-22 14:54:01 +00001768 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769}
Dave Jonesae90dd52006-06-30 01:40:45 -04001770EXPORT_SYMBOL(schedule_delayed_work_on);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771
Andrew Mortonb6136772006-06-25 05:47:49 -07001772/**
1773 * schedule_on_each_cpu - call a function on each online CPU from keventd
1774 * @func: the function to call
Andrew Mortonb6136772006-06-25 05:47:49 -07001775 *
1776 * Returns zero on success.
1777 * Returns -ve errno on failure.
1778 *
Andrew Mortonb6136772006-06-25 05:47:49 -07001779 * schedule_on_each_cpu() is very slow.
1780 */
David Howells65f27f32006-11-22 14:55:48 +00001781int schedule_on_each_cpu(work_func_t func)
Christoph Lameter15316ba2006-01-08 01:00:43 -08001782{
1783 int cpu;
Andi Kleen65a64462009-10-14 06:22:47 +02001784 int orig = -1;
Andrew Mortonb6136772006-06-25 05:47:49 -07001785 struct work_struct *works;
Christoph Lameter15316ba2006-01-08 01:00:43 -08001786
Andrew Mortonb6136772006-06-25 05:47:49 -07001787 works = alloc_percpu(struct work_struct);
1788 if (!works)
Christoph Lameter15316ba2006-01-08 01:00:43 -08001789 return -ENOMEM;
Andrew Mortonb6136772006-06-25 05:47:49 -07001790
Gautham R Shenoy95402b32008-01-25 21:08:02 +01001791 get_online_cpus();
Tejun Heo93981802009-11-17 14:06:20 -08001792
1793 /*
 1794 * When running in keventd, don't schedule a work item on
 1795 * itself; we can just call it directly because the work queue
 1796 * is already bound. This is also faster.
1797 */
1798 if (current_is_keventd())
1799 orig = raw_smp_processor_id();
1800
Christoph Lameter15316ba2006-01-08 01:00:43 -08001801 for_each_online_cpu(cpu) {
Ingo Molnar9bfb1832006-12-18 20:05:09 +01001802 struct work_struct *work = per_cpu_ptr(works, cpu);
1803
1804 INIT_WORK(work, func);
Andi Kleen65a64462009-10-14 06:22:47 +02001805 if (cpu != orig)
Tejun Heo93981802009-11-17 14:06:20 -08001806 schedule_work_on(cpu, work);
Andi Kleen65a64462009-10-14 06:22:47 +02001807 }
Tejun Heo93981802009-11-17 14:06:20 -08001808 if (orig >= 0)
1809 func(per_cpu_ptr(works, orig));
1810
1811 for_each_online_cpu(cpu)
1812 flush_work(per_cpu_ptr(works, cpu));
1813
Gautham R Shenoy95402b32008-01-25 21:08:02 +01001814 put_online_cpus();
Andrew Mortonb6136772006-06-25 05:47:49 -07001815 free_percpu(works);
Christoph Lameter15316ba2006-01-08 01:00:43 -08001816 return 0;
1817}
1818
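/*
 * Illustrative sketch, not part of the original file: running a function
 * once on every online CPU and waiting for all of them to finish.
 * my_percpu_sync() and my_sync_all_cpus() are hypothetical.
 */
static void my_percpu_sync(struct work_struct *unused)
{
	/* executes once on every online CPU, in process context */
}

static int my_sync_all_cpus(void)
{
	/* returns 0 on success or a negative errno */
	return schedule_on_each_cpu(my_percpu_sync);
}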
Alan Sterneef6a7d2010-02-12 17:39:21 +09001819/**
1820 * flush_scheduled_work - ensure that any scheduled work has run to completion.
1821 *
1822 * Forces execution of the kernel-global workqueue and blocks until its
1823 * completion.
1824 *
1825 * Think twice before calling this function! It's very easy to get into
1826 * trouble if you don't take great care. Either of the following situations
1827 * will lead to deadlock:
1828 *
1829 * One of the work items currently on the workqueue needs to acquire
1830 * a lock held by your code or its caller.
1831 *
1832 * Your code is running in the context of a work routine.
1833 *
1834 * They will be detected by lockdep when they occur, but the first might not
1835 * occur very often. It depends on what work items are on the workqueue and
1836 * what locks they need, which you have no control over.
1837 *
1838 * In most situations flushing the entire workqueue is overkill; you merely
1839 * need to know that a particular work item isn't queued and isn't running.
1840 * In such cases you should use cancel_delayed_work_sync() or
1841 * cancel_work_sync() instead.
1842 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843void flush_scheduled_work(void)
1844{
1845 flush_workqueue(keventd_wq);
1846}
Dave Jonesae90dd52006-06-30 01:40:45 -04001847EXPORT_SYMBOL(flush_scheduled_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
1849/**
James Bottomley1fa44ec2006-02-23 12:43:43 -06001850 * execute_in_process_context - reliably execute the routine in process context
1851 * @fn: the function to execute
James Bottomley1fa44ec2006-02-23 12:43:43 -06001852 * @ew: guaranteed storage for the execute work structure (must
1853 * be available when the work executes)
1854 *
1855 * Executes the function immediately if process context is available,
1856 * otherwise schedules the function for delayed execution.
1857 *
1858 * Returns: 0 - function was executed
1859 * 1 - function was scheduled for execution
1860 */
David Howells65f27f32006-11-22 14:55:48 +00001861int execute_in_process_context(work_func_t fn, struct execute_work *ew)
James Bottomley1fa44ec2006-02-23 12:43:43 -06001862{
1863 if (!in_interrupt()) {
David Howells65f27f32006-11-22 14:55:48 +00001864 fn(&ew->work);
James Bottomley1fa44ec2006-02-23 12:43:43 -06001865 return 0;
1866 }
1867
David Howells65f27f32006-11-22 14:55:48 +00001868 INIT_WORK(&ew->work, fn);
James Bottomley1fa44ec2006-02-23 12:43:43 -06001869 schedule_work(&ew->work);
1870
1871 return 1;
1872}
1873EXPORT_SYMBOL_GPL(execute_in_process_context);
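/*
 * Illustrative sketch, not part of the original file: a release path
 * that may be called from interrupt context and therefore defers the
 * actual freeing when necessary.  struct my_obj, my_obj_release() and
 * my_obj_put() are hypothetical.
 */
struct my_obj {
	struct execute_work	ew;
	/* ... resources that must be released in process context ... */
};

static void my_obj_release(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, ew.work);

	kfree(obj);
}

static void my_obj_put(struct my_obj *obj)
{
	/* runs my_obj_release() immediately if possible, else via keventd */
	execute_in_process_context(my_obj_release, &obj->ew);
}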
1874
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875int keventd_up(void)
1876{
1877 return keventd_wq != NULL;
1878}
1879
1880int current_is_keventd(void)
1881{
Tejun Heo7e116292010-06-29 10:07:13 +02001882 bool found = false;
1883 unsigned int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
Tejun Heo7e116292010-06-29 10:07:13 +02001885 /*
1886 * There no longer is one-to-one relation between worker and
1887 * work queue and a worker task might be unbound from its cpu
1888 * if the cpu was offlined. Match all busy workers. This
1889 * function will go away once dynamic pool is implemented.
1890 */
1891 for_each_possible_cpu(cpu) {
1892 struct global_cwq *gcwq = get_gcwq(cpu);
1893 struct worker *worker;
1894 struct hlist_node *pos;
1895 unsigned long flags;
1896 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
Tejun Heo7e116292010-06-29 10:07:13 +02001898 spin_lock_irqsave(&gcwq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899
Tejun Heo7e116292010-06-29 10:07:13 +02001900 for_each_busy_worker(worker, i, pos, gcwq) {
1901 if (worker->task == current) {
1902 found = true;
1903 break;
1904 }
1905 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
Tejun Heo7e116292010-06-29 10:07:13 +02001907 spin_unlock_irqrestore(&gcwq->lock, flags);
1908 if (found)
1909 break;
1910 }
1911
1912 return found;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913}
1914
Tejun Heo0f900042010-06-29 10:07:11 +02001915static struct cpu_workqueue_struct *alloc_cwqs(void)
1916{
1917 /*
1918 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
1919 * Make sure that the alignment isn't lower than that of
1920 * unsigned long long.
1921 */
1922 const size_t size = sizeof(struct cpu_workqueue_struct);
1923 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
1924 __alignof__(unsigned long long));
1925 struct cpu_workqueue_struct *cwqs;
1926#ifndef CONFIG_SMP
1927 void *ptr;
1928
1929 /*
 1930 * On UP, the percpu allocator doesn't honor the alignment
 1931 * parameter and simply uses an arch-dependent default. Allocate
 1932 * enough room to align the cwq and put an extra pointer at the
 1933 * end pointing back to the originally allocated pointer, which
 1934 * will be used for freeing.
1935 *
1936 * FIXME: This really belongs to UP percpu code. Update UP
1937 * percpu code to honor alignment and remove this ugliness.
1938 */
1939 ptr = __alloc_percpu(size + align + sizeof(void *), 1);
1940 cwqs = PTR_ALIGN(ptr, align);
1941 *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
1942#else
1943 /* On SMP, percpu allocator can do it itself */
1944 cwqs = __alloc_percpu(size, align);
1945#endif
1946 /* just in case, make sure it's actually aligned */
1947 BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
1948 return cwqs;
1949}
1950
1951static void free_cwqs(struct cpu_workqueue_struct *cwqs)
1952{
1953#ifndef CONFIG_SMP
1954 /* on UP, the pointer to free is stored right after the cwq */
1955 if (cwqs)
1956 free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
1957#else
1958 free_percpu(cwqs);
1959#endif
1960}
1961
Johannes Berg4e6045f2007-10-18 23:39:55 -07001962struct workqueue_struct *__create_workqueue_key(const char *name,
Tejun Heo97e37d72010-06-29 10:07:10 +02001963 unsigned int flags,
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001964 int max_active,
Johannes Bergeb13ba82008-01-16 09:51:58 +01001965 struct lock_class_key *key,
1966 const char *lock_name)
Oleg Nesterov3af244332007-05-09 02:34:09 -07001967{
1968 struct workqueue_struct *wq;
Tejun Heoc34056a2010-06-29 10:07:11 +02001969 bool failed = false;
1970 unsigned int cpu;
Oleg Nesterov3af244332007-05-09 02:34:09 -07001971
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001972 max_active = clamp_val(max_active, 1, INT_MAX);
1973
Oleg Nesterov3af244332007-05-09 02:34:09 -07001974 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
1975 if (!wq)
Tejun Heo4690c4a2010-06-29 10:07:10 +02001976 goto err;
Oleg Nesterov3af244332007-05-09 02:34:09 -07001977
Tejun Heo0f900042010-06-29 10:07:11 +02001978 wq->cpu_wq = alloc_cwqs();
Tejun Heo4690c4a2010-06-29 10:07:10 +02001979 if (!wq->cpu_wq)
1980 goto err;
Oleg Nesterov3af244332007-05-09 02:34:09 -07001981
Tejun Heo97e37d72010-06-29 10:07:10 +02001982 wq->flags = flags;
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02001983 wq->saved_max_active = max_active;
Tejun Heo73f53c42010-06-29 10:07:11 +02001984 mutex_init(&wq->flush_mutex);
1985 atomic_set(&wq->nr_cwqs_to_flush, 0);
1986 INIT_LIST_HEAD(&wq->flusher_queue);
1987 INIT_LIST_HEAD(&wq->flusher_overflow);
Tejun Heo502ca9d2010-06-29 10:07:13 +02001988 wq->single_cpu = NR_CPUS;
1989
Oleg Nesterov3af244332007-05-09 02:34:09 -07001990 wq->name = name;
Johannes Bergeb13ba82008-01-16 09:51:58 +01001991 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
Oleg Nesterovcce1a162007-05-09 02:34:13 -07001992 INIT_LIST_HEAD(&wq->list);
Oleg Nesterov3af244332007-05-09 02:34:09 -07001993
Tejun Heo15376632010-06-29 10:07:11 +02001994 cpu_maps_update_begin();
1995 /*
1996 * We must initialize cwqs for each possible cpu even if we
 1997 * are going to call destroy_workqueue() in the end. Otherwise
1998 * cpu_up() can hit the uninitialized cwq once we drop the
1999 * lock.
2000 */
2001 for_each_possible_cpu(cpu) {
2002 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
Tejun Heo8b03ae32010-06-29 10:07:12 +02002003 struct global_cwq *gcwq = get_gcwq(cpu);
Tejun Heo15376632010-06-29 10:07:11 +02002004
Tejun Heo0f900042010-06-29 10:07:11 +02002005 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
Tejun Heo8b03ae32010-06-29 10:07:12 +02002006 cwq->gcwq = gcwq;
Tejun Heoc34056a2010-06-29 10:07:11 +02002007 cwq->wq = wq;
Tejun Heo73f53c42010-06-29 10:07:11 +02002008 cwq->flush_color = -1;
Tejun Heo1e19ffc2010-06-29 10:07:12 +02002009 cwq->max_active = max_active;
Tejun Heo1e19ffc2010-06-29 10:07:12 +02002010 INIT_LIST_HEAD(&cwq->delayed_works);
Tejun Heo15376632010-06-29 10:07:11 +02002011
Tejun Heoc34056a2010-06-29 10:07:11 +02002012 if (failed)
Tejun Heo15376632010-06-29 10:07:11 +02002013 continue;
Tejun Heo7e116292010-06-29 10:07:13 +02002014 cwq->worker = create_worker(gcwq, cpu_online(cpu));
Tejun Heoc34056a2010-06-29 10:07:11 +02002015 if (cwq->worker)
2016 start_worker(cwq->worker);
Tejun Heo15376632010-06-29 10:07:11 +02002017 else
Tejun Heoc34056a2010-06-29 10:07:11 +02002018 failed = true;
Oleg Nesterov3af244332007-05-09 02:34:09 -07002019 }
2020
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002021 /*
2022 * workqueue_lock protects global freeze state and workqueues
2023 * list. Grab it, set max_active accordingly and add the new
2024 * workqueue to workqueues list.
2025 */
Tejun Heo15376632010-06-29 10:07:11 +02002026 spin_lock(&workqueue_lock);
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002027
2028 if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2029 for_each_possible_cpu(cpu)
2030 get_cwq(cpu, wq)->max_active = 0;
2031
Tejun Heo15376632010-06-29 10:07:11 +02002032 list_add(&wq->list, &workqueues);
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002033
Tejun Heo15376632010-06-29 10:07:11 +02002034 spin_unlock(&workqueue_lock);
2035
2036 cpu_maps_update_done();
2037
Tejun Heoc34056a2010-06-29 10:07:11 +02002038 if (failed) {
Oleg Nesterov3af244332007-05-09 02:34:09 -07002039 destroy_workqueue(wq);
2040 wq = NULL;
2041 }
2042 return wq;
Tejun Heo4690c4a2010-06-29 10:07:10 +02002043err:
2044 if (wq) {
Tejun Heo0f900042010-06-29 10:07:11 +02002045 free_cwqs(wq->cpu_wq);
Tejun Heo4690c4a2010-06-29 10:07:10 +02002046 kfree(wq);
2047 }
2048 return NULL;
Oleg Nesterov3af244332007-05-09 02:34:09 -07002049}
Johannes Berg4e6045f2007-10-18 23:39:55 -07002050EXPORT_SYMBOL_GPL(__create_workqueue_key);
Oleg Nesterov3af244332007-05-09 02:34:09 -07002051
Oleg Nesterov3af244332007-05-09 02:34:09 -07002052/**
2053 * destroy_workqueue - safely terminate a workqueue
2054 * @wq: target workqueue
2055 *
2056 * Safely destroy a workqueue. All work currently pending will be done first.
2057 */
2058void destroy_workqueue(struct workqueue_struct *wq)
2059{
Tejun Heoc8e55f32010-06-29 10:07:12 +02002060 unsigned int cpu;
Oleg Nesterov3af244332007-05-09 02:34:09 -07002061
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002062 flush_workqueue(wq);
2063
2064 /*
 2065 * The wq list is used to freeze the wq; remove it from the list
 2066 * after flushing is complete in case a freeze races us.
2067 */
Oleg Nesterov3da1c842008-07-25 01:47:50 -07002068 cpu_maps_update_begin();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002069 spin_lock(&workqueue_lock);
Oleg Nesterovb1f4ec12007-05-09 02:34:12 -07002070 list_del(&wq->list);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002071 spin_unlock(&workqueue_lock);
Tejun Heo15376632010-06-29 10:07:11 +02002072 cpu_maps_update_done();
Oleg Nesterov3af244332007-05-09 02:34:09 -07002073
Tejun Heo73f53c42010-06-29 10:07:11 +02002074 for_each_possible_cpu(cpu) {
2075 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
Tejun Heo7e116292010-06-29 10:07:13 +02002076 struct global_cwq *gcwq = cwq->gcwq;
Tejun Heo73f53c42010-06-29 10:07:11 +02002077 int i;
2078
Tejun Heoc34056a2010-06-29 10:07:11 +02002079 if (cwq->worker) {
Tejun Heo7e116292010-06-29 10:07:13 +02002080 retry:
2081 spin_lock_irq(&gcwq->lock);
2082 /*
2083 * Worker can only be destroyed while idle.
2084 * Wait till it becomes idle. This is ugly
2085 * and prone to starvation. It will go away
2086 * once dynamic worker pool is implemented.
2087 */
2088 if (!(cwq->worker->flags & WORKER_IDLE)) {
2089 spin_unlock_irq(&gcwq->lock);
2090 msleep(100);
2091 goto retry;
2092 }
Tejun Heoc34056a2010-06-29 10:07:11 +02002093 destroy_worker(cwq->worker);
2094 cwq->worker = NULL;
Tejun Heo7e116292010-06-29 10:07:13 +02002095 spin_unlock_irq(&gcwq->lock);
Tejun Heo73f53c42010-06-29 10:07:11 +02002096 }
2097
2098 for (i = 0; i < WORK_NR_COLORS; i++)
2099 BUG_ON(cwq->nr_in_flight[i]);
Tejun Heo1e19ffc2010-06-29 10:07:12 +02002100 BUG_ON(cwq->nr_active);
2101 BUG_ON(!list_empty(&cwq->delayed_works));
Tejun Heo73f53c42010-06-29 10:07:11 +02002102 }
Oleg Nesterov3af244332007-05-09 02:34:09 -07002103
Tejun Heo0f900042010-06-29 10:07:11 +02002104 free_cwqs(wq->cpu_wq);
Oleg Nesterov3af244332007-05-09 02:34:09 -07002105 kfree(wq);
2106}
2107EXPORT_SYMBOL_GPL(destroy_workqueue);
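/*
 * Illustrative sketch, not part of the original file: the usual
 * create/queue/destroy life cycle of a private workqueue, using the
 * create_workqueue() wrapper around the function above.  my_priv_wq,
 * my_priv_work, my_priv_fn(), my_module_init() and my_module_exit() are
 * hypothetical.
 */
static struct workqueue_struct *my_priv_wq;
static struct work_struct my_priv_work;

static void my_priv_fn(struct work_struct *work)
{
	/* runs on my_priv_wq's worker in process context */
}

static int my_module_init(void)
{
	my_priv_wq = create_workqueue("my_priv");
	if (!my_priv_wq)
		return -ENOMEM;
	INIT_WORK(&my_priv_work, my_priv_fn);
	queue_work(my_priv_wq, &my_priv_work);
	return 0;
}

static void my_module_exit(void)
{
	/* flushes pending work, then tears down the workers and cwqs */
	destroy_workqueue(my_priv_wq);
}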
2108
Tejun Heodb7bccf2010-06-29 10:07:12 +02002109/*
2110 * CPU hotplug.
2111 *
2112 * CPU hotplug is implemented by allowing cwqs to be detached from
2113 * CPU, running with unbound workers and allowing them to be
2114 * reattached later if the cpu comes back online. A separate thread
2115 * is created to govern cwqs in such state and is called the trustee.
2116 *
2117 * Trustee states and their descriptions.
2118 *
2119 * START Command state used on startup. On CPU_DOWN_PREPARE, a
2120 * new trustee is started with this state.
2121 *
2122 * IN_CHARGE Once started, trustee will enter this state after
2123 * making all existing workers rogue. DOWN_PREPARE waits
2124 * for trustee to enter this state. After reaching
2125 * IN_CHARGE, trustee tries to execute the pending
2126 * worklist until it's empty and the state is set to
2127 * BUTCHER, or the state is set to RELEASE.
2128 *
2129 * BUTCHER Command state which is set by the cpu callback after
 2130 * the cpu has gone down. Once this state is set, the trustee
2131 * knows that there will be no new works on the worklist
2132 * and once the worklist is empty it can proceed to
2133 * killing idle workers.
2134 *
2135 * RELEASE Command state which is set by the cpu callback if the
2136 * cpu down has been canceled or it has come online
2137 * again. After recognizing this state, trustee stops
2138 * trying to drain or butcher and transits to DONE.
2139 *
2140 * DONE Trustee will enter this state after BUTCHER or RELEASE
2141 * is complete.
2142 *
2143 * trustee CPU draining
2144 * took over down complete
2145 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2146 * | | ^
2147 * | CPU is back online v return workers |
2148 * ----------------> RELEASE --------------
2149 */
2150
2151/**
2152 * trustee_wait_event_timeout - timed event wait for trustee
2153 * @cond: condition to wait for
2154 * @timeout: timeout in jiffies
2155 *
2156 * wait_event_timeout() for trustee to use. Handles locking and
2157 * checks for RELEASE request.
2158 *
2159 * CONTEXT:
2160 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2161 * multiple times. To be used by trustee.
2162 *
2163 * RETURNS:
2164 * Positive indicating left time if @cond is satisfied, 0 if timed
2165 * out, -1 if canceled.
2166 */
2167#define trustee_wait_event_timeout(cond, timeout) ({ \
2168 long __ret = (timeout); \
2169 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
2170 __ret) { \
2171 spin_unlock_irq(&gcwq->lock); \
2172 __wait_event_timeout(gcwq->trustee_wait, (cond) || \
2173 (gcwq->trustee_state == TRUSTEE_RELEASE), \
2174 __ret); \
2175 spin_lock_irq(&gcwq->lock); \
2176 } \
2177 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
2178})
2179
2180/**
2181 * trustee_wait_event - event wait for trustee
2182 * @cond: condition to wait for
2183 *
2184 * wait_event() for trustee to use. Automatically handles locking and
2185 * checks for CANCEL request.
2186 *
2187 * CONTEXT:
2188 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2189 * multiple times. To be used by trustee.
2190 *
2191 * RETURNS:
2192 * 0 if @cond is satisfied, -1 if canceled.
2193 */
2194#define trustee_wait_event(cond) ({ \
2195 long __ret1; \
2196 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
2197 __ret1 < 0 ? -1 : 0; \
2198})
2199
2200static int __cpuinit trustee_thread(void *__gcwq)
2201{
2202 struct global_cwq *gcwq = __gcwq;
2203 struct worker *worker;
2204 struct hlist_node *pos;
2205 int i;
2206
2207 BUG_ON(gcwq->cpu != smp_processor_id());
2208
2209 spin_lock_irq(&gcwq->lock);
2210 /*
Tejun Heo502ca9d2010-06-29 10:07:13 +02002211 * Make all workers rogue. Trustee must be bound to the
2212 * target cpu and can't be cancelled.
Tejun Heodb7bccf2010-06-29 10:07:12 +02002213 */
2214 BUG_ON(gcwq->cpu != smp_processor_id());
2215
2216 list_for_each_entry(worker, &gcwq->idle_list, entry)
Tejun Heo502ca9d2010-06-29 10:07:13 +02002217 worker->flags |= WORKER_ROGUE;
Tejun Heodb7bccf2010-06-29 10:07:12 +02002218
2219 for_each_busy_worker(worker, i, pos, gcwq)
Tejun Heo502ca9d2010-06-29 10:07:13 +02002220 worker->flags |= WORKER_ROGUE;
Tejun Heodb7bccf2010-06-29 10:07:12 +02002221
2222 /*
2223 * We're now in charge. Notify and proceed to drain. We need
2224 * to keep the gcwq running during the whole CPU down
2225 * procedure as other cpu hotunplug callbacks may need to
2226 * flush currently running tasks.
2227 */
2228 gcwq->trustee_state = TRUSTEE_IN_CHARGE;
2229 wake_up_all(&gcwq->trustee_wait);
2230
2231 /*
2232 * The original cpu is in the process of dying and may go away
2233 * anytime now. When that happens, we and all workers would
 2234 * be migrated to other cpus. Try draining any remaining work.
2235 * Note that if the gcwq is frozen, there may be frozen works
2236 * in freezeable cwqs. Don't declare completion while frozen.
2237 */
2238 while (gcwq->nr_workers != gcwq->nr_idle ||
2239 gcwq->flags & GCWQ_FREEZING ||
2240 gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
2241 /* give a breather */
2242 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
2243 break;
2244 }
2245
2246 /* notify completion */
2247 gcwq->trustee = NULL;
2248 gcwq->trustee_state = TRUSTEE_DONE;
2249 wake_up_all(&gcwq->trustee_wait);
2250 spin_unlock_irq(&gcwq->lock);
2251 return 0;
2252}
2253
2254/**
2255 * wait_trustee_state - wait for trustee to enter the specified state
2256 * @gcwq: gcwq the trustee of interest belongs to
2257 * @state: target state to wait for
2258 *
2259 * Wait for the trustee to reach @state. DONE is already matched.
2260 *
2261 * CONTEXT:
2262 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2263 * multiple times. To be used by cpu_callback.
2264 */
2265static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
2266{
2267 if (!(gcwq->trustee_state == state ||
2268 gcwq->trustee_state == TRUSTEE_DONE)) {
2269 spin_unlock_irq(&gcwq->lock);
2270 __wait_event(gcwq->trustee_wait,
2271 gcwq->trustee_state == state ||
2272 gcwq->trustee_state == TRUSTEE_DONE);
2273 spin_lock_irq(&gcwq->lock);
2274 }
2275}
2276
Oleg Nesterov3af244332007-05-09 02:34:09 -07002277static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
2278 unsigned long action,
2279 void *hcpu)
2280{
2281 unsigned int cpu = (unsigned long)hcpu;
Tejun Heodb7bccf2010-06-29 10:07:12 +02002282 struct global_cwq *gcwq = get_gcwq(cpu);
2283 struct task_struct *new_trustee = NULL;
2284 struct worker *worker;
2285 struct hlist_node *pos;
2286 unsigned long flags;
2287 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07002289 action &= ~CPU_TASKS_FROZEN;
2290
Tejun Heodb7bccf2010-06-29 10:07:12 +02002291 switch (action) {
2292 case CPU_DOWN_PREPARE:
2293 new_trustee = kthread_create(trustee_thread, gcwq,
2294 "workqueue_trustee/%d\n", cpu);
2295 if (IS_ERR(new_trustee))
2296 return notifier_from_errno(PTR_ERR(new_trustee));
2297 kthread_bind(new_trustee, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 }
2299
Tejun Heodb7bccf2010-06-29 10:07:12 +02002300 /* some are called w/ irq disabled, don't disturb irq status */
2301 spin_lock_irqsave(&gcwq->lock, flags);
2302
2303 switch (action) {
2304 case CPU_DOWN_PREPARE:
2305 /* initialize trustee and tell it to acquire the gcwq */
2306 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
2307 gcwq->trustee = new_trustee;
2308 gcwq->trustee_state = TRUSTEE_START;
2309 wake_up_process(gcwq->trustee);
2310 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
2311 break;
2312
2313 case CPU_POST_DEAD:
2314 gcwq->trustee_state = TRUSTEE_BUTCHER;
2315 break;
2316
2317 case CPU_DOWN_FAILED:
2318 case CPU_ONLINE:
2319 if (gcwq->trustee_state != TRUSTEE_DONE) {
2320 gcwq->trustee_state = TRUSTEE_RELEASE;
2321 wake_up_process(gcwq->trustee);
2322 wait_trustee_state(gcwq, TRUSTEE_DONE);
2323 }
2324
Tejun Heo502ca9d2010-06-29 10:07:13 +02002325 /* clear ROGUE from all workers */
Tejun Heodb7bccf2010-06-29 10:07:12 +02002326 list_for_each_entry(worker, &gcwq->idle_list, entry)
Tejun Heo502ca9d2010-06-29 10:07:13 +02002327 worker->flags &= ~WORKER_ROGUE;
Tejun Heodb7bccf2010-06-29 10:07:12 +02002328
2329 for_each_busy_worker(worker, i, pos, gcwq)
Tejun Heo502ca9d2010-06-29 10:07:13 +02002330 worker->flags &= ~WORKER_ROGUE;
Tejun Heodb7bccf2010-06-29 10:07:12 +02002331 break;
2332 }
2333
2334 spin_unlock_irqrestore(&gcwq->lock, flags);
2335
Tejun Heo15376632010-06-29 10:07:11 +02002336 return notifier_from_errno(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338
Rusty Russell2d3854a2008-11-05 13:39:10 +11002339#ifdef CONFIG_SMP
Rusty Russell8ccad402009-01-16 15:31:15 -08002340
Rusty Russell2d3854a2008-11-05 13:39:10 +11002341struct work_for_cpu {
Andrew Morton6b440032009-04-09 09:50:37 -06002342 struct completion completion;
Rusty Russell2d3854a2008-11-05 13:39:10 +11002343 long (*fn)(void *);
2344 void *arg;
2345 long ret;
2346};
2347
Andrew Morton6b440032009-04-09 09:50:37 -06002348static int do_work_for_cpu(void *_wfc)
Rusty Russell2d3854a2008-11-05 13:39:10 +11002349{
Andrew Morton6b440032009-04-09 09:50:37 -06002350 struct work_for_cpu *wfc = _wfc;
Rusty Russell2d3854a2008-11-05 13:39:10 +11002351 wfc->ret = wfc->fn(wfc->arg);
Andrew Morton6b440032009-04-09 09:50:37 -06002352 complete(&wfc->completion);
2353 return 0;
Rusty Russell2d3854a2008-11-05 13:39:10 +11002354}
2355
2356/**
2357 * work_on_cpu - run a function in user context on a particular cpu
2358 * @cpu: the cpu to run on
2359 * @fn: the function to run
2360 * @arg: the function arg
2361 *
Rusty Russell31ad9082009-01-16 15:31:15 -08002362 * This will return the value @fn returns.
2363 * It is up to the caller to ensure that the cpu doesn't go offline.
Andrew Morton6b440032009-04-09 09:50:37 -06002364 * The caller must not hold any locks which would prevent @fn from completing.
Rusty Russell2d3854a2008-11-05 13:39:10 +11002365 */
2366long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
2367{
Andrew Morton6b440032009-04-09 09:50:37 -06002368 struct task_struct *sub_thread;
2369 struct work_for_cpu wfc = {
2370 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
2371 .fn = fn,
2372 .arg = arg,
2373 };
Rusty Russell2d3854a2008-11-05 13:39:10 +11002374
Andrew Morton6b440032009-04-09 09:50:37 -06002375 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
2376 if (IS_ERR(sub_thread))
2377 return PTR_ERR(sub_thread);
2378 kthread_bind(sub_thread, cpu);
2379 wake_up_process(sub_thread);
2380 wait_for_completion(&wfc.completion);
Rusty Russell2d3854a2008-11-05 13:39:10 +11002381 return wfc.ret;
2382}
2383EXPORT_SYMBOL_GPL(work_on_cpu);
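/*
 * Illustrative sketch, not part of the original file: running a function
 * on a specific CPU and collecting its return value.  my_probe_fn() and
 * my_probe_on_cpu() are hypothetical.
 */
static long my_probe_fn(void *arg)
{
	/* runs in process context, bound to the requested CPU */
	return 0;
}

static long my_probe_on_cpu(unsigned int cpu)
{
	long ret;

	get_online_cpus();		/* keep @cpu from going offline */
	ret = work_on_cpu(cpu, my_probe_fn, NULL);
	put_online_cpus();
	return ret;
}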
2384#endif /* CONFIG_SMP */
2385
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002386#ifdef CONFIG_FREEZER
2387
2388/**
2389 * freeze_workqueues_begin - begin freezing workqueues
2390 *
2391 * Start freezing workqueues. After this function returns, all
2392 * freezeable workqueues will queue new works to their frozen_works
Tejun Heo7e116292010-06-29 10:07:13 +02002393 * list instead of gcwq->worklist.
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002394 *
2395 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02002396 * Grabs and releases workqueue_lock and gcwq->lock's.
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002397 */
2398void freeze_workqueues_begin(void)
2399{
2400 struct workqueue_struct *wq;
2401 unsigned int cpu;
2402
2403 spin_lock(&workqueue_lock);
2404
2405 BUG_ON(workqueue_freezing);
2406 workqueue_freezing = true;
2407
2408 for_each_possible_cpu(cpu) {
Tejun Heo8b03ae32010-06-29 10:07:12 +02002409 struct global_cwq *gcwq = get_gcwq(cpu);
2410
2411 spin_lock_irq(&gcwq->lock);
2412
Tejun Heodb7bccf2010-06-29 10:07:12 +02002413 BUG_ON(gcwq->flags & GCWQ_FREEZING);
2414 gcwq->flags |= GCWQ_FREEZING;
2415
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002416 list_for_each_entry(wq, &workqueues, list) {
2417 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2418
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002419 if (wq->flags & WQ_FREEZEABLE)
2420 cwq->max_active = 0;
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002421 }
Tejun Heo8b03ae32010-06-29 10:07:12 +02002422
2423 spin_unlock_irq(&gcwq->lock);
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002424 }
2425
2426 spin_unlock(&workqueue_lock);
2427}
2428
2429/**
2430 * freeze_workqueues_busy - are freezeable workqueues still busy?
2431 *
2432 * Check whether freezing is complete. This function must be called
2433 * between freeze_workqueues_begin() and thaw_workqueues().
2434 *
2435 * CONTEXT:
2436 * Grabs and releases workqueue_lock.
2437 *
2438 * RETURNS:
2439 * %true if some freezeable workqueues are still busy. %false if
2440 * freezing is complete.
2441 */
2442bool freeze_workqueues_busy(void)
2443{
2444 struct workqueue_struct *wq;
2445 unsigned int cpu;
2446 bool busy = false;
2447
2448 spin_lock(&workqueue_lock);
2449
2450 BUG_ON(!workqueue_freezing);
2451
2452 for_each_possible_cpu(cpu) {
2453 /*
2454 * nr_active is monotonically decreasing. It's safe
2455 * to peek without lock.
2456 */
2457 list_for_each_entry(wq, &workqueues, list) {
2458 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2459
2460 if (!(wq->flags & WQ_FREEZEABLE))
2461 continue;
2462
2463 BUG_ON(cwq->nr_active < 0);
2464 if (cwq->nr_active) {
2465 busy = true;
2466 goto out_unlock;
2467 }
2468 }
2469 }
2470out_unlock:
2471 spin_unlock(&workqueue_lock);
2472 return busy;
2473}
2474
2475/**
2476 * thaw_workqueues - thaw workqueues
2477 *
2478 * Thaw workqueues. Normal queueing is restored and all collected
Tejun Heo7e116292010-06-29 10:07:13 +02002479 * frozen works are transferred to their respective gcwq worklists.
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002480 *
2481 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02002482 * Grabs and releases workqueue_lock and gcwq->lock's.
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002483 */
2484void thaw_workqueues(void)
2485{
2486 struct workqueue_struct *wq;
2487 unsigned int cpu;
2488
2489 spin_lock(&workqueue_lock);
2490
2491 if (!workqueue_freezing)
2492 goto out_unlock;
2493
2494 for_each_possible_cpu(cpu) {
Tejun Heo8b03ae32010-06-29 10:07:12 +02002495 struct global_cwq *gcwq = get_gcwq(cpu);
2496
2497 spin_lock_irq(&gcwq->lock);
2498
Tejun Heodb7bccf2010-06-29 10:07:12 +02002499 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
2500 gcwq->flags &= ~GCWQ_FREEZING;
2501
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002502 list_for_each_entry(wq, &workqueues, list) {
2503 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2504
2505 if (!(wq->flags & WQ_FREEZEABLE))
2506 continue;
2507
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002508 /* restore max_active and repopulate worklist */
2509 cwq->max_active = wq->saved_max_active;
2510
2511 while (!list_empty(&cwq->delayed_works) &&
2512 cwq->nr_active < cwq->max_active)
2513 cwq_activate_first_delayed(cwq);
2514
Tejun Heo502ca9d2010-06-29 10:07:13 +02002515 /* perform delayed unbind from single cpu if empty */
2516 if (wq->single_cpu == gcwq->cpu &&
2517 !cwq->nr_active && list_empty(&cwq->delayed_works))
2518 cwq_unbind_single_cpu(cwq);
2519
Tejun Heoc8e55f32010-06-29 10:07:12 +02002520 wake_up_process(cwq->worker->task);
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002521 }
Tejun Heo8b03ae32010-06-29 10:07:12 +02002522
2523 spin_unlock_irq(&gcwq->lock);
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02002524 }
2525
2526 workqueue_freezing = false;
2527out_unlock:
2528 spin_unlock(&workqueue_lock);
2529}
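/*
 * Illustrative sketch, not part of the original file: the calling
 * sequence the freezer is expected to follow around these three
 * functions.  The retry count, the 10ms sleep and
 * my_freeze_all_workqueues() are assumptions made up for the example.
 */
static int my_freeze_all_workqueues(void)
{
	int retries = 100;

	freeze_workqueues_begin();
	while (freeze_workqueues_busy()) {
		if (!--retries) {
			/* could not drain in time, restore normal operation */
			thaw_workqueues();
			return -EBUSY;
		}
		msleep(10);
	}
	/* all freezeable workqueues are quiescent; thaw_workqueues() undoes this */
	return 0;
}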
2530#endif /* CONFIG_FREEZER */
2531
Oleg Nesterovc12920d2007-05-09 02:34:14 -07002532void __init init_workqueues(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533{
Tejun Heoc34056a2010-06-29 10:07:11 +02002534 unsigned int cpu;
Tejun Heoc8e55f32010-06-29 10:07:12 +02002535 int i;
Tejun Heoc34056a2010-06-29 10:07:11 +02002536
Tejun Heo7a22ad72010-06-29 10:07:13 +02002537 /*
2538 * The pointer part of work->data is either pointing to the
2539 * cwq or contains the cpu number the work ran last on. Make
2540 * sure cpu number won't overflow into kernel pointer area so
2541 * that they can be distinguished.
2542 */
2543 BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
2544
Tejun Heodb7bccf2010-06-29 10:07:12 +02002545 hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
Tejun Heo8b03ae32010-06-29 10:07:12 +02002546
2547 /* initialize gcwqs */
2548 for_each_possible_cpu(cpu) {
2549 struct global_cwq *gcwq = get_gcwq(cpu);
2550
2551 spin_lock_init(&gcwq->lock);
Tejun Heo7e116292010-06-29 10:07:13 +02002552 INIT_LIST_HEAD(&gcwq->worklist);
Tejun Heo8b03ae32010-06-29 10:07:12 +02002553 gcwq->cpu = cpu;
2554
Tejun Heoc8e55f32010-06-29 10:07:12 +02002555 INIT_LIST_HEAD(&gcwq->idle_list);
2556 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
2557 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
2558
Tejun Heo8b03ae32010-06-29 10:07:12 +02002559 ida_init(&gcwq->worker_ida);
Tejun Heodb7bccf2010-06-29 10:07:12 +02002560
2561 gcwq->trustee_state = TRUSTEE_DONE;
2562 init_waitqueue_head(&gcwq->trustee_wait);
Tejun Heo8b03ae32010-06-29 10:07:12 +02002563 }
2564
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565 keventd_wq = create_workqueue("events");
2566 BUG_ON(!keventd_wq);
2567}