/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
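
/*
 * Example usage (an illustrative sketch, not part of this file: "my_wq",
 * "my_work" and my_work_fn() are made-up names; my_wq would come from
 * create_workqueue()):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_debug("my_work_fn: running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 */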

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
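
/*
 * Example (sketch, made-up names): run a handler roughly one second from
 * now, recovering the containing delayed_work in the handler:
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		... use the object that embeds *dwork ...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 */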

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
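
/*
 * Sketch: as above, but pin the timer (and hence the queueing) to CPU 0:
 *
 *	queue_delayed_work_on(0, my_wq, &my_dwork, 2 * HZ);
 */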

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

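/*
 * A wq_barrier is a dummy work item: running it just completes ->done.
 * Inserting one into a cwq and waiting for ->done is how flushing waits
 * for all earlier work items (tail == 1), or, in wait_on_work(), for
 * just the currently running one (tail == 0), to finish.
 */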
struct wq_barrier {
	struct work_struct work;
	struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
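
/*
 * Typical driver-shutdown pattern (sketch): first stop everything that
 * could queue new work, then drain and destroy the queue:
 *
 *	... unregister interrupt handlers, delete timers, etc. ...
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */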

static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued. If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon. It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);
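
/*
 * Sketch: tear down an object that embeds a work item ("my_obj" is a
 * made-up name); the caller has already made sure nothing can requeue
 * my_obj->work:
 *
 *	flush_work(my_wq, &my_obj->work);
 *	kfree(my_obj);
 */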

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
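
/*
 * Sketch of the usual embedding pattern ("struct my_dev" and the field
 * names are made up): recover the containing object with container_of():
 *
 *	struct my_dev {
 *		struct work_struct work;
 *		...
 *	};
 *
 *	static void my_dev_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		... process dev ...
 *	}
 *
 *	INIT_WORK(&dev->work, my_dev_work_fn);
 *	schedule_work(&dev->work);
 */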

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

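/*
 * Sketch: run a function once on every online CPU and wait until all
 * invocations have finished (the function name is made up):
 *
 *	static void drain_local_state(struct work_struct *unused)
 *	{
 *		... runs in keventd context on each online CPU ...
 *	}
 *
 *	int err = schedule_on_each_cpu(drain_local_state);
 */
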
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	/* Was it ever queued ? */
	if (!get_wq_data(&dwork->work))
		return;

	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

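/*
 * Sketch of the self-rearming pattern these helpers exist for: a plain
 * cancel_delayed_work() can race with the handler requeueing itself,
 * which is why the cancel above is retried around a flush.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		... poll the hardware ...
 *		schedule_delayed_work(&my_poll_work, HZ);	(rearm)
 *	}
 *
 * At teardown:
 *
 *	cancel_rearming_delayed_work(&my_poll_work);
 */
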
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

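/*
 * Sketch: a release path that may be entered from irq context ("my_obj"
 * is made up; it embeds a struct execute_work named "ew" and must stay
 * allocated until my_release_fn() has run):
 *
 *	execute_in_process_context(my_release_fn, &my_obj->ew);
 */
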
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;
	if (!is_single_threaded(wq))
		kthread_bind(p, cpu);

	if (is_single_threaded(wq) || cpu_online(cpu))
		wake_up_process(p);

	return 0;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->freezeable = freezeable;

	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
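
/*
 * Callers normally reach this through the wrapper macros in
 * <linux/workqueue.h> (create_workqueue(), create_singlethread_workqueue(),
 * create_freezeable_workqueue()) rather than calling it directly, e.g.:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 */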

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			wake_up_process(cwq->thread);
			break;

		case CPU_UP_CANCELED:
			if (cwq->thread)
				wake_up_process(cwq->thread);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}