/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
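
/*
 * Illustrative usage sketch (an editorial addition, not part of the original
 * file): a typical caller pairs queue_work() with a statically initialized
 * work item.  The names "my_wq", "my_work" and "my_work_fn" are hypothetical.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_debug("runs in a workqueue thread, may sleep\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);	returns 0 if it was already pending
 */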

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
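
/*
 * Illustrative sketch (editorial addition): delayed work uses struct
 * delayed_work so the embedded timer can queue the work later.  "my_wq",
 * "my_dwork" and "my_dwork_fn" are hypothetical names.
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	runs ~1 second later
 */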

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
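
/*
 * Illustrative sketch (editorial addition): same as queue_delayed_work(),
 * except the timer, and hence the work, fires on the chosen CPU.  "my_wq"
 * and "my_dwork" are hypothetical names.
 *
 *	queue_delayed_work_on(2, my_wq, &my_dwork, 10 * HZ);
 */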

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq))
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	else {
		int cpu;

		for_each_cpu_mask(cpu, cpu_populated_map)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
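
/*
 * Illustrative teardown sketch (editorial addition): a driver usually stops
 * new submissions first, then flushes its private queue so that no callback
 * runs on freed data.  "my_wq" and "my_dwork" are hypothetical names.
 *
 *	cancel_delayed_work(&my_dwork);		stop future submissions
 *	flush_workqueue(my_wq);			wait for already-queued work
 *	destroy_workqueue(my_wq);
 */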

static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	if (is_single_threaded(wq))
		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
	else {
		int cpu;

		for_each_cpu_mask(cpu, cpu_populated_map)
			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
	}
}
EXPORT_SYMBOL_GPL(flush_work);
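
/*
 * Illustrative sketch (editorial addition): flush_work() targets one work
 * item rather than the whole queue, so teardown of a single object does not
 * have to wait for unrelated work.  "my_wq" and "obj" are hypothetical.
 *
 *	obj->dead = 1;			callers stop requeueing obj->work
 *	flush_work(my_wq, &obj->work);	obj->work is idle after this
 *	kfree(obj);
 */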

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
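
/*
 * Illustrative sketch (editorial addition): schedule_work() is simply
 * queue_work() on the shared keventd queue, which lets interrupt handlers
 * defer sleeping operations.  "my_work", "my_work_fn" and "my_irq" are
 * hypothetical names.
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_work);	safe from hardirq context
 *		return IRQ_HANDLED;
 *	}
 */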

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
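
/*
 * Illustrative sketch (editorial addition): a self-rearming poll on keventd.
 * "my_poll" and "my_poll_fn" are hypothetical names.
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		... periodic check ...
 *		schedule_delayed_work(&my_poll, HZ);	rearm in one second
 *	}
 *
 *	schedule_delayed_work(&my_poll, 0);		kick it off now
 */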

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
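
/*
 * Illustrative sketch (editorial addition): callers use this for one-off
 * per-CPU actions and it blocks until every CPU's callback has finished.
 * "drain_local_cache" is a hypothetical function.
 *
 *	static void drain_local_cache(struct work_struct *unused)
 *	{
 *		... operate on this CPU's data ...
 *	}
 *
 *	int err = schedule_on_each_cpu(drain_local_cache);
 */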

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
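
/*
 * Illustrative sketch (editorial addition): when the handler rearms itself
 * with schedule_delayed_work(), a plain cancel_delayed_work() can lose the
 * race against the rearm, so teardown uses the "rearming" variant.
 * "my_poll" is a hypothetical delayed_work.
 *
 *	cancel_rearming_delayed_work(&my_poll);
 *	(neither the timer nor the callback is pending afterwards)
 */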

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
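
/*
 * Illustrative sketch (editorial addition): useful on paths that may run in
 * either process or interrupt context, e.g. a release routine.  "my_release"
 * and "dev->ew" are hypothetical; the execute_work storage must outlive the
 * call when it gets deferred.
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		... may sleep ...
 *	}
 *
 *	execute_in_process_context(my_release, &dev->ew);
 */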

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;
	if (!is_single_threaded(wq))
		kthread_bind(p, cpu);

	if (is_single_threaded(wq) || cpu_online(cpu))
		wake_up_process(p);

	return 0;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->freezeable = freezeable;

	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
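
/*
 * Illustrative sketch (editorial addition): drivers normally reach this
 * through the create_workqueue()/create_singlethread_workqueue() wrappers
 * rather than calling __create_workqueue() directly.  "my_wq" and "my_work"
 * are hypothetical names.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 */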

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	struct cpu_workqueue_struct *cwq;

	if (is_single_threaded(wq)) {
		cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
		cleanup_workqueue_thread(cwq, singlethread_cpu);
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		list_del(&wq->list);
		mutex_unlock(&workqueue_mutex);

		for_each_cpu_mask(cpu, cpu_populated_map) {
			cwq = per_cpu_ptr(wq->cpu_wq, cpu);
			cleanup_workqueue_thread(cwq, cpu);
		}
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
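
/*
 * Illustrative teardown sketch (editorial addition): pending work is run
 * before the threads exit, so the caller only has to stop new submissions
 * first.  "my_wq" is a hypothetical workqueue.
 *
 *	... unregister whatever queues work on my_wq ...
 *	destroy_workqueue(my_wq);	flushes remaining work, frees my_wq
 */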

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			wake_up_process(cwq->thread);
			break;

		case CPU_UP_CANCELED:
			if (cwq->thread)
				wake_up_process(cwq->thread);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}