/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	struct work_struct *current_work;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static long migrate_sequence __read_mostly;
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	/*
	 * We need to re-validate the work info after we've gotten
	 * the cpu_workqueue lock. We can run the work now iff:
	 *
	 *  - the wq_data still matches the cpu_workqueue_struct
	 *  - AND the work is still marked pending
	 *  - AND the work is still on a list (which will be this
	 *    workqueue_struct list)
	 *
	 * All these conditions are important, because we
	 * need to protect against the work being run right
	 * now on another CPU (all but the last one might be
	 * true if it's currently running and has not been
	 * released yet, for example).
	 */
	if (get_wq_data(work) == cwq
	    && work_pending(work)
	    && !list_empty(&work->entry)) {
		work_func_t f = work->func;
		cwq->current_work = work;
		list_del_init(&work->entry);
		spin_unlock_irqrestore(&cwq->lock, flags);

		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->current_work = NULL;
		ret = 1;
	}
	spin_unlock_irqrestore(&cwq->lock, flags);
	return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
	for (;;) {
		struct cpu_workqueue_struct *cwq;

		if (!work_pending(work))
			return 0;
		if (list_empty(&work->entry))
			return 0;
		/* NOTE! This depends intimately on __queue_work! */
		cwq = get_wq_data(work);
		if (!cwq)
			return 0;
		if (__run_work(cwq, work))
			return 1;
	}
}
EXPORT_SYMBOL(run_scheduled_work);

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
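
/*
 * Illustrative usage sketch (not part of this file's build; my_dev, my_wq
 * and my_work_fn are hypothetical names): callers typically embed the
 * work_struct in their own object and recover it with container_of().
 *
 *	struct my_dev {
 *		struct workqueue_struct *my_wq;
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		// runs in process context and may sleep
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	queue_work(dev->my_wq, &dev->work);
 */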

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
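
/*
 * Illustrative usage sketch (not part of this file's build; my_wq, my_dwork
 * and my_timeout_fn are hypothetical names): a delayed_work carries its own
 * timer, so the caller only supplies the delay in jiffies.
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 *	...
 *	cancel_delayed_work(&my_dwork);	// drops it if the timer hasn't fired
 */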

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to insure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

struct wq_barrier {
	struct work_struct work;
	struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		long sequence;
		int cpu;
again:
		sequence = migrate_sequence;

		for_each_possible_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));

		if (unlikely(sequence != migrate_sequence))
			goto again;
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
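
/*
 * Illustrative teardown sketch (not part of this file's build; dev, my_wq
 * and poll_timer are hypothetical): the usual pattern is to stop everything
 * that can requeue work, then flush, then destroy the queue.
 *
 *	del_timer_sync(&dev->poll_timer);	// nothing can requeue now
 *	flush_workqueue(dev->my_wq);		// wait for queued work to finish
 *	destroy_workqueue(dev->my_wq);
 */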

static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		mutex_unlock(&workqueue_mutex);
		wait_for_completion(&barr.done);
		mutex_lock(&workqueue_mutex);
	}
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;

	mutex_lock(&workqueue_mutex);
	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		goto out;

	/*
	 * This work can't be re-queued, and the lock above protects us
	 * from take_over_work(), no need to re-check that get_wq_data()
	 * is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
	} else {
		int cpu;

		for_each_online_cpu(cpu)
			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
	}
out:
	mutex_unlock(&workqueue_mutex);
}
EXPORT_SYMBOL_GPL(flush_work);

static void init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
}

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->freezeable = freezeable;

	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		init_cpu_workqueue(wq, singlethread_cpu);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_possible_cpu(cpu) {
			init_cpu_workqueue(wq, cpu);
			if (!cpu_online(cpu))
				continue;

			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
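
/*
 * Note: drivers normally do not call __create_workqueue() directly; the
 * create_workqueue() and create_singlethread_workqueue() wrappers in
 * <linux/workqueue.h> fill in the singlethread/freezeable arguments.
 * Illustrative sketch (my_wq is a hypothetical name, error handling elided):
 *
 *	struct workqueue_struct *my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 */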

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq: the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
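
/*
 * Illustrative sketch (not part of this file's build; my_poll_work and
 * my_poll are hypothetical): a handler that rearms itself should be torn
 * down with the cancel_rearming_* helpers above, since a single
 * cancel_delayed_work() can race with the handler requeueing the work.
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		// ... periodic housekeeping ...
 *		schedule_delayed_work(&my_poll_work, HZ);	// rearm
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *	// teardown:
 *	cancel_rearming_delayed_work(&my_poll_work);
 */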

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *	be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
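
/*
 * Illustrative sketch (not part of this file's build; my_obj, obj and
 * my_release_fn are hypothetical): the caller owns the execute_work
 * storage, so it must remain valid until the deferred case has run;
 * embedding it in the object being released is the usual pattern.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		// ...
 *	};
 *
 *	// safe from either interrupt or process context:
 *	execute_in_process_context(my_release_fn, &obj->ew);
 */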

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);
	migrate_sequence++;

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}