/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
        int rt;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_wq_single_threaded(wq)
                ? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_wq_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                               struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
{
        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();
        list_add_tail(&work->entry, head);
        wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, &cwq->worklist);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret;

        ret = queue_work_on(get_cpu(), wq, work);
        put_cpu();

        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
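
/*
 * Typical usage, as an illustrative sketch (my_wq, my_work and
 * my_work_func are placeholders, not part of this file):
 *
 *      static void my_work_func(struct work_struct *work);
 *      static DECLARE_WORK(my_work, my_work_func);
 *      ...
 *      queue_work(my_wq, &my_work);
 *
 * my_work_func() then runs in process context on the workqueue's thread.
 * The work_struct must remain valid until the callback has finished (or
 * has been cancelled with cancel_work_sync()).
 */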

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, cpu), work);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
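
/*
 * Illustrative sketch of the delayed variant (my_wq, my_dwork and
 * my_work_func are placeholders):
 *
 *      static DECLARE_DELAYED_WORK(my_dwork, my_work_func);
 *      ...
 *      queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 *
 * The timer fires delayed_work_timer_fn() above, which queues the
 * underlying work_struct on the current CPU's queue.
 */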

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                timer_stats_timer_set_start_info(&dwork->timer);

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

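/*
 * Drain cwq->worklist: take work items off the list one at a time and
 * run their callbacks with cwq->lock dropped, so the callbacks may sleep
 * and may queue further work.  cwq->current_work lets flushers and
 * cancellers detect a callback that is still running.
 */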
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
                /*
                 * It is permissible to free the struct work_struct
                 * from inside the function that is called from it,
                 * this we need to take into account for lockdep too.
                 * To avoid bogus "held lock freed" warnings as well
                 * as problems when looking into work->lockdep_map,
                 * make a copy and use that here.
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
#endif

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                lock_map_acquire(&cwq->wq->lockdep_map);
                lock_map_acquire(&lockdep_map);
                f(work);
                lock_map_release(&lockdep_map);
                lock_map_release(&cwq->wq->lockdep_map);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        task_pid_nr(current));
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        spin_unlock_irq(&cwq->lock);
}

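/*
 * Each cpu_workqueue_struct is serviced by one kernel thread running this
 * loop: sleep on more_work until something is queued (or the thread must
 * freeze or stop), then drain the list via run_workqueue().
 */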
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (cwq->wq->freezeable)
                set_freezable();

        set_user_nice(current, -5);

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

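/*
 * Flushing is implemented with barrier works: a dummy work item whose
 * callback merely completes a completion.  A flusher inserts the barrier
 * behind the work it cares about and sleeps on the completion.
 */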
struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                        struct wq_barrier *barr, struct list_head *head)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active = 0;
        struct wq_barrier barr;

        WARN_ON(cwq->thread == current);

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                insert_wq_barrier(cwq, &barr, &cwq->worklist);
                active = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (active)
                wait_for_completion(&barr.done);

        return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        lock_map_acquire(&wq->lockdep_map);
        lock_map_release(&wq->lockdep_map);
        for_each_cpu(cpu, cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct list_head *prev;
        struct wq_barrier barr;

        might_sleep();
        cwq = get_wq_data(work);
        if (!cwq)
                return 0;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        prev = NULL;
        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * See the comment near try_to_grab_pending()->smp_rmb().
                 * If it was re-queued under us we are not going to wait.
                 */
                smp_rmb();
                if (unlikely(cwq != get_wq_data(work)))
                        goto out;
                prev = &work->entry;
        } else {
                if (cwq->current_work != work)
                        goto out;
                prev = &cwq->worklist;
        }
        insert_wq_barrier(cwq, &barr, prev->next);
out:
        spin_unlock_irq(&cwq->lock);
        if (!prev)
                return 0;

        wait_for_completion(&barr.done);
        return 1;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = -1;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */

        cwq = get_wq_data(work);
        if (!cwq)
                return ret;

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, cwq->worklist.next);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

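/*
 * Wait until no CPU is still running @work.  The work may have been queued
 * on any CPU in the workqueue's cpu map, so each per-CPU queue is checked.
 */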
static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const struct cpumask *cpu_map;
        int cpu;

        might_sleep();

        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu(cpu, cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

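/*
 * Cancel @work: delete the timer (if any) or steal the PENDING bit, then
 * wait for a possibly running callback to finish.  The loop retries while
 * try_to_grab_pending() reports that the queueing is still in progress
 * (ret < 0).
 */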
static int __cancel_work_timer(struct work_struct *work,
                                struct timer_list* timer)
{
        int ret;

        do {
                ret = (timer && likely(del_timer(timer)));
                if (!ret)
                        ret = try_to_grab_pending(work);
                wait_on_work(work);
        } while (unlikely(ret < 0));

        work_clear_pending(work);
        return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
        return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
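
/*
 * Illustrative teardown order for a self-rearming delayed work (my_dwork
 * is a placeholder): first make sure the callback can no longer rearm
 * itself (driver-specific), then
 *
 *      cancel_delayed_work_sync(&my_dwork);
 *
 * guarantees that the callback has finished and the timer is not pending.
 */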

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                schedule_work_on(cpu, work);
        }
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
        put_online_cpus();
        free_percpu(works);
        return 0;
}

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);
        if (cwq->wq->rt)
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
        cwq->thread = p;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

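/*
 * Workqueue creation.  Callers normally use the create_workqueue(),
 * create_singlethread_workqueue() or create_freezeable_workqueue()
 * wrappers from <linux/workqueue.h>, which expand to this function with
 * the appropriate singlethread/freezeable/rt flags and a lockdep key,
 * e.g. (illustrative):
 *
 *      my_wq = create_singlethread_workqueue("mydrv");
 */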
struct workqueue_struct *__create_workqueue_key(const char *name,
                                                int singlethread,
                                                int freezeable,
                                                int rt,
                                                struct lock_class_key *key,
                                                const char *lock_name)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        wq->rt = rt;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
                /*
                 * We must place this wq on list even if the code below fails.
                 * cpu_down(cpu) can remove cpu from cpu_populated_map before
                 * destroy_workqueue() takes the lock, in that case we leak
                 * cwq[cpu]->thread.
                 */
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                /*
                 * We must initialize cwqs for each possible cpu even if we
                 * are going to call destroy_workqueue() finally. Otherwise
                 * cpu_up() can hit the uninitialized cwq once we drop the
                 * lock.
                 */
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                cpu_maps_update_done();
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
         * cpu_add_remove_lock protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        flush_cpu_workqueue(cwq);
        /*
         * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        cpu_maps_update_begin();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);

        for_each_cpu(cpu, cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

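/*
 * CPU hotplug callback: CPU_UP_PREPARE creates a worker thread on the
 * incoming CPU for every registered multithreaded workqueue, CPU_ONLINE
 * binds and wakes it, and CPU_UP_CANCELED/CPU_POST_DEAD flush the queue
 * and stop the thread again.
 */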
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        int ret = NOTIFY_OK;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_UP_PREPARE:
                cpumask_set_cpu(cpu, cpu_populated_map);
        }
undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
                        action = CPU_UP_CANCELED;
                        ret = NOTIFY_BAD;
                        goto undo;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_POST_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_POST_DEAD:
                cpumask_clear_cpu(cpu, cpu_populated_map);
        }

        return ret;
}

#ifdef CONFIG_SMP
static struct workqueue_struct *work_on_cpu_wq __read_mostly;

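/*
 * work_on_cpu() runs a function synchronously on a given CPU by queueing
 * a work_for_cpu wrapper on the dedicated work_on_cpu_wq and flushing it;
 * the wrapper stores the function's return value for the caller.
 */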
struct work_for_cpu {
        struct work_struct work;
        long (*fn)(void *);
        void *arg;
        long ret;
};

static void do_work_for_cpu(struct work_struct *w)
{
        struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);

        wfc->ret = wfc->fn(wfc->arg);
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        struct work_for_cpu wfc;

        INIT_WORK(&wfc.work, do_work_for_cpu);
        wfc.fn = fn;
        wfc.arg = arg;
        queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
        flush_work(&wfc.work);

        return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */

void __init init_workqueues(void)
{
        alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

        cpumask_copy(cpu_populated_map, cpu_online_mask);
        singlethread_cpu = cpumask_first(cpu_possible_mask);
        cpu_singlethread_map = cpumask_of(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
#ifdef CONFIG_SMP
        work_on_cpu_wq = create_workqueue("work_on_cpu");
        BUG_ON(!work_on_cpu_wq);
#endif
}