/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
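
/*
 * Usage sketch for queue_work(), kept inside a comment so it is not
 * compiled into this file.  The names example_wq, example_work and
 * example_work_fn() are hypothetical driver-side names, not defined here:
 *
 *	static struct workqueue_struct *example_wq;
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "deferred work runs in process context\n");
 *	}
 *	static DECLARE_WORK(example_work, example_work_fn);
 *
 *	...
 *	queue_work(example_wq, &example_work);
 *
 * If the same work_struct is queued again before it has run, the second
 * call returns 0 and the callback still executes only once.
 */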

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
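
/*
 * Usage sketch for the delayed-work variants above (illustrative only;
 * example_wq, example_dwork and example_dwork_fn() are hypothetical names):
 *
 *	static void example_dwork_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(example_dwork, example_dwork_fn);
 *
 *	queue_delayed_work(example_wq, &example_dwork, HZ);
 *
 * runs the callback roughly one second later, while
 *
 *	queue_delayed_work_on(1, example_wq, &example_dwork, 5 * HZ);
 *
 * would instead arm the timer on CPU 1 (assuming it is online).  Only one
 * of the two calls can succeed for a given pending delayed_work; the other
 * returns 0.
 */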

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
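
/*
 * Usage sketch for flush_workqueue() (illustrative; example_wq is a
 * hypothetical queue created elsewhere by the caller):
 *
 *	// typically in a driver's remove/shutdown path, after making sure
 *	// nothing can re-queue new work:
 *	flush_workqueue(example_wq);
 *
 * On return, every work_struct that was on example_wq when the call was
 * made has finished executing; work queued after the call began may still
 * be pending.
 */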

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
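
/*
 * Usage sketch for the cancel helpers above (illustrative; example_work
 * and example_dwork are hypothetical objects owned by the caller):
 *
 *	// module exit / device teardown:
 *	cancel_work_sync(&example_work);
 *	cancel_delayed_work_sync(&example_dwork);
 *
 * Both return non-zero if the item was still pending, and in every case
 * guarantee that the callback is not running and cannot re-queue itself
 * when they return, so the memory backing the work_struct may then be
 * freed safely.
 */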

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
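
/*
 * Usage sketch for schedule_work() (illustrative; the interrupt handler
 * and example_work are hypothetical driver-side names):
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		// too much to do with interrupts disabled, defer it:
 *		schedule_work(&example_work);
 *		return IRQ_HANDLED;
 *	}
 *
 * The deferred callback then runs in process context in one of the
 * keventd_wq ("events/N") worker threads.
 */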

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
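
/*
 * Usage sketch for schedule_on_each_cpu() (illustrative; the callback name
 * is hypothetical):
 *
 *	static void drain_local_caches(struct work_struct *unused)
 *	{
 *		// runs once on every online CPU, in keventd context
 *	}
 *
 *	int ret = schedule_on_each_cpu(drain_local_caches);
 *
 * The call does not return until the function has completed on all online
 * CPUs, which is why it is documented above as being very slow.
 */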

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
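
/*
 * Usage sketch for execute_in_process_context() (illustrative; the release
 * function and struct example_dev are hypothetical):
 *
 *	static void example_release(struct work_struct *work)
 *	{
 *		struct example_dev *dev =
 *			container_of(work, struct example_dev, ew.work);
 *		kfree(dev);
 *	}
 *
 *	// may be reached from either process or interrupt context:
 *	execute_in_process_context(example_release, &dev->ew);
 *
 * The struct execute_work member must stay valid until the callback has
 * run, which is why it is embedded in the object being released.
 */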

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
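
/*
 * Usage sketch for workqueue creation (illustrative; callers normally use
 * the create_workqueue()/create_singlethread_workqueue() wrappers from
 * linux/workqueue.h rather than __create_workqueue() directly, and
 * "example" plus example_work are hypothetical names):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_singlethread_workqueue("example");
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(wq, &example_work);
 *	...
 *	destroy_workqueue(wq);	// flushes pending work, stops the thread(s)
 */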

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * workqueue_mutex protects cwq->thread
	 */
	if (cwq->thread == NULL)
		return;

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}