/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works have completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called (e.g. if insert_sequence was 42 at
 * flush time, the flusher sleeps until remove_sequence reaches 42).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */

	int freezeable;		/* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
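
/*
 * Illustrative sketch of the work->data packing used above: the low
 * WORK_STRUCT_FLAG_MASK bits hold flags such as WORK_STRUCT_PENDING,
 * and the remaining high bits hold the queue pointer, which works
 * because the pointed-to structures are sufficiently aligned.
 * "bits", "wq_ptr" and "pending" below are hypothetical locals:
 *
 *	unsigned long bits = atomic_long_read(&work->data);
 *	void *wq_ptr = (void *)(bits & WORK_STRUCT_WQ_DATA_MASK);
 *	int pending  = !!(bits & (1UL << WORK_STRUCT_PENDING));
 */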

static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	/*
	 * We need to re-validate the work info after we've gotten
	 * the cpu_workqueue lock. We can run the work now iff:
	 *
	 *  - the wq_data still matches the cpu_workqueue_struct
	 *  - AND the work is still marked pending
	 *  - AND the work is still on a list (which will be this
	 *    workqueue_struct list)
	 *
	 * All these conditions are important, because we
	 * need to protect against the work being run right
	 * now on another CPU (all but the last one might be
	 * true if it's currently running and has not been
	 * released yet, for example).
	 */
	if (get_wq_data(work) == cwq
	    && work_pending(work)
	    && !list_empty(&work->entry)) {
		work_func_t f = work->func;
		list_del_init(&work->entry);
		spin_unlock_irqrestore(&cwq->lock, flags);

		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
		ret = 1;
	}
	spin_unlock_irqrestore(&cwq->lock, flags);
	return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
	for (;;) {
		struct cpu_workqueue_struct *cwq;

		if (!work_pending(work))
			return 0;
		if (list_empty(&work->entry))
			return 0;
		/* NOTE! This depends intimately on __queue_work! */
		cwq = get_wq_data(work);
		if (!cwq)
			return 0;
		if (__run_work(cwq, work))
			return 1;
	}
}
EXPORT_SYMBOL(run_scheduled_work);

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	set_wq_data(work, cwq);
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
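
/*
 * Minimal usage sketch for queue_work(); "my_wq", "my_work" and
 * "my_handler" are hypothetical:
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_debug("runs in process context, may sleep\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	queue_work(my_wq, &my_work);
 */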

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
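
/*
 * Minimal usage sketch for queue_delayed_work(); "my_wq", "my_dwork"
 * and "my_timeout_fn" are hypothetical.  The handler receives the
 * embedded work_struct and can recover its delayed_work container:
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *	}
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	(fires ~1s later)
 */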

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each CPU workqueue's current insert_sequence
 * number and will sleep until the head sequence is greater than or equal
 * to that.  This means that we sleep until all works which were queued on
 * entry have been handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
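
/*
 * Typical shutdown-ordering sketch for flush_workqueue(); "my_wq" and
 * "my_dwork" are hypothetical.  Stop new submissions first, then wait
 * for everything already queued to finish:
 *
 *	cancel_delayed_work(&my_dwork);
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */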

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu, int freezeable)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	cwq->freezeable = freezeable;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu, freezeable);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
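
/*
 * Callers normally reach __create_workqueue() through the wrapper
 * macros in linux/workqueue.h (create_workqueue() for one thread per
 * online CPU, create_singlethread_workqueue() for one thread total).
 * A sketch, with "my_wq" hypothetical:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 */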

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
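
/*
 * Minimal sketch of deferring from interrupt context to the global
 * keventd queue; "my_irq_work" and "my_bh" are hypothetical:
 *
 *	static void my_bh(struct work_struct *work)
 *	{
 *		pr_debug("process context, may sleep\n");
 *	}
 *	static DECLARE_WORK(my_irq_work, my_bh);
 *
 *	(from an interrupt handler:)
 *	schedule_work(&my_irq_work);
 */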

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
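
/*
 * Usage sketch for schedule_on_each_cpu(); "drain_local_stats" is
 * hypothetical.  The function runs once on every online CPU, and the
 * call returns only after all instances have completed:
 *
 *	static void drain_local_stats(struct work_struct *unused)
 *	{
 *		pr_debug("running on cpu %d\n", smp_processor_id());
 *	}
 *
 *	int err = schedule_on_each_cpu(drain_local_stats);
 */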

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
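
/*
 * Sketch of the self-rearming pattern these helpers exist for;
 * "my_poll" and "my_poll_work" are hypothetical:
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		schedule_delayed_work(dwork, HZ);	(rearm)
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *	(on teardown, a plain cancel_delayed_work() can lose the race
 *	 with the rearm, so:)
 *	cancel_rearming_delayed_work(&my_poll_work);
 */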

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
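
/*
 * Usage sketch for execute_in_process_context(); "struct my_obj" and
 * "my_release" are hypothetical.  @ew lives inside the object being
 * released, so it remains valid until the deferred work runs:
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *			container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */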

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu, 0)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}