/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single-threaded, we always use the first
 * possible CPU).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue() or
 * wait_on_work() which comes in between can't use for_each_online_cpu().
 * We could use cpu_possible_map instead; the cpumask below is more
 * documentation than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
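
/*
 * Illustrative usage sketch: a driver embeds a work_struct in its device
 * structure and queues it on its own workqueue. The names my_dev, my_wq
 * and my_work_fn() are hypothetical.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		...	runs in process context and may sleep
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	queue_work(my_wq, &dev->work);
 */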

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
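
/*
 * Illustrative sketch: pinning one work item per CPU. The caller keeps
 * the CPUs online across the calls; stats_works is a hypothetical
 * alloc_percpu()'d array of work items.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		queue_work_on(cpu, my_wq, per_cpu_ptr(stats_works, cpu));
 *	put_online_cpus();
 */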

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
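
/*
 * Illustrative sketch: arming a polling routine half a second from now
 * and re-arming it from its own handler. poll_fn(), my_wq and dev->poll
 * are hypothetical names.
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = container_of(work,
 *					struct delayed_work, work);
 *		...
 *		queue_delayed_work(my_wq, dwork, HZ / 2);
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->poll, poll_fn);
 *	queue_delayed_work(my_wq, &dev->poll, HZ / 2);
 */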

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
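
/*
 * Illustrative sketch: the delayed counterpart of queue_work_on(); the
 * timer is armed on @cpu and the work runs there. Hypothetical names.
 *
 *	queue_delayed_work_on(cpu, my_wq, &dev->poll, msecs_to_jiffies(100));
 */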

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct from
		 * inside the function that is called from it; we need to
		 * take this into account for lockdep too. To avoid bogus
		 * "held lock freed" warnings as well as problems when
		 * looking into work->lockdep_map, make a copy and use
		 * that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
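
/*
 * Illustrative sketch of the typical shutdown ordering: stop new
 * submissions first, then drain what is already queued. my_wq and
 * dev->closing are hypothetical.
 *
 *	dev->closing = 1;		stop queueing new work
 *	flush_workqueue(my_wq);		wait for queued work to finish
 *	destroy_workqueue(my_wq);
 */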

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued; otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
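
/*
 * Illustrative sketch: waiting for one specific work item instead of
 * flushing the whole queue. The caller must prevent re-queueing first;
 * dev->stopping is a hypothetical flag the handler checks.
 *
 *	dev->stopping = 1;		handler won't requeue any more
 *	flush_work(&dev->work);		returns once the callback finished
 */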

/*
 * Upon a successful return (>= 0), the caller "owns" the
 * WORK_STRUCT_PENDING bit, so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue; however, in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending; otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
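
/*
 * Illustrative sketch: tearing down a work item before freeing the
 * structure that embeds it. dev is hypothetical.
 *
 *	cancel_work_sync(&dev->work);	work is idle and unqueued now
 *	kfree(dev);
 */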

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
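
/*
 * Illustrative sketch: the delayed-work counterpart, which also kills a
 * pending ->timer before freeing. dev is hypothetical.
 *
 *	cancel_delayed_work_sync(&dev->poll);
 *	kfree(dev);
 */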

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
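
/*
 * Illustrative sketch: deferring work from an interrupt handler to
 * process context via the shared keventd queue. my_irq() and my_dev
 * are hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		schedule_work(&dev->work);	safe from hardirq context
 *		return IRQ_HANDLED;
 *	}
 */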

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
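
/*
 * Illustrative sketch: a periodic tick on the global queue, re-armed
 * from its own handler. tick_fn() and tick_work are hypothetical.
 *
 *	static void tick_fn(struct work_struct *work)
 *	{
 *		...
 *		schedule_delayed_work(&tick_work, HZ);
 *	}
 */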

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
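
/*
 * Illustrative sketch: running a function once on every online CPU and
 * waiting for all of them, e.g. to drain per-cpu caches. drain_fn() is
 * hypothetical.
 *
 *	static void drain_fn(struct work_struct *unused)
 *	{
 *		...	runs in keventd context on each CPU
 *	}
 *
 *	err = schedule_on_each_cpu(drain_fn);
 */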

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
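
/*
 * Illustrative sketch: a release path that may be entered from either
 * interrupt or process context; @ew must stay valid until the work has
 * run. my_release() and my_dev are hypothetical.
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  ew.work);
 *		kfree(dev);
 *	}
 *
 *	execute_in_process_context(my_release, &dev->ew);
 */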
|  | 733 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 734 | int keventd_up(void) | 
|  | 735 | { | 
|  | 736 | return keventd_wq != NULL; | 
|  | 737 | } | 
|  | 738 |  | 
|  | 739 | int current_is_keventd(void) | 
|  | 740 | { | 
|  | 741 | struct cpu_workqueue_struct *cwq; | 
| Hugh Dickins | d243769 | 2007-08-27 16:06:19 +0100 | [diff] [blame] | 742 | int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 743 | int ret = 0; | 
|  | 744 |  | 
|  | 745 | BUG_ON(!keventd_wq); | 
|  | 746 |  | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 747 | cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 748 | if (current == cwq->thread) | 
|  | 749 | ret = 1; | 
|  | 750 |  | 
|  | 751 | return ret; | 
|  | 752 |  | 
|  | 753 | } | 

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on the list even if the code below
		 * fails. cpu_down(cpu) can remove cpu from cpu_populated_map
		 * before destroy_workqueue() takes the lock; in that case we
		 * would leak cwq[cpu]->thread.
		 */
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 844 | spin_lock(&workqueue_lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 845 | list_add(&wq->list, &workqueues); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 846 | spin_unlock(&workqueue_lock); | 
| Oleg Nesterov | 6af8bf3 | 2008-07-29 22:33:49 -0700 | [diff] [blame] | 847 | /* | 
|  | 848 | * We must initialize cwqs for each possible cpu even if we | 
|  | 849 | * end up calling destroy_workqueue() anyway. Otherwise | 
|  | 850 | * cpu_up() can hit an uninitialized cwq once we drop the | 
|  | 851 | * lock. | 
|  | 852 | */ | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 853 | for_each_possible_cpu(cpu) { | 
|  | 854 | cwq = init_cpu_workqueue(wq, cpu); | 
|  | 855 | if (err || !cpu_online(cpu)) | 
|  | 856 | continue; | 
|  | 857 | err = create_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 858 | start_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 859 | } | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 860 | cpu_maps_update_done(); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 861 | } | 
|  | 862 |  | 
|  | 863 | if (err) { | 
|  | 864 | destroy_workqueue(wq); | 
|  | 865 | wq = NULL; | 
|  | 866 | } | 
|  | 867 | return wq; | 
|  | 868 | } | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 869 | EXPORT_SYMBOL_GPL(__create_workqueue_key); | 
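|  |  | /* | 
|  |  |  * Usage sketch, assuming the <linux/workqueue.h> wrappers of this era: | 
|  |  |  * callers normally do not invoke __create_workqueue_key() directly but | 
|  |  |  * go through create_workqueue(), create_singlethread_workqueue() etc., | 
|  |  |  * which supply a static lock_class_key per call site: | 
|  |  |  * | 
|  |  |  *	struct workqueue_struct *wq = create_workqueue("my_wq"); | 
|  |  |  *	if (!wq) | 
|  |  |  *		return -ENOMEM; | 
|  |  |  *	queue_work(wq, &my_work);	("my_wq" and my_work are hypothetical) | 
|  |  |  */ | 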
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 870 |  | 
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 871 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 872 | { | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 873 | /* | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 874 | * Our caller is either destroy_workqueue() or the CPU_POST_DEAD | 
|  | 875 | * notifier; in both cases cpu_add_remove_lock protects cwq->thread. | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 876 | */ | 
|  | 877 | if (cwq->thread == NULL) | 
|  | 878 | return; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 879 |  | 
| Ingo Molnar | 3295f0e | 2008-08-11 10:30:30 +0200 | [diff] [blame] | 880 | lock_map_acquire(&cwq->wq->lockdep_map); | 
|  | 881 | lock_map_release(&cwq->wq->lockdep_map); | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 882 |  | 
| Oleg Nesterov | 13c2216 | 2007-07-17 04:03:55 -0700 | [diff] [blame] | 883 | flush_cpu_workqueue(cwq); | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 884 | /* | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 885 | * If we were called from the CPU_POST_DEAD notifier and cwq->worklist | 
| Oleg Nesterov | 13c2216 | 2007-07-17 04:03:55 -0700 | [diff] [blame] | 886 | * was not empty, a concurrent flush_workqueue() can insert a barrier | 
|  | 887 | * after us. However, in that case run_workqueue() won't return and | 
|  | 888 | * check kthread_should_stop() until it has flushed all work_structs. | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 889 | * When ->worklist becomes empty it is safe to exit because no | 
|  | 890 | * more work_structs can be queued on this cwq: flush_workqueue | 
|  | 891 | * checks list_empty(), and a "normal" queue_work() can't use | 
|  | 892 | * a dead CPU. | 
|  | 893 | */ | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 894 | kthread_stop(cwq->thread); | 
|  | 895 | cwq->thread = NULL; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 896 | } | 
|  | 897 |  | 
|  | 898 | /** | 
|  | 899 | * destroy_workqueue - safely terminate a workqueue | 
|  | 900 | * @wq: target workqueue | 
|  | 901 | * | 
|  | 902 | * Safely destroy a workqueue. All work currently pending will be done first. | 
|  | 903 | */ | 
|  | 904 | void destroy_workqueue(struct workqueue_struct *wq) | 
|  | 905 | { | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 906 | const cpumask_t *cpu_map = wq_cpu_map(wq); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 907 | int cpu; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 908 |  | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 909 | cpu_maps_update_begin(); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 910 | spin_lock(&workqueue_lock); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 911 | list_del(&wq->list); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 912 | spin_unlock(&workqueue_lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 913 |  | 
| Mike Travis | 363ab6f | 2008-05-12 21:21:13 +0200 | [diff] [blame] | 914 | for_each_cpu_mask_nr(cpu, *cpu_map) | 
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 915 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 916 | cpu_maps_update_done(); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 917 |  | 
|  | 918 | free_percpu(wq->cpu_wq); | 
|  | 919 | kfree(wq); | 
|  | 920 | } | 
|  | 921 | EXPORT_SYMBOL_GPL(destroy_workqueue); | 
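|  |  | /* | 
|  |  |  * Teardown sketch, assuming a driver-owned wq with a delayed work item | 
|  |  |  * (priv, dwork and wq are hypothetical names): the owner must ensure | 
|  |  |  * nothing re-queues work on wq once this runs, so pending delayed work | 
|  |  |  * is cancelled first: | 
|  |  |  * | 
|  |  |  *	cancel_delayed_work_sync(&priv->dwork); | 
|  |  |  *	destroy_workqueue(priv->wq); | 
|  |  |  */ | 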
|  | 922 |  | 
|  | 923 | static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | 
|  | 924 | unsigned long action, | 
|  | 925 | void *hcpu) | 
|  | 926 | { | 
|  | 927 | unsigned int cpu = (unsigned long)hcpu; | 
|  | 928 | struct cpu_workqueue_struct *cwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | struct workqueue_struct *wq; | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 930 | int ret = NOTIFY_OK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 931 |  | 
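|  |  | /* Fold the *_FROZEN (suspend/resume) events into their normal counterparts. */ | 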
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 932 | action &= ~CPU_TASKS_FROZEN; | 
|  | 933 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 934 | switch (action) { | 
|  | 935 | case CPU_UP_PREPARE: | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 936 | cpu_set(cpu, cpu_populated_map); | 
|  | 937 | } | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 938 | undo: | 
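|  |  | /* | 
|  |  |  * On create_workqueue_thread() failure we jump back here with | 
|  |  |  * action set to CPU_UP_CANCELED to unwind any threads already | 
|  |  |  * created for this cpu; cleanup_workqueue_thread() tolerates | 
|  |  |  * cwq->thread == NULL for the workqueues we never got to. | 
|  |  |  */ | 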
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 939 | list_for_each_entry(wq, &workqueues, list) { | 
|  | 940 | cwq = per_cpu_ptr(wq->cpu_wq, cpu); | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 941 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 942 | switch (action) { | 
|  | 943 | case CPU_UP_PREPARE: | 
|  | 944 | if (!create_workqueue_thread(cwq, cpu)) | 
|  | 945 | break; | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 946 | printk(KERN_ERR "workqueue [%s] for %i failed\n", | 
|  | 947 | wq->name, cpu); | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 948 | action = CPU_UP_CANCELED; | 
|  | 949 | ret = NOTIFY_BAD; | 
|  | 950 | goto undo; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 951 |  | 
|  | 952 | case CPU_ONLINE: | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 953 | start_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 954 | break; | 
|  | 955 |  | 
|  | 956 | case CPU_UP_CANCELED: | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 957 | start_workqueue_thread(cwq, -1); | 
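|  |  | /* fall through to stop the (unbound) thread */ | 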
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 958 | case CPU_POST_DEAD: | 
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 959 | cleanup_workqueue_thread(cwq); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 960 | break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | } | 
|  | 963 |  | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 964 | switch (action) { | 
|  | 965 | case CPU_UP_CANCELED: | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 966 | case CPU_POST_DEAD: | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 967 | cpu_clear(cpu, cpu_populated_map); | 
|  | 968 | } | 
|  | 969 |  | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 970 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 972 |  | 
| Oleg Nesterov | c12920d | 2007-05-09 02:34:14 -0700 | [diff] [blame] | 973 | void __init init_workqueues(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 974 | { | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 975 | cpu_populated_map = cpu_online_map; | 
| Nathan Lynch | f756d5e | 2006-01-08 01:05:12 -0800 | [diff] [blame] | 976 | singlethread_cpu = first_cpu(cpu_possible_map); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 977 | cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 978 | hotcpu_notifier(workqueue_cpu_callback, 0); | 
|  | 979 | keventd_wq = create_workqueue("events"); | 
|  | 980 | BUG_ON(!keventd_wq); | 
|  | 981 | } |
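|  |  | /* | 
|  |  |  * Usage sketch: keventd_wq created above is the queue behind | 
|  |  |  * schedule_work(); init_workqueues() itself is typically called early | 
|  |  |  * in boot (from do_basic_setup() in kernels of this era), after which: | 
|  |  |  * | 
|  |  |  *	static void my_fn(struct work_struct *work) { ... } | 
|  |  |  *	static DECLARE_WORK(my_work, my_fn);	(my_fn/my_work hypothetical) | 
|  |  |  * | 
|  |  |  *	schedule_work(&my_work);	- runs my_fn on the "events" queue | 
|  |  |  */ | 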