/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
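
/*
 * Usage sketch (illustrative only, not part of this file; the my_* names
 * are hypothetical).  In this API a work item carries its own handler and
 * data pointer, set up with DECLARE_WORK()/INIT_WORK(work, func, data);
 * the handler runs in process context and may sleep:
 *
 *	static void my_handler(void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *		... may sleep, take mutexes, do blocking I/O ...
 *	}
 *
 *	static DECLARE_WORK(my_work, my_handler, &my_ctx);
 *
 *	queue_work(my_wq, &my_work);	- returns 0 if already pending
 */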

static void delayed_work_timer_fn(unsigned long __data)
{
	struct work_struct *work = (struct work_struct *)__data;
	struct workqueue_struct *wq = work->wq_data;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
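
/*
 * Usage sketch (illustrative only; my_wq and my_work are hypothetical).
 * The delay is expressed in jiffies, so callers typically convert from
 * milliseconds or use HZ:
 *
 *	queue_delayed_work(my_wq, &my_work, msecs_to_jiffies(100));
 *	queue_delayed_work_on(2, my_wq, &my_work, HZ);	- on CPU 2 in ~1 second
 *
 * As with queue_work(), the return value is 0 if @work was already pending.
 */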

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
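
/*
 * Lifecycle sketch (illustrative only; "mydrv" and my_work are made up).
 * Callers normally use the create_workqueue()/create_singlethread_workqueue()
 * wrappers, which invoke __create_workqueue() with singlethread 0 or 1:
 *
 *	struct workqueue_struct *wq = create_singlethread_workqueue("mydrv");
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &my_work);
 *	...
 *	flush_workqueue(wq);	- wait for everything queued so far
 *	destroy_workqueue(wq);
 */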

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @work: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @work: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

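/*
 * Usage sketch (illustrative only; my_func and my_counter are hypothetical).
 * The callback runs once on every online CPU, from that CPU's keventd
 * thread, and the call returns only after all of them have completed:
 *
 *	static void my_func(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	schedule_on_each_cpu(my_func, &my_counter);
 */
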
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct work_struct *work)
{
	while (!cancel_delayed_work(work))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
	cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
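
/*
 * Usage sketch (illustrative only; my_poll and my_work are hypothetical).
 * The typical caller is a handler that re-queues itself, which a bare
 * cancel_delayed_work() cannot reliably stop:
 *
 *	static void my_poll(void *data)
 *	{
 *		... do periodic work ...
 *		schedule_delayed_work(&my_work, HZ);
 *	}
 *
 * Teardown then calls cancel_rearming_delayed_work(&my_work), which keeps
 * cancelling and flushing until the work is no longer pending.
 */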

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
			       struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(data);
		return 0;
	}

	INIT_WORK(&ew->work, fn, data);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
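
/*
 * Usage sketch (illustrative only; my_release, my_dev and its ew member are
 * hypothetical).  The execute_work storage must stay valid until the
 * function has run, since the call may be deferred to keventd when made
 * from interrupt context:
 *
 *	if (execute_in_process_context(my_release, my_dev, &my_dev->ew))
 *		... deferred: my_dev must remain valid until my_release runs ...
 */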

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}