/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes the CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map instead; the cpumask below serves more as
 * documentation than as an optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is
 * no guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
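
/*
 * Illustrative sketch (not part of this file; my_dev and my_work_fn are
 * hypothetical names): a typical caller embeds a struct work_struct in its
 * own object, initializes it with INIT_WORK() and hands it to queue_work().
 * The callback later runs in process context and may sleep.
 *
 *	struct my_dev {
 *		struct workqueue_struct *wq;
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		... do the deferred processing on dev, may sleep ...
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	queue_work(dev->wq, &dev->work);
 */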

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
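
/*
 * Illustrative sketch (not part of this file; my_dev and my_poll_fn are
 * hypothetical): delayed work is declared as a struct delayed_work so the
 * embedded timer can be set up by INIT_DELAYED_WORK(), then queued with a
 * delay in jiffies (here one second via HZ).  queue_delayed_work_on()
 * additionally pins the eventual execution to a given CPU.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev =
 *			container_of(work, struct my_dev, poll_work.work);
 *		... poll the hardware, then re-arm ...
 *		queue_delayed_work(dev->wq, &dev->poll_work, HZ);
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *	queue_delayed_work(dev->wq, &dev->poll_work, HZ);
 */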

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 369 |  * | 
 | 370 |  * This function used to run the workqueues itself.  Now we just wait for the | 
 | 371 |  * helper threads to do it. | 
 | 372 |  */ | 
 | 373 | void fastcall flush_workqueue(struct workqueue_struct *wq) | 
 | 374 | { | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 375 | 	const cpumask_t *cpu_map = wq_cpu_map(wq); | 
| Oleg Nesterov | cce1a16 | 2007-05-09 02:34:13 -0700 | [diff] [blame] | 376 | 	int cpu; | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 377 |  | 
| Oleg Nesterov | f293ea9 | 2007-05-09 02:34:10 -0700 | [diff] [blame] | 378 | 	might_sleep(); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 379 | 	for_each_cpu_mask(cpu, *cpu_map) | 
 | 380 | 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 381 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 382 | EXPORT_SYMBOL_GPL(flush_workqueue); | 
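
/*
 * Illustrative sketch (hypothetical my_dev shutdown path, not part of this
 * file): a driver typically flushes its workqueue before freeing the data
 * its work callbacks touch, so every work item queued up to this point is
 * known to have finished (assuming nothing re-queues &dev->work afterwards).
 *
 *	queue_work(dev->wq, &dev->work);
 *	...
 *	flush_workqueue(dev->wq);	... &dev->work has completed here ...
 *	kfree(dev);
 */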

/*
 * Upon a successful return, the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 1;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue; however, in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
void cancel_work_sync(struct work_struct *work)
{
	while (!try_to_grab_pending(work))
		cpu_relax();
	wait_on_work(work);
	work_clear_pending(work);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
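
/*
 * Illustrative sketch (hypothetical names): cancel_work_sync() is the usual
 * way to tear down a work item that might be pending or running, e.g. from a
 * driver remove path.  New submissions are stopped first so the work cannot
 * be re-queued behind the cancel.
 *
 *	free_irq(dev->irq, dev);	... no new schedule_work() calls ...
 *	cancel_work_sync(&dev->work);	... neither queued nor running now ...
 *	kfree(dev);
 */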

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	while (!del_timer(&dwork->timer) &&
	       !try_to_grab_pending(&dwork->work))
		cpu_relax();
	wait_on_work(&dwork->work);
	work_clear_pending(&dwork->work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
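
/*
 * Illustrative sketch (hypothetical names): deferring from an interrupt
 * handler to process context via the shared keventd workqueue.  The work
 * function later runs on that CPU's "events/N" thread and may sleep.
 *
 *	static void my_deferred_fn(struct work_struct *work)
 *	{
 *		... process context, may sleep ...
 *	}
 *	static DECLARE_WORK(my_deferred, my_deferred_fn);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		schedule_work(&my_deferred);
 *		return IRQ_HANDLED;
 *	}
 */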

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

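/*
 * Illustrative sketch (hypothetical name): schedule_on_each_cpu() runs the
 * same callback once on every online CPU and waits for all of them via the
 * flush_workqueue() above, so the caller must be able to sleep.
 *
 *	static void my_percpu_sync(struct work_struct *unused)
 *	{
 *		... runs once on each online CPU ...
 *	}
 *
 *	int ret = schedule_on_each_cpu(my_percpu_sync);
 */
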
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
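
/*
 * Illustrative sketch (hypothetical my_obj/my_release names): a typical user
 * is a release routine that may be invoked from either process or interrupt
 * context; the struct execute_work lives inside the object being released,
 * so its storage stays valid until the deferred call runs.
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */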

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * workqueue_mutex protects cwq->thread
	 */
	if (cwq->thread == NULL)
		return;

	/*
	 * If the caller is CPU_DEAD the single flush_cpu_workqueue()
	 * is not enough, a concurrent flush_workqueue() can insert a
	 * barrier after us.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	while (flush_cpu_workqueue(cwq))
		;

	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
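
/*
 * Illustrative lifecycle sketch (hypothetical driver, not part of this
 * file): callers normally reach __create_workqueue() through the wrappers
 * in <linux/workqueue.h>, such as create_workqueue() and
 * create_singlethread_workqueue(); the matching teardown is
 * destroy_workqueue() once no one can queue new work on it.
 *
 *	dev->wq = create_singlethread_workqueue("mydrv");
 *	if (!dev->wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(dev->wq, &dev->work);
 *	...
 *	destroy_workqueue(dev->wq);	... pending work is flushed first ...
 */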

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}