/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map; the cpumask below is more documentation
 * than an optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run.
 * - Must *only* be called if the pending flag is set.
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure that
 * the CPU can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
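
/*
 * Illustrative caller-side sketch, not part of this file ("struct my_dev",
 * my_handler() and my_wq are hypothetical names): a user embeds a
 * work_struct in its own object, initializes it with INIT_WORK(), and
 * submits it with queue_work().  The handler recovers the containing
 * object via container_of() and runs in process context, so it may sleep.
 *
 *	struct my_dev {
 *		struct work_struct work;
 *		int pending_events;
 *	};
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *
 *		dev->pending_events = 0;	// may sleep here
 *	}
 *
 *	// once: INIT_WORK(&dev->work, my_handler);
 *	// submit (returns non-zero unless already pending):
 *	//	queue_work(my_wq, &dev->work);
 */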

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
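
/*
 * Illustrative sketch of the delayed path (my_wq and my_poll are
 * hypothetical): queue_delayed_work() only arms ->timer; the work is moved
 * onto the workqueue by delayed_work_timer_fn() when the timer expires.
 * A self-rearming poller is the typical use:
 *
 *	static void my_poll(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		// ... sample the hardware ...
 *		queue_delayed_work(my_wq, &my_poll_work, HZ);	// re-arm in 1s
 *	}
 *
 *	// start:	queue_delayed_work(my_wq, &my_poll_work, HZ);
 *	// stop:	cancel_delayed_work_sync(&my_poll_work);
 */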

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct from
		 * inside the function that is called from it; we need to
		 * take this into account for lockdep too.  To avoid bogus
		 * "held lock freed" warnings as well as problems when
		 * looking into work->lockdep_map, make a copy and use
		 * that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
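
/*
 * Illustrative shutdown-path sketch (my_remove(), dev->wq and
 * dev->shutting_down are hypothetical): a driver that owns a workqueue
 * typically stops new submissions first, then flushes what is already
 * queued before freeing the objects the work items touch.
 *
 *	static void my_remove(struct my_dev *dev)
 *	{
 *		dev->shutting_down = 1;		// no further queue_work()
 *		flush_workqueue(dev->wq);	// wait for queued work to finish
 *		destroy_workqueue(dev->wq);
 *		kfree(dev);
 *	}
 */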

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
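
/*
 * Illustrative sketch (dev->stop and dev->work are hypothetical):
 * flush_work() waits for one specific item rather than draining the whole
 * queue, which matters when a workqueue is shared by unrelated items.
 * As required above, the caller stops re-queueing first.
 *
 *	dev->stop = 1;			// handler checks this, won't re-queue
 *	if (!flush_work(&dev->work))
 *		pr_debug("work had already terminated\n");
 */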

/*
 * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING
 * bit, so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue; however, in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
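
/*
 * Illustrative teardown ordering (dev->poll_work and dev->irq_work are
 * hypothetical): cancellation is the usual choice when pending work no
 * longer needs to run at all, e.g. on device removal.  Both helpers
 * dequeue the item if possible and wait for a running callback to finish.
 *
 *	cancel_delayed_work_sync(&dev->poll_work);	// timer + work
 *	cancel_work_sync(&dev->irq_work);		// plain work
 *	// neither callback is queued or running past this point,
 *	// so the containing object can be freed safely.
 */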

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
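
/*
 * Illustrative sketch (my_irq(), dev->regs and MY_STAT are hypothetical):
 * schedule_work() is the common way to push sleeping operations out of an
 * interrupt handler into the shared keventd workqueue.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		dev->stat = readl(dev->regs + MY_STAT);
 *		schedule_work(&dev->work);	// handler runs later, may sleep
 *		return IRQ_HANDLED;
 *	}
 */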

/*
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
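
/*
 * Illustrative sketch (my_release() and dev->ew are hypothetical): the
 * caller supplies the execute_work storage, so the same call works from
 * both process and interrupt context without caring which path was taken.
 * dev->ew must stay valid until my_release() has run.
 *
 *	if (execute_in_process_context(my_release, &dev->ew))
 *		pr_debug("release deferred to keventd\n");
 */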
 | 732 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 733 | int keventd_up(void) | 
 | 734 | { | 
 | 735 | 	return keventd_wq != NULL; | 
 | 736 | } | 
 | 737 |  | 
 | 738 | int current_is_keventd(void) | 
 | 739 | { | 
 | 740 | 	struct cpu_workqueue_struct *cwq; | 
| Hugh Dickins | d243769 | 2007-08-27 16:06:19 +0100 | [diff] [blame] | 741 | 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 742 | 	int ret = 0; | 
 | 743 |  | 
 | 744 | 	BUG_ON(!keventd_wq); | 
 | 745 |  | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 746 | 	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 747 | 	if (current == cwq->thread) | 
 | 748 | 		ret = 1; | 
 | 749 |  | 
 | 750 | 	return ret; | 
 | 751 |  | 
 | 752 | } | 
 | 753 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 754 | static struct cpu_workqueue_struct * | 
 | 755 | init_cpu_workqueue(struct workqueue_struct *wq, int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 756 | { | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 757 | 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 758 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 759 | 	cwq->wq = wq; | 
 | 760 | 	spin_lock_init(&cwq->lock); | 
 | 761 | 	INIT_LIST_HEAD(&cwq->worklist); | 
 | 762 | 	init_waitqueue_head(&cwq->more_work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 763 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 764 | 	return cwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 765 | } | 
 | 766 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 767 | static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 768 | { | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 769 | 	struct workqueue_struct *wq = cwq->wq; | 
 | 770 | 	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d"; | 
 | 771 | 	struct task_struct *p; | 
 | 772 |  | 
 | 773 | 	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); | 
 | 774 | 	/* | 
 | 775 | 	 * Nobody can add the work_struct to this cwq, | 
 | 776 | 	 *	if (caller is __create_workqueue) | 
 | 777 | 	 *		nobody should see this wq | 
 | 778 | 	 *	else // caller is CPU_UP_PREPARE | 
 | 779 | 	 *		cpu is not on cpu_online_map | 
 | 780 | 	 * so we can abort safely. | 
 | 781 | 	 */ | 
 | 782 | 	if (IS_ERR(p)) | 
 | 783 | 		return PTR_ERR(p); | 
 | 784 |  | 
 | 785 | 	cwq->thread = p; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 786 |  | 
 | 787 | 	return 0; | 
 | 788 | } | 
 | 789 |  | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 790 | static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | 
 | 791 | { | 
 | 792 | 	struct task_struct *p = cwq->thread; | 
 | 793 |  | 
 | 794 | 	if (p != NULL) { | 
 | 795 | 		if (cpu >= 0) | 
 | 796 | 			kthread_bind(p, cpu); | 
 | 797 | 		wake_up_process(p); | 
 | 798 | 	} | 
 | 799 | } | 
 | 800 |  | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 801 | struct workqueue_struct *__create_workqueue_key(const char *name, | 
 | 802 | 						int singlethread, | 
 | 803 | 						int freezeable, | 
| Johannes Berg | eb13ba8 | 2008-01-16 09:51:58 +0100 | [diff] [blame] | 804 | 						struct lock_class_key *key, | 
 | 805 | 						const char *lock_name) | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 806 | { | 
 | 807 | 	struct workqueue_struct *wq; | 
 | 808 | 	struct cpu_workqueue_struct *cwq; | 
 | 809 | 	int err = 0, cpu; | 
 | 810 |  | 
 | 811 | 	wq = kzalloc(sizeof(*wq), GFP_KERNEL); | 
 | 812 | 	if (!wq) | 
 | 813 | 		return NULL; | 
 | 814 |  | 
 | 815 | 	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); | 
 | 816 | 	if (!wq->cpu_wq) { | 
 | 817 | 		kfree(wq); | 
 | 818 | 		return NULL; | 
 | 819 | 	} | 
 | 820 |  | 
 | 821 | 	wq->name = name; | 
| Johannes Berg | eb13ba8 | 2008-01-16 09:51:58 +0100 | [diff] [blame] | 822 | 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); | 
| Oleg Nesterov | cce1a16 | 2007-05-09 02:34:13 -0700 | [diff] [blame] | 823 | 	wq->singlethread = singlethread; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 824 | 	wq->freezeable = freezeable; | 
| Oleg Nesterov | cce1a16 | 2007-05-09 02:34:13 -0700 | [diff] [blame] | 825 | 	INIT_LIST_HEAD(&wq->list); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 826 |  | 
 | 827 | 	if (singlethread) { | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 828 | 		cwq = init_cpu_workqueue(wq, singlethread_cpu); | 
 | 829 | 		err = create_workqueue_thread(cwq, singlethread_cpu); | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 830 | 		start_workqueue_thread(cwq, -1); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 831 | 	} else { | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 832 | 		cpu_maps_update_begin(); | 
| Oleg Nesterov | 6af8bf3 | 2008-07-29 22:33:49 -0700 | [diff] [blame] | 833 | 		/* | 
 | 834 | 		 * We must place this wq on list even if the code below fails. | 
 | 835 | 		 * cpu_down(cpu) can remove cpu from cpu_populated_map before | 
 | 836 | 		 * destroy_workqueue() takes the lock, in that case we leak | 
 | 837 | 		 * cwq[cpu]->thread. | 
 | 838 | 		 */ | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 839 | 		spin_lock(&workqueue_lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 840 | 		list_add(&wq->list, &workqueues); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 841 | 		spin_unlock(&workqueue_lock); | 
| Oleg Nesterov | 6af8bf3 | 2008-07-29 22:33:49 -0700 | [diff] [blame] | 842 | 		/* | 
 | 843 | 		 * We must initialize cwqs for each possible cpu even if we | 
 | 844 | 		 * are going to call destroy_workqueue() finally. Otherwise | 
 | 845 | 		 * cpu_up() can hit the uninitialized cwq once we drop the | 
 | 846 | 		 * lock. | 
 | 847 | 		 */ | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 848 | 		for_each_possible_cpu(cpu) { | 
 | 849 | 			cwq = init_cpu_workqueue(wq, cpu); | 
 | 850 | 			if (err || !cpu_online(cpu)) | 
 | 851 | 				continue; | 
 | 852 | 			err = create_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 853 | 			start_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 854 | 		} | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 855 | 		cpu_maps_update_done(); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 856 | 	} | 
 | 857 |  | 
 | 858 | 	if (err) { | 
 | 859 | 		destroy_workqueue(wq); | 
 | 860 | 		wq = NULL; | 
 | 861 | 	} | 
 | 862 | 	return wq; | 
 | 863 | } | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 864 | EXPORT_SYMBOL_GPL(__create_workqueue_key); | 
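/*
 * Illustrative usage sketch (not part of this file): callers normally go
 * through the create_workqueue()/create_singlethread_workqueue() wrappers
 * in <linux/workqueue.h>, which expand to __create_workqueue_key() with a
 * static struct lock_class_key for lockdep.  The names my_wq, my_work and
 * my_work_fn() below are hypothetical driver-side identifiers.
 */
static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/* runs in process context on one of the "my_events/N" threads */
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	my_wq = create_workqueue("my_events");	/* one thread per online cpu */
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);	/* my_work_fn(&my_work) runs shortly */
	return 0;
}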
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 865 |  | 
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 866 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 867 | { | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 868 | 	/* | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 869 | 	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD; | 
 | 870 | 	 * cpu_add_remove_lock protects cwq->thread. | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 871 | 	 */ | 
 | 872 | 	if (cwq->thread == NULL) | 
 | 873 | 		return; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 874 |  | 
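	/*
	 * Lockdep annotation: acquiring and immediately releasing
	 * wq->lockdep_map here makes lockdep warn if we are about to wait
	 * for this workqueue while running from one of its own work items,
	 * or while holding a lock that one of its work items also takes.
	 */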
| Ingo Molnar | 3295f0e | 2008-08-11 10:30:30 +0200 | [diff] [blame] | 875 | 	lock_map_acquire(&cwq->wq->lockdep_map); | 
 | 876 | 	lock_map_release(&cwq->wq->lockdep_map); | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 877 |  | 
| Oleg Nesterov | 13c2216 | 2007-07-17 04:03:55 -0700 | [diff] [blame] | 878 | 	flush_cpu_workqueue(cwq); | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 879 | 	/* | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 880 | 	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty, | 
| Oleg Nesterov | 13c2216 | 2007-07-17 04:03:55 -0700 | [diff] [blame] | 881 | 	 * a concurrent flush_workqueue() can insert a barrier after us. | 
 | 882 | 	 * However, in that case run_workqueue() won't return and check | 
 | 883 | 	 * kthread_should_stop() until it flushes all work_struct's. | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 884 | 	 * When ->worklist becomes empty it is safe to exit because no | 
 | 885 | 	 * more work_structs can be queued on this cwq: flush_workqueue | 
 | 886 | 	 * checks list_empty(), and a "normal" queue_work() can't use | 
 | 887 | 	 * a dead CPU. | 
 | 888 | 	 */ | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 889 | 	kthread_stop(cwq->thread); | 
 | 890 | 	cwq->thread = NULL; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 891 | } | 
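
/*
 * Hypothetical bug that the lockdep annotation above is meant to catch
 * (sketch, not part of this file): tearing a workqueue down from a work
 * item that runs on that very workqueue.  cleanup_workqueue_thread()
 * would then kthread_stop() the thread that is executing bad_work_fn()
 * and wait for it forever.  self_wq and bad_work_fn() are made-up names.
 */
static struct workqueue_struct *self_wq;	/* assumed set up elsewhere */

static void bad_work_fn(struct work_struct *work)
{
	destroy_workqueue(self_wq);		/* self-deadlock */
}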
 | 892 |  | 
 | 893 | /** | 
 | 894 |  * destroy_workqueue - safely terminate a workqueue | 
 | 895 |  * @wq: target workqueue | 
 | 896 |  * | 
 | 897 |  * Safely destroy a workqueue. All work currently pending will be done first. | 
 | 898 |  */ | 
 | 899 | void destroy_workqueue(struct workqueue_struct *wq) | 
 | 900 | { | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 901 | 	const cpumask_t *cpu_map = wq_cpu_map(wq); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 902 | 	int cpu; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 903 |  | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 904 | 	cpu_maps_update_begin(); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 905 | 	spin_lock(&workqueue_lock); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 906 | 	list_del(&wq->list); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 907 | 	spin_unlock(&workqueue_lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 908 |  | 
| Mike Travis | 363ab6f | 2008-05-12 21:21:13 +0200 | [diff] [blame] | 909 | 	for_each_cpu_mask_nr(cpu, *cpu_map) | 
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 910 | 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 911 | 	cpu_maps_update_done(); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 912 |  | 
 | 913 | 	free_percpu(wq->cpu_wq); | 
 | 914 | 	kfree(wq); | 
 | 915 | } | 
 | 916 | EXPORT_SYMBOL_GPL(destroy_workqueue); | 
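
/*
 * Illustrative teardown order (sketch, not part of this file), reusing the
 * hypothetical my_wq/my_work from the example above.  Work that may requeue
 * itself should be cancelled first; destroy_workqueue() then flushes what
 * is still pending and stops the per-cpu threads.
 */
static void __exit my_exit(void)
{
	cancel_work_sync(&my_work);	/* no longer queued or running */
	destroy_workqueue(my_wq);
}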
 | 917 |  | 
 | 918 | static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | 
 | 919 | 						unsigned long action, | 
 | 920 | 						void *hcpu) | 
 | 921 | { | 
 | 922 | 	unsigned int cpu = (unsigned long)hcpu; | 
 | 923 | 	struct cpu_workqueue_struct *cwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 924 | 	struct workqueue_struct *wq; | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 925 | 	int ret = NOTIFY_OK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 |  | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 927 | 	action &= ~CPU_TASKS_FROZEN; | 
 | 928 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | 	switch (action) { | 
 | 930 | 	case CPU_UP_PREPARE: | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 931 | 		cpu_set(cpu, cpu_populated_map); | 
 | 932 | 	} | 
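	/*
	 * If create_workqueue_thread() fails below, "action" is rewritten to
	 * CPU_UP_CANCELED and we jump back here, so every workqueue that
	 * already got a thread for this cpu has it torn down again.
	 */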
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 933 | undo: | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 934 | 	list_for_each_entry(wq, &workqueues, list) { | 
 | 935 | 		cwq = per_cpu_ptr(wq->cpu_wq, cpu); | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 936 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 937 | 		switch (action) { | 
 | 938 | 		case CPU_UP_PREPARE: | 
 | 939 | 			if (!create_workqueue_thread(cwq, cpu)) | 
 | 940 | 				break; | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 941 | 			printk(KERN_ERR "workqueue [%s]: failed to create thread for cpu %u\n", | 
 | 942 | 				wq->name, cpu); | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 943 | 			action = CPU_UP_CANCELED; | 
 | 944 | 			ret = NOTIFY_BAD; | 
 | 945 | 			goto undo; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 946 |  | 
 | 947 | 		case CPU_ONLINE: | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 948 | 			start_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 949 | 			break; | 
 | 950 |  | 
 | 951 | 		case CPU_UP_CANCELED: | 
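			/*
			 * The cpu never came online, so any thread created in
			 * CPU_UP_PREPARE was never started or bound: wake it
			 * unbound so it can flush its worklist and be stopped,
			 * then fall through to the cleanup.
			 */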
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 952 | 			start_workqueue_thread(cwq, -1); | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 953 | 		case CPU_POST_DEAD: | 
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 954 | 			cleanup_workqueue_thread(cwq); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 955 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 | 	} | 
 | 958 |  | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 959 | 	switch (action) { | 
 | 960 | 	case CPU_UP_CANCELED: | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 961 | 	case CPU_POST_DEAD: | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 962 | 		cpu_clear(cpu, cpu_populated_map); | 
 | 963 | 	} | 
 | 964 |  | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 965 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 966 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 967 |  | 
| Oleg Nesterov | c12920d | 2007-05-09 02:34:14 -0700 | [diff] [blame] | 968 | void __init init_workqueues(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | { | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 970 | 	cpu_populated_map = cpu_online_map; | 
| Nathan Lynch | f756d5e | 2006-01-08 01:05:12 -0800 | [diff] [blame] | 971 | 	singlethread_cpu = first_cpu(cpu_possible_map); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 972 | 	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | 	hotcpu_notifier(workqueue_cpu_callback, 0); | 
 | 974 | 	keventd_wq = create_workqueue("events"); | 
 | 975 | 	BUG_ON(!keventd_wq); | 
 | 976 | } |
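
/*
 * Illustrative use of the shared "events" queue created above (sketch, not
 * part of this file): schedule_work() from <linux/workqueue.h> queues onto
 * keventd_wq, so most callers never need a private workqueue.  my_defer_fn,
 * my_deferred and my_irq_handler() are hypothetical, and the usual driver
 * includes (e.g. <linux/interrupt.h>) are assumed.
 */
static void my_defer_fn(struct work_struct *work)
{
	/* the heavy lifting, done in process context on an events/N thread */
}
static DECLARE_WORK(my_deferred, my_defer_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	schedule_work(&my_deferred);	/* cannot sleep here, so defer */
	return IRQ_HANDLED;
}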