/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * The per-CPU workqueue (if single-threaded, we always use the first
 * possible CPU).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

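/*
 * Illustrative sketch (not part of the original file): with
 * CONFIG_DEBUG_OBJECTS_WORK enabled, an on-stack work item must be set up
 * with INIT_WORK_ON_STACK() and torn down with destroy_work_on_stack()
 * before its stack frame disappears. All names here are hypothetical:
 *
 *	void my_caller(void)
 *	{
 *		struct work_struct work;
 *
 *		INIT_WORK_ON_STACK(&work, my_work_fn);
 *		schedule_work(&work);
 *		flush_work(&work);
 *		destroy_work_on_stack(&work);
 *	}
 */
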
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes the CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map; the cpumask below is more of a documentation
 * aid than an optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
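
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * declares a work item bound to a handler and queues it on its own
 * workqueue. my_wq, my_work and my_work_fn are hypothetical names:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		... runs in process context, may sleep ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 */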

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
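
/*
 * Illustrative sketch (not part of the original file): a delayed work item
 * pairs a work_struct with a timer. Hypothetical usage, either on the
 * submitting CPU or pinned to a specific one:
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 *	queue_delayed_work_on(1, my_wq, &my_dwork, HZ);	(pinned to CPU 1)
 */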

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct from
		 * inside the function that is called from it; lockdep
		 * needs to take this into account as well. To avoid
		 * bogus "held lock freed" warnings as well as problems
		 * when looking into work->lockdep_map, make a copy and
		 * use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		debug_work_deactivate(work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
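
/*
 * Illustrative sketch (not part of the original file): a driver shutdown
 * path typically stops new submissions, then flushes its queue before
 * freeing anything the handlers touch. my_wq is a hypothetical queue:
 *
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */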

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
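
/*
 * Illustrative sketch (not part of the original file): a self-rearming
 * delayed work is shut down with cancel_delayed_work_sync(); on return the
 * handler is not running and the timer is not pending. Names hypothetical:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		poll_hardware();
 *		queue_delayed_work(my_wq, &my_dwork, HZ);
 *	}
 *
 *	cancel_delayed_work_sync(&my_dwork);
 */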

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
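
/*
 * Illustrative sketch (not part of the original file): one-off deferrals
 * usually go through the kernel-global queue rather than a private
 * workqueue. my_work and my_work_fn are hypothetical names:
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);	(from interrupt or process context)
 */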

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a delayed_work's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;
		cwq = wq_per_cpu(keventd_wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
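
/*
 * Illustrative sketch (not part of the original file): the handler passed
 * to schedule_on_each_cpu() receives the per-cpu work_struct it was queued
 * with, and runs once on every online CPU. Names hypothetical:
 *
 *	static void drain_local_cache(struct work_struct *unused)
 *	{
 *		... operates on this CPU's data ...
 *	}
 *
 *	int ret = schedule_on_each_cpu(drain_local_cache);
 */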

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
|  | 879 |  | 
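|  |  | /* | 
|  |  |  * Example (editorial sketch; "struct mydev" is hypothetical): the | 
|  |  |  * execute_work storage must outlive a possible deferral, so callers | 
|  |  |  * typically embed it in the object being operated on. | 
|  |  |  * | 
|  |  |  *	struct mydev { | 
|  |  |  *		struct execute_work release_ew; | 
|  |  |  *	}; | 
|  |  |  * | 
|  |  |  *	static void mydev_release(struct work_struct *work) | 
|  |  |  *	{ | 
|  |  |  *		struct mydev *dev = container_of(work, struct mydev, | 
|  |  |  *						 release_ew.work); | 
|  |  |  *		kfree(dev); | 
|  |  |  *	} | 
|  |  |  * | 
|  |  |  *	execute_in_process_context(mydev_release, &dev->release_ew); | 
|  |  |  */ | 
|  |  |  | 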
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 880 | int keventd_up(void) | 
|  | 881 | { | 
|  | 882 | return keventd_wq != NULL; | 
|  | 883 | } | 
|  | 884 |  | 
|  | 885 | int current_is_keventd(void) | 
|  | 886 | { | 
|  | 887 | struct cpu_workqueue_struct *cwq; | 
| Hugh Dickins | d243769 | 2007-08-27 16:06:19 +0100 | [diff] [blame] | 888 | int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | int ret = 0; | 
|  | 890 |  | 
|  | 891 | BUG_ON(!keventd_wq); | 
|  | 892 |  | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 893 | cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 894 | if (current == cwq->thread) | 
|  | 895 | ret = 1; | 
|  | 896 |  | 
|  | 897 | return ret; | 
|  | 898 |  | 
|  | 899 | } | 
|  | 900 |  | 
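|  |  | /* | 
|  |  |  * Note (editorial): current_is_keventd() is what lets | 
|  |  |  * schedule_on_each_cpu() above call func() directly on its own CPU. | 
|  |  |  * Queueing there instead and then flushing would deadlock: the | 
|  |  |  * keventd thread would be waiting for itself to run the item. | 
|  |  |  * The same test suits any hypothetical caller of this shape: | 
|  |  |  * | 
|  |  |  *	if (current_is_keventd()) | 
|  |  |  *		do_it_now(); | 
|  |  |  *	else | 
|  |  |  *		schedule_work(&my_work); | 
|  |  |  */ | 
|  |  |  | 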
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 901 | static struct cpu_workqueue_struct * | 
|  | 902 | init_cpu_workqueue(struct workqueue_struct *wq, int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 903 | { | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 904 | struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 905 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 906 | cwq->wq = wq; | 
|  | 907 | spin_lock_init(&cwq->lock); | 
|  | 908 | INIT_LIST_HEAD(&cwq->worklist); | 
|  | 909 | init_waitqueue_head(&cwq->more_work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 910 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 911 | return cwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 912 | } | 
|  | 913 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 914 | static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 915 | { | 
| Heiko Carstens | 0d557dc | 2008-10-13 23:50:09 +0200 | [diff] [blame] | 916 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 917 | struct workqueue_struct *wq = cwq->wq; | 
| David Howells | 6cc88bc | 2008-11-14 10:39:21 +1100 | [diff] [blame] | 918 | const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 919 | struct task_struct *p; | 
|  | 920 |  | 
|  | 921 | p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); | 
|  | 922 | /* | 
|  | 923 | * Nobody can add a work_struct to this cwq yet: | 
|  | 924 | *	if (caller is __create_workqueue) | 
|  | 925 | *		nobody can see this wq yet | 
|  | 926 | *	else // caller is CPU_UP_PREPARE | 
|  | 927 | *		cpu is not yet in cpu_online_map | 
|  | 928 | * so if kthread_create() fails we can abort safely. | 
|  | 929 | */ | 
|  | 930 | if (IS_ERR(p)) | 
|  | 931 | return PTR_ERR(p); | 
| Heiko Carstens | 0d557dc | 2008-10-13 23:50:09 +0200 | [diff] [blame] | 932 | if (cwq->wq->rt) | 
|  | 933 | sched_setscheduler_nocheck(p, SCHED_FIFO, &param); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 934 | cwq->thread = p; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 935 |  | 
| Frederic Weisbecker | e1d8aa9 | 2009-01-12 23:15:46 +0100 | [diff] [blame] | 936 | trace_workqueue_creation(cwq->thread, cpu); | 
|  | 937 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 938 | return 0; | 
|  | 939 | } | 
|  | 940 |  | 
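|  |  | /* | 
|  |  |  * Note (editorial): when wq->rt is set the worker thread is promoted | 
|  |  |  * to SCHED_FIFO at MAX_RT_PRIO-1.  In this era's <linux/workqueue.h>, | 
|  |  |  * a driver would request that via the create_rt_workqueue() wrapper, | 
|  |  |  * e.g. (queue name hypothetical): | 
|  |  |  * | 
|  |  |  *	struct workqueue_struct *wq = create_rt_workqueue("mydrv_rt"); | 
|  |  |  */ | 
|  |  |  | 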
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 941 | static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | 
|  | 942 | { | 
|  | 943 | struct task_struct *p = cwq->thread; | 
|  | 944 |  | 
|  | 945 | if (p != NULL) { | 
|  | 946 | if (cpu >= 0) | 
|  | 947 | kthread_bind(p, cpu); | 
|  | 948 | wake_up_process(p); | 
|  | 949 | } | 
|  | 950 | } | 
|  | 951 |  | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 952 | struct workqueue_struct *__create_workqueue_key(const char *name, | 
|  | 953 | int singlethread, | 
|  | 954 | int freezeable, | 
| Heiko Carstens | 0d557dc | 2008-10-13 23:50:09 +0200 | [diff] [blame] | 955 | int rt, | 
| Johannes Berg | eb13ba8 | 2008-01-16 09:51:58 +0100 | [diff] [blame] | 956 | struct lock_class_key *key, | 
|  | 957 | const char *lock_name) | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 958 | { | 
|  | 959 | struct workqueue_struct *wq; | 
|  | 960 | struct cpu_workqueue_struct *cwq; | 
|  | 961 | int err = 0, cpu; | 
|  | 962 |  | 
|  | 963 | wq = kzalloc(sizeof(*wq), GFP_KERNEL); | 
|  | 964 | if (!wq) | 
|  | 965 | return NULL; | 
|  | 966 |  | 
|  | 967 | wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); | 
|  | 968 | if (!wq->cpu_wq) { | 
|  | 969 | kfree(wq); | 
|  | 970 | return NULL; | 
|  | 971 | } | 
|  | 972 |  | 
|  | 973 | wq->name = name; | 
| Johannes Berg | eb13ba8 | 2008-01-16 09:51:58 +0100 | [diff] [blame] | 974 | lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); | 
| Oleg Nesterov | cce1a16 | 2007-05-09 02:34:13 -0700 | [diff] [blame] | 975 | wq->singlethread = singlethread; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 976 | wq->freezeable = freezeable; | 
| Heiko Carstens | 0d557dc | 2008-10-13 23:50:09 +0200 | [diff] [blame] | 977 | wq->rt = rt; | 
| Oleg Nesterov | cce1a16 | 2007-05-09 02:34:13 -0700 | [diff] [blame] | 978 | INIT_LIST_HEAD(&wq->list); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 979 |  | 
|  | 980 | if (singlethread) { | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 981 | cwq = init_cpu_workqueue(wq, singlethread_cpu); | 
|  | 982 | err = create_workqueue_thread(cwq, singlethread_cpu); | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 983 | start_workqueue_thread(cwq, -1); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 984 | } else { | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 985 | cpu_maps_update_begin(); | 
| Oleg Nesterov | 6af8bf3 | 2008-07-29 22:33:49 -0700 | [diff] [blame] | 986 | /* | 
|  | 987 | * We must place this wq on the list even if the code below fails. | 
|  | 988 | * cpu_down(cpu) can remove cpu from cpu_populated_map before | 
|  | 989 | * destroy_workqueue() takes the lock, in which case we would leak | 
|  | 990 | * cwq[cpu]->thread. | 
|  | 991 | */ | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 992 | spin_lock(&workqueue_lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 993 | list_add(&wq->list, &workqueues); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 994 | spin_unlock(&workqueue_lock); | 
| Oleg Nesterov | 6af8bf3 | 2008-07-29 22:33:49 -0700 | [diff] [blame] | 995 | /* | 
|  | 996 | * We must initialize cwqs for each possible cpu even if we | 
|  | 997 | * are going to call destroy_workqueue() in the end. Otherwise | 
|  | 998 | * cpu_up() can hit the uninitialized cwq once we drop the | 
|  | 999 | * lock. | 
|  | 1000 | */ | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1001 | for_each_possible_cpu(cpu) { | 
|  | 1002 | cwq = init_cpu_workqueue(wq, cpu); | 
|  | 1003 | if (err || !cpu_online(cpu)) | 
|  | 1004 | continue; | 
|  | 1005 | err = create_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 1006 | start_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1007 | } | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 1008 | cpu_maps_update_done(); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1009 | } | 
|  | 1010 |  | 
|  | 1011 | if (err) { | 
|  | 1012 | destroy_workqueue(wq); | 
|  | 1013 | wq = NULL; | 
|  | 1014 | } | 
|  | 1015 | return wq; | 
|  | 1016 | } | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 1017 | EXPORT_SYMBOL_GPL(__create_workqueue_key); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1018 |  | 
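|  |  | /* | 
|  |  |  * Usage sketch (editorial): drivers reach __create_workqueue_key() | 
|  |  |  * through the <linux/workqueue.h> wrappers, which supply the lockdep | 
|  |  |  * key.  A typical (hypothetical) setup: | 
|  |  |  * | 
|  |  |  *	struct workqueue_struct *wq; | 
|  |  |  * | 
|  |  |  *	wq = create_workqueue("mydrv");		(one thread per CPU) | 
|  |  |  *	if (!wq) | 
|  |  |  *		return -ENOMEM; | 
|  |  |  *	queue_work(wq, &my_work); | 
|  |  |  * | 
|  |  |  * create_singlethread_workqueue() and create_freezeable_workqueue() | 
|  |  |  * select the singlethread and freezeable variants seen above. | 
|  |  |  */ | 
|  |  |  | 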
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 1019 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1020 | { | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 1021 | /* | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 1022 | * Our caller is either destroy_workqueue() or CPU_POST_DEAD; | 
|  | 1023 | * in both cases cpu_add_remove_lock protects cwq->thread. | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 1024 | */ | 
|  | 1025 | if (cwq->thread == NULL) | 
|  | 1026 | return; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1027 |  | 
| Ingo Molnar | 3295f0e | 2008-08-11 10:30:30 +0200 | [diff] [blame] | 1028 | lock_map_acquire(&cwq->wq->lockdep_map); | 
|  | 1029 | lock_map_release(&cwq->wq->lockdep_map); | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 1030 |  | 
| Oleg Nesterov | 13c2216 | 2007-07-17 04:03:55 -0700 | [diff] [blame] | 1031 | flush_cpu_workqueue(cwq); | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 1032 | /* | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 1033 | * If the caller is CPU_POST_DEAD and cwq->worklist was not empty, | 
| Oleg Nesterov | 13c2216 | 2007-07-17 04:03:55 -0700 | [diff] [blame] | 1034 | * a concurrent flush_workqueue() can insert a barrier after us. | 
|  | 1035 | * However, in that case run_workqueue() won't return and check | 
|  | 1036 | * kthread_should_stop() until it flushes all work_structs. | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 1037 | * When ->worklist becomes empty it is safe to exit because no | 
|  | 1038 | * more work_structs can be queued on this cwq: flush_workqueue | 
|  | 1039 | * checks list_empty(), and a "normal" queue_work() can't use | 
|  | 1040 | * a dead CPU. | 
|  | 1041 | */ | 
| Frederic Weisbecker | e1d8aa9 | 2009-01-12 23:15:46 +0100 | [diff] [blame] | 1042 | trace_workqueue_destruction(cwq->thread); | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 1043 | kthread_stop(cwq->thread); | 
|  | 1044 | cwq->thread = NULL; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1045 | } | 
|  | 1046 |  | 
|  | 1047 | /** | 
|  | 1048 | * destroy_workqueue - safely terminate a workqueue | 
|  | 1049 | * @wq: target workqueue | 
|  | 1050 | * | 
|  | 1051 | * Safely destroy a workqueue. All work currently pending will be done first. | 
|  | 1052 | */ | 
|  | 1053 | void destroy_workqueue(struct workqueue_struct *wq) | 
|  | 1054 | { | 
| Rusty Russell | e7577c5 | 2009-01-01 10:12:25 +1030 | [diff] [blame] | 1055 | const struct cpumask *cpu_map = wq_cpu_map(wq); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 1056 | int cpu; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1057 |  | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 1058 | cpu_maps_update_begin(); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 1059 | spin_lock(&workqueue_lock); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 1060 | list_del(&wq->list); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 1061 | spin_unlock(&workqueue_lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1062 |  | 
| Rusty Russell | aa85ea5 | 2009-03-30 22:05:15 -0600 | [diff] [blame] | 1063 | for_each_cpu(cpu, cpu_map) | 
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 1064 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 1065 | cpu_maps_update_done(); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1066 |  | 
|  | 1067 | free_percpu(wq->cpu_wq); | 
|  | 1068 | kfree(wq); | 
|  | 1069 | } | 
|  | 1070 | EXPORT_SYMBOL_GPL(destroy_workqueue); | 
|  | 1071 |  | 
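|  |  | /* | 
|  |  |  * Usage note (editorial): destroy_workqueue() flushes what is already | 
|  |  |  * queued, but the caller must stop anything that could re-queue work | 
|  |  |  * first.  A hypothetical teardown: | 
|  |  |  * | 
|  |  |  *	cancel_delayed_work_sync(&dev->poll_work); | 
|  |  |  *	destroy_workqueue(dev->wq); | 
|  |  |  */ | 
|  |  |  | 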
|  | 1072 | static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | 
|  | 1073 | unsigned long action, | 
|  | 1074 | void *hcpu) | 
|  | 1075 | { | 
|  | 1076 | unsigned int cpu = (unsigned long)hcpu; | 
|  | 1077 | struct cpu_workqueue_struct *cwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | struct workqueue_struct *wq; | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 1079 | int ret = NOTIFY_OK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1080 |  | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1081 | action &= ~CPU_TASKS_FROZEN; | 
|  | 1082 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 | switch (action) { | 
|  | 1084 | case CPU_UP_PREPARE: | 
| Rusty Russell | e7577c5 | 2009-01-01 10:12:25 +1030 | [diff] [blame] | 1085 | cpumask_set_cpu(cpu, cpu_populated_map); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1086 | } | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 1087 | undo: | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1088 | list_for_each_entry(wq, &workqueues, list) { | 
|  | 1089 | cwq = per_cpu_ptr(wq->cpu_wq, cpu); | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 1090 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1091 | switch (action) { | 
|  | 1092 | case CPU_UP_PREPARE: | 
|  | 1093 | if (!create_workqueue_thread(cwq, cpu)) | 
|  | 1094 | break; | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 1095 | printk(KERN_ERR "workqueue [%s]: failed to create thread for CPU %i\n", | 
|  | 1096 | wq->name, cpu); | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 1097 | action = CPU_UP_CANCELED; | 
|  | 1098 | ret = NOTIFY_BAD; | 
|  | 1099 | goto undo; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1100 |  | 
|  | 1101 | case CPU_ONLINE: | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 1102 | start_workqueue_thread(cwq, cpu); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1103 | break; | 
|  | 1104 |  | 
|  | 1105 | case CPU_UP_CANCELED: | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 1106 | start_workqueue_thread(cwq, -1); | 
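|  |  | /* fall through: CPU_UP_CANCELED shares cleanup with CPU_POST_DEAD */ | 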
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 1107 | case CPU_POST_DEAD: | 
| Oleg Nesterov | 1e35eaa | 2008-04-29 01:00:28 -0700 | [diff] [blame] | 1108 | cleanup_workqueue_thread(cwq); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1109 | break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | } | 
|  | 1112 |  | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 1113 | switch (action) { | 
|  | 1114 | case CPU_UP_CANCELED: | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 1115 | case CPU_POST_DEAD: | 
| Rusty Russell | e7577c5 | 2009-01-01 10:12:25 +1030 | [diff] [blame] | 1116 | cpumask_clear_cpu(cpu, cpu_populated_map); | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 1117 | } | 
|  | 1118 |  | 
| Oleg Nesterov | 8448502 | 2008-07-25 01:47:54 -0700 | [diff] [blame] | 1119 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 |  | 
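|  |  | /* | 
|  |  |  * Note (editorial): returning NOTIFY_BAD from CPU_UP_PREPARE makes the | 
|  |  |  * hotplug core abort the bring-up and send CPU_UP_CANCELED, which is | 
|  |  |  * why the "undo" path above replays the loop with the action | 
|  |  |  * rewritten.  A minimal notifier of the same shape (helper name | 
|  |  |  * hypothetical): | 
|  |  |  * | 
|  |  |  *	static int __cpuinit my_cpu_callback(struct notifier_block *nfb, | 
|  |  |  *					     unsigned long action, void *hcpu) | 
|  |  |  *	{ | 
|  |  |  *		switch (action & ~CPU_TASKS_FROZEN) { | 
|  |  |  *		case CPU_UP_PREPARE: | 
|  |  |  *			if (alloc_my_percpu_data((unsigned long)hcpu)) | 
|  |  |  *				return NOTIFY_BAD; | 
|  |  |  *		} | 
|  |  |  *		return NOTIFY_OK; | 
|  |  |  *	} | 
|  |  |  */ | 
|  |  |  | 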
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1122 | #ifdef CONFIG_SMP | 
| Rusty Russell | 8ccad40 | 2009-01-16 15:31:15 -0800 | [diff] [blame] | 1123 |  | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1124 | struct work_for_cpu { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 1125 | struct completion completion; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1126 | long (*fn)(void *); | 
|  | 1127 | void *arg; | 
|  | 1128 | long ret; | 
|  | 1129 | }; | 
|  | 1130 |  | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 1131 | static int do_work_for_cpu(void *_wfc) | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1132 | { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 1133 | struct work_for_cpu *wfc = _wfc; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1134 | wfc->ret = wfc->fn(wfc->arg); | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 1135 | complete(&wfc->completion); | 
|  | 1136 | return 0; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1137 | } | 
|  | 1138 |  | 
|  | 1139 | /** | 
|  | 1140 | * work_on_cpu - run a function in process context on a particular cpu | 
|  | 1141 | * @cpu: the cpu to run on | 
|  | 1142 | * @fn: the function to run | 
|  | 1143 | * @arg: the function arg | 
|  | 1144 | * | 
| Rusty Russell | 31ad908 | 2009-01-16 15:31:15 -0800 | [diff] [blame] | 1145 | * This will return the value @fn returns. | 
|  | 1146 | * It is up to the caller to ensure that the cpu doesn't go offline. | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 1147 | * The caller must not hold any locks which would prevent @fn from completing. | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1148 | */ | 
|  | 1149 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | 
|  | 1150 | { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 1151 | struct task_struct *sub_thread; | 
|  | 1152 | struct work_for_cpu wfc = { | 
|  | 1153 | .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), | 
|  | 1154 | .fn = fn, | 
|  | 1155 | .arg = arg, | 
|  | 1156 | }; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1157 |  | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 1158 | sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); | 
|  | 1159 | if (IS_ERR(sub_thread)) | 
|  | 1160 | return PTR_ERR(sub_thread); | 
|  | 1161 | kthread_bind(sub_thread, cpu); | 
|  | 1162 | wake_up_process(sub_thread); | 
|  | 1163 | wait_for_completion(&wfc.completion); | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 1164 | return wfc.ret; | 
|  | 1165 | } | 
|  | 1166 | EXPORT_SYMBOL_GPL(work_on_cpu); | 
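|  |  |  | 
|  |  | /* | 
|  |  |  * Example (editorial sketch; helper names are hypothetical): | 
|  |  |  * work_on_cpu() suits slow paths that must run on a given CPU, such | 
|  |  |  * as reading a CPU-local register, without open-coding a kthread. | 
|  |  |  * | 
|  |  |  *	static long read_local_state(void *arg) | 
|  |  |  *	{ | 
|  |  |  *		return my_cpu_local_read((unsigned long)arg); | 
|  |  |  *	} | 
|  |  |  * | 
|  |  |  *	long val = work_on_cpu(cpu, read_local_state, (void *)reg); | 
|  |  |  * | 
|  |  |  * The caller is responsible for keeping @cpu online, e.g. by holding | 
|  |  |  * get_online_cpus() across the call. | 
|  |  |  */ | 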
|  | 1167 | #endif /* CONFIG_SMP */ | 
|  | 1168 |  | 
| Oleg Nesterov | c12920d | 2007-05-09 02:34:14 -0700 | [diff] [blame] | 1169 | void __init init_workqueues(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1170 | { | 
| Rusty Russell | e7577c5 | 2009-01-01 10:12:25 +1030 | [diff] [blame] | 1171 | alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL); | 
|  | 1172 |  | 
|  | 1173 | cpumask_copy(cpu_populated_map, cpu_online_mask); | 
|  | 1174 | singlethread_cpu = cpumask_first(cpu_possible_mask); | 
|  | 1175 | cpu_singlethread_map = cpumask_of(singlethread_cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | hotcpu_notifier(workqueue_cpu_callback, 0); | 
|  | 1177 | keventd_wq = create_workqueue("events"); | 
|  | 1178 | BUG_ON(!keventd_wq); | 
|  | 1179 | } |