blob: 5592eba79bf238192012514371b7bfc8b663dd0f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Tejun Heoc54fce62010-09-10 16:51:36 +02002 * kernel/workqueue.c - generic async execution with shared worker pool
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 *
Tejun Heoc54fce62010-09-10 16:51:36 +02004 * Copyright (C) 2002 Ingo Molnar
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
Tejun Heoc54fce62010-09-10 16:51:36 +02006 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
Christoph Lameter89ada672005-10-30 15:01:59 -080011 *
Christoph Lametercde53532008-07-04 09:59:22 -070012 * Made to use alloc_percpu by Christoph Lameter.
Tejun Heoc54fce62010-09-10 16:51:36 +020013 *
14 * Copyright (C) 2010 SUSE Linux Products GmbH
15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
16 *
17 * This is the generic async execution mechanism. Work items as are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There is one worker pool for each CPU and
20 * one extra for works which are better served by workers which are
21 * not bound to any specific CPU.
22 *
23 * Please read Documentation/workqueue.txt for details.
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 */
25
Paul Gortmaker9984de12011-05-23 14:51:41 -040026#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/kernel.h>
28#include <linux/sched.h>
29#include <linux/init.h>
30#include <linux/signal.h>
31#include <linux/completion.h>
32#include <linux/workqueue.h>
33#include <linux/slab.h>
34#include <linux/cpu.h>
35#include <linux/notifier.h>
36#include <linux/kthread.h>
James Bottomley1fa44ec2006-02-23 12:43:43 -060037#include <linux/hardirq.h>
Christoph Lameter46934022006-10-11 01:21:26 -070038#include <linux/mempolicy.h>
Rafael J. Wysocki341a5952006-12-06 20:34:49 -080039#include <linux/freezer.h>
Peter Zijlstrad5abe662006-12-06 20:37:26 -080040#include <linux/kallsyms.h>
41#include <linux/debug_locks.h>
Johannes Berg4e6045f2007-10-18 23:39:55 -070042#include <linux/lockdep.h>
Tejun Heoc34056a2010-06-29 10:07:11 +020043#include <linux/idr.h>
Syed Rameez Mustafa1bee7b92013-07-15 11:52:09 -070044#include <linux/bug.h>
shumash7f490b22015-10-06 09:49:52 -060045#include <linux/moduleparam.h>
Tejun Heoe22bee72010-06-29 10:07:14 +020046
47#include "workqueue_sched.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070048
/*
 * Flag and constant definitions.  The "L:"-style locking annotations
 * used in the per-field comments below are defined in the big comment
 * preceding struct global_cwq.
 */
enum {
	/* global_cwq flags */
	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */

	/* pool flags */
	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	POOL_MANAGING_WORKERS	= 1 << 1,	/* managing workers */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	/* any of these flags excludes the worker from nr_running accounting */
	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
	HIGHPRI_NICE_LEVEL	= -20,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101
102/*
Tejun Heo4690c4a2010-06-29 10:07:10 +0200103 * Structure fields follow one of the following exclusion rules.
104 *
Tejun Heoe41e7042010-08-24 14:22:47 +0200105 * I: Modifiable by initialization/destruction paths and read-only for
106 * everyone else.
Tejun Heo4690c4a2010-06-29 10:07:10 +0200107 *
Tejun Heoe22bee72010-06-29 10:07:14 +0200108 * P: Preemption protected. Disabling preemption is enough and should
109 * only be modified and accessed from the local cpu.
110 *
Tejun Heo8b03ae32010-06-29 10:07:12 +0200111 * L: gcwq->lock protected. Access with gcwq->lock held.
Tejun Heo4690c4a2010-06-29 10:07:10 +0200112 *
Tejun Heoe22bee72010-06-29 10:07:14 +0200113 * X: During normal operation, modification requires gcwq->lock and
114 * should be done only from local cpu. Either disabling preemption
115 * on local cpu or grabbing gcwq->lock is enough for read access.
Tejun Heof3421792010-07-02 10:03:51 +0200116 * If GCWQ_DISASSOCIATED is set, it's identical to L.
Tejun Heoe22bee72010-06-29 10:07:14 +0200117 *
Tejun Heo73f53c42010-06-29 10:07:11 +0200118 * F: wq->flush_mutex protected.
119 *
Tejun Heo4690c4a2010-06-29 10:07:10 +0200120 * W: workqueue_lock protected.
121 */
122
Tejun Heo8b03ae32010-06-29 10:07:12 +0200123struct global_cwq;
Tejun Heo58658882012-07-12 14:46:37 -0700124struct worker_pool;
Tejun Heoc34056a2010-06-29 10:07:11 +0200125
Tejun Heoe22bee72010-06-29 10:07:14 +0200126/*
127 * The poor guys doing the actual heavy lifting. All on-duty workers
128 * are either serving the manager role, on idle list or on busy hash.
129 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: WORKER_* flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};
149
/*
 * One pool of workers.  Each gcwq carries NR_WORKER_POOLS of these
 * (normal and highpri); workers and pending work items belong to
 * exactly one pool.
 */
struct worker_pool {
	struct global_cwq	*gcwq;		/* I: the owning gcwq */
	unsigned int		flags;		/* X: POOL_* flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */
	struct worker		*first_idle;	/* L: first idle worker */
};
165
Tejun Heo4690c4a2010-06-29 10:07:10 +0200166/*
Tejun Heoe22bee72010-06-29 10:07:14 +0200167 * Global per-cpu workqueue. There's one and only one for each cpu
168 * and all works are queued and processed here regardless of their
169 * target workqueues.
Tejun Heo8b03ae32010-06-29 10:07:12 +0200170 */
171struct global_cwq {
172 spinlock_t lock; /* the gcwq lock */
173 unsigned int cpu; /* I: the associated cpu */
Tejun Heodb7bccf2010-06-29 10:07:12 +0200174 unsigned int flags; /* L: GCWQ_* flags */
Tejun Heoc8e55f32010-06-29 10:07:12 +0200175
Tejun Heo58658882012-07-12 14:46:37 -0700176 /* workers are chained either in busy_hash or pool idle_list */
Tejun Heoc8e55f32010-06-29 10:07:12 +0200177 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
178 /* L: hash of busy workers */
179
Tejun Heodcb32ee2012-07-13 22:16:45 -0700180 struct worker_pool pools[2]; /* normal and highpri pools */
Tejun Heodb7bccf2010-06-29 10:07:12 +0200181
182 struct task_struct *trustee; /* L: for gcwq shutdown */
183 unsigned int trustee_state; /* L: trustee state */
184 wait_queue_head_t trustee_wait; /* trustee wait */
Tejun Heo8b03ae32010-06-29 10:07:12 +0200185} ____cacheline_aligned_in_smp;
186
187/*
Tejun Heo502ca9d2010-06-29 10:07:13 +0200188 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
Tejun Heo0f900042010-06-29 10:07:11 +0200189 * work_struct->data are used for flags and thus cwqs need to be
190 * aligned at two's power of the number of flag bits.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 */
struct cpu_workqueue_struct {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204/*
Tejun Heo73f53c42010-06-29 10:07:11 +0200205 * Structure used to wait for workqueue flush.
206 */
/* one instance per waiter in flush_workqueue() */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
Tejun Heo73f53c42010-06-29 10:07:11 +0200213/*
Tejun Heof2e005a2010-07-20 15:59:09 +0200214 * All cpumasks are assumed to be always set on UP and thus can't be
215 * used to determine whether there's something to be done.
216 */
217#ifdef CONFIG_SMP
218typedef cpumask_var_t mayday_mask_t;
219#define mayday_test_and_set_cpu(cpu, mask) \
220 cpumask_test_and_set_cpu((cpu), (mask))
221#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
222#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
Tejun Heo9c375472010-08-31 11:18:34 +0200223#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
Tejun Heof2e005a2010-07-20 15:59:09 +0200224#define free_mayday_mask(mask) free_cpumask_var((mask))
225#else
226typedef unsigned long mayday_mask_t;
227#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
228#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
229#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
230#define alloc_mayday_mask(maskp, gfp) true
231#define free_mayday_mask(mask) do { } while (0)
232#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233
234/*
235 * The externally visible workqueue abstraction is an array of
236 * per-CPU workqueues:
237 */
/*
 * The externally visible workqueue.  Work execution happens on the
 * shared gcwq/pool side; this structure mostly tracks per-workqueue
 * attributes, flushing state and the rescuer.
 */
struct workqueue_struct {
	unsigned int		flags;		/* W: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* W: drain in progress */
	int			saved_max_active; /* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[];		/* I: workqueue name */
};
265
/*
 * see the comment above the definition of WQ_POWER_EFFICIENT
 *
 * Runtime-tunable via the "workqueue.power_efficient" boot/module
 * parameter; default comes from CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
 */
#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
static bool wq_power_efficient = true;
#else
static bool wq_power_efficient;
#endif

module_param_named(power_efficient, wq_power_efficient, bool, 0644);
274
Tejun Heod320c032010-06-29 10:07:14 +0200275struct workqueue_struct *system_wq __read_mostly;
276struct workqueue_struct *system_long_wq __read_mostly;
277struct workqueue_struct *system_nrt_wq __read_mostly;
Tejun Heof3421792010-07-02 10:03:51 +0200278struct workqueue_struct *system_unbound_wq __read_mostly;
Tejun Heo24d51ad2011-02-21 09:52:50 +0100279struct workqueue_struct *system_freezable_wq __read_mostly;
Alan Stern62d3c542012-03-02 10:51:00 +0100280struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
shumash157ecab2015-07-18 09:12:19 -0600281struct workqueue_struct *system_power_efficient_wq __read_mostly;
282struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
Tejun Heod320c032010-06-29 10:07:14 +0200283EXPORT_SYMBOL_GPL(system_wq);
284EXPORT_SYMBOL_GPL(system_long_wq);
285EXPORT_SYMBOL_GPL(system_nrt_wq);
Tejun Heof3421792010-07-02 10:03:51 +0200286EXPORT_SYMBOL_GPL(system_unbound_wq);
Tejun Heo24d51ad2011-02-21 09:52:50 +0100287EXPORT_SYMBOL_GPL(system_freezable_wq);
Alan Stern62d3c542012-03-02 10:51:00 +0100288EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
shumash157ecab2015-07-18 09:12:19 -0600289EXPORT_SYMBOL_GPL(system_power_efficient_wq);
290EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
291
Tejun Heo97bd2342010-10-05 10:41:14 +0200292#define CREATE_TRACE_POINTS
293#include <trace/events/workqueue.h>
294
/* iterate over both (normal and highpri) pools of @gcwq */
#define for_each_worker_pool(pool, gcwq)				\
	for ((pool) = &(gcwq)->pools[0];				\
	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)

/* walk every busy worker of @gcwq by scanning all busy_hash buckets */
#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
302
Tejun Heof3421792010-07-02 10:03:51 +0200303static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
304 unsigned int sw)
305{
306 if (cpu < nr_cpu_ids) {
307 if (sw & 1) {
308 cpu = cpumask_next(cpu, mask);
309 if (cpu < nr_cpu_ids)
310 return cpu;
311 }
312 if (sw & 2)
313 return WORK_CPU_UNBOUND;
314 }
315 return WORK_CPU_NONE;
316}
317
318static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
319 struct workqueue_struct *wq)
320{
321 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
322}
323
/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to
 * for_each_*_cpu() iterators but also considers the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 *
 * (the "3" below sets both sw bits of __next_gcwq_cpu(): per-cpu
 * gcwqs first, then the unbound one)
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
351
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +0900352#ifdef CONFIG_DEBUG_OBJECTS_WORK
353
354static struct debug_obj_descr work_debug_descr;
355
Stanislaw Gruszka99777282011-03-07 09:58:33 +0100356static void *work_debug_hint(void *addr)
357{
358 return ((struct work_struct *) addr)->func;
359}
360
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +0900361/*
362 * fixup_init is called when:
363 * - an active object is initialized
364 */
365static int work_fixup_init(void *addr, enum debug_obj_state state)
366{
367 struct work_struct *work = addr;
368
369 switch (state) {
370 case ODEBUG_STATE_ACTIVE:
371 cancel_work_sync(work);
372 debug_object_init(work, &work_debug_descr);
373 return 1;
374 default:
375 return 0;
376 }
377}
378
379/*
380 * fixup_activate is called when:
381 * - an active object is activated
382 * - an unknown object is activated (might be a statically initialized object)
383 */
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 *
 * Returns 1 if a fixup was performed, 0 otherwise.
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		/* not static either - activation of an uninitialized work */
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through - nothing we can fix up here */

	default:
		return 0;
	}
}
411
412/*
413 * fixup_free is called when:
414 * - an active object is freed
415 */
416static int work_fixup_free(void *addr, enum debug_obj_state state)
417{
418 struct work_struct *work = addr;
419
420 switch (state) {
421 case ODEBUG_STATE_ACTIVE:
422 cancel_work_sync(work);
423 debug_object_free(work, &work_debug_descr);
424 return 1;
425 default:
426 return 0;
427 }
428}
429
/* debugobjects descriptor for work items; forward-declared above */
static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};
437
/* notify debugobjects that @work is being queued */
static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

/* notify debugobjects that @work has left the queue */
static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}
447
448void __init_work(struct work_struct *work, int onstack)
449{
450 if (onstack)
451 debug_object_init_on_stack(work, &work_debug_descr);
452 else
453 debug_object_init(work, &work_debug_descr);
454}
455EXPORT_SYMBOL_GPL(__init_work);
456
/* counterpart of __init_work() for on-stack works about to go out of scope */
void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);
462
463#else
464static inline void debug_work_activate(struct work_struct *work) { }
465static inline void debug_work_deactivate(struct work_struct *work) { }
466#endif
467
Gautham R Shenoy95402b32008-01-25 21:08:02 +0100468/* Serializes the accesses to the list of workqueues. */
469static DEFINE_SPINLOCK(workqueue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470static LIST_HEAD(workqueues);
Tejun Heoa0a1a5f2010-06-29 10:07:12 +0200471static bool workqueue_freezing; /* W: have wqs started freezing? */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472
Oleg Nesterov14441962007-05-23 13:57:57 -0700473/*
Tejun Heoe22bee72010-06-29 10:07:14 +0200474 * The almighty global cpu workqueues. nr_running is the only field
475 * which is expected to be used frequently by other cpus via
476 * try_to_wake_up(). Put it in a separate cacheline.
Oleg Nesterov14441962007-05-23 13:57:57 -0700477 */
Tejun Heo8b03ae32010-06-29 10:07:12 +0200478static DEFINE_PER_CPU(struct global_cwq, global_cwq);
Tejun Heo9c6bae02012-07-13 22:16:44 -0700479static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
Nathan Lynchf756d5e2006-01-08 01:05:12 -0800480
Tejun Heof3421792010-07-02 10:03:51 +0200481/*
482 * Global cpu workqueue and nr_running counter for unbound gcwq. The
483 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
484 * workers have WORKER_UNBOUND set.
485 */
486static struct global_cwq unbound_global_cwq;
Tejun Heo9c6bae02012-07-13 22:16:44 -0700487static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
488 [0 ... NR_WORKER_POOLS - 1] = ATOMIC_INIT(0), /* always 0 */
489};
Tejun Heof3421792010-07-02 10:03:51 +0200490
Tejun Heoc34056a2010-06-29 10:07:11 +0200491static int worker_thread(void *__worker);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492
Tejun Heodcb32ee2012-07-13 22:16:45 -0700493static int worker_pool_pri(struct worker_pool *pool)
494{
495 return pool - pool->gcwq->pools;
496}
497
Tejun Heo8b03ae32010-06-29 10:07:12 +0200498static struct global_cwq *get_gcwq(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499{
Tejun Heof3421792010-07-02 10:03:51 +0200500 if (cpu != WORK_CPU_UNBOUND)
501 return &per_cpu(global_cwq, cpu);
502 else
503 return &unbound_global_cwq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700504}
505
Tejun Heo7ef6a932012-07-12 14:46:37 -0700506static atomic_t *get_pool_nr_running(struct worker_pool *pool)
Oleg Nesterovb1f4ec12007-05-09 02:34:12 -0700507{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700508 int cpu = pool->gcwq->cpu;
Tejun Heodcb32ee2012-07-13 22:16:45 -0700509 int idx = worker_pool_pri(pool);
Tejun Heo7ef6a932012-07-12 14:46:37 -0700510
Tejun Heof3421792010-07-02 10:03:51 +0200511 if (cpu != WORK_CPU_UNBOUND)
Tejun Heo9c6bae02012-07-13 22:16:44 -0700512 return &per_cpu(pool_nr_running, cpu)[idx];
Tejun Heof3421792010-07-02 10:03:51 +0200513 else
Tejun Heo9c6bae02012-07-13 22:16:44 -0700514 return &unbound_pool_nr_running[idx];
Oleg Nesterovb1f4ec12007-05-09 02:34:12 -0700515}
516
Tejun Heo4690c4a2010-06-29 10:07:10 +0200517static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
518 struct workqueue_struct *wq)
Oleg Nesterova848e3b2007-05-09 02:34:17 -0700519{
Tejun Heof3421792010-07-02 10:03:51 +0200520 if (!(wq->flags & WQ_UNBOUND)) {
Lai Jiangshane06ffa12012-03-09 18:03:20 +0800521 if (likely(cpu < nr_cpu_ids))
Tejun Heof3421792010-07-02 10:03:51 +0200522 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
Tejun Heof3421792010-07-02 10:03:51 +0200523 } else if (likely(cpu == WORK_CPU_UNBOUND))
524 return wq->cpu_wq.single;
525 return NULL;
Oleg Nesterova848e3b2007-05-09 02:34:17 -0700526}
527
/* encode a flush color into the work_struct->data flag area */
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}
532
533static int get_work_color(struct work_struct *work)
534{
535 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
536 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
537}
538
/* next flush color, wrapping around at WORK_NR_COLORS */
static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
543
David Howells4594bf12006-12-07 11:33:26 +0000544/*
Tejun Heoe1201532010-07-22 14:14:25 +0200545 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
546 * work is on queue. Once execution starts, WORK_STRUCT_CWQ is
547 * cleared and the work data contains the cpu number it was last on.
Tejun Heo7a22ad72010-06-29 10:07:13 +0200548 *
549 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
550 * cwq, cpu or clear work->data. These functions should only be
551 * called while the work is owned - ie. while the PENDING bit is set.
552 *
553 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
554 * corresponding to a work. gcwq is available once the work has been
555 * queued anywhere after initialization. cwq is available only from
556 * queueing until execution starts.
David Howells4594bf12006-12-07 11:33:26 +0000557 */
/*
 * Store @data | @flags into @work->data, preserving the static-object
 * bit.  Must only be called while the work is owned, i.e. while the
 * PENDING bit is set (see the big comment above).
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}
David Howells365970a2006-11-22 14:54:49 +0000564
Tejun Heo7a22ad72010-06-29 10:07:13 +0200565static void set_work_cwq(struct work_struct *work,
566 struct cpu_workqueue_struct *cwq,
567 unsigned long extra_flags)
Oleg Nesterov4d707b92010-04-23 17:40:40 +0200568{
Tejun Heo7a22ad72010-06-29 10:07:13 +0200569 set_work_data(work, (unsigned long)cwq,
Tejun Heoe1201532010-07-22 14:14:25 +0200570 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
Oleg Nesterov4d707b92010-04-23 17:40:40 +0200571}
572
/* record the cpu @work last ran on, above the flag bits; PENDING stays set */
static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}
577
/* reset @work's data to "no cpu, no flags" */
static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}
582
Tejun Heo7a22ad72010-06-29 10:07:13 +0200583static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
584{
Tejun Heoe1201532010-07-22 14:14:25 +0200585 unsigned long data = atomic_long_read(&work->data);
Tejun Heo7a22ad72010-06-29 10:07:13 +0200586
Tejun Heoe1201532010-07-22 14:14:25 +0200587 if (data & WORK_STRUCT_CWQ)
588 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
589 else
Srinivasarao Pb6e586c2013-09-18 14:33:45 +0530590 {
591 WARN_ON_ONCE(1);
Tejun Heoe1201532010-07-22 14:14:25 +0200592 return NULL;
Srinivasarao Pb6e586c2013-09-18 14:33:45 +0530593 }
Tejun Heo7a22ad72010-06-29 10:07:13 +0200594}
595
/*
 * get_work_gcwq - gcwq @work is associated with
 *
 * If @work is on queue, derive the gcwq from the cwq pointer stored
 * in its data word; otherwise the data word holds the cpu it last ran
 * on (or WORK_CPU_NONE if it was never queued, yielding NULL).
 */
static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	/* only real cpus and WORK_CPU_UNBOUND are valid here */
	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}
612
613/*
Tejun Heodcb32ee2012-07-13 22:16:45 -0700614 * Policy functions. These define the policies on how the global worker
615 * pools are managed. Unless noted otherwise, these functions assume that
616 * they're being called with gcwq->lock held.
David Howells365970a2006-11-22 14:54:49 +0000617 */
Tejun Heoe22bee72010-06-29 10:07:14 +0200618
Tejun Heo7ef6a932012-07-12 14:46:37 -0700619static bool __need_more_worker(struct worker_pool *pool)
David Howells365970a2006-11-22 14:54:49 +0000620{
Tejun Heodcb32ee2012-07-13 22:16:45 -0700621 return !atomic_read(get_pool_nr_running(pool));
David Howells365970a2006-11-22 14:54:49 +0000622}
623
Tejun Heoe22bee72010-06-29 10:07:14 +0200624/*
625 * Need to wake up a worker? Called from anything but currently
626 * running workers.
Tejun Heob7b5c682012-07-12 14:46:37 -0700627 *
628 * Note that, because unbound workers never contribute to nr_running, this
629 * function will always return %true for unbound gcwq as long as the
630 * worklist isn't empty.
Tejun Heoe22bee72010-06-29 10:07:14 +0200631 */
Tejun Heo7ef6a932012-07-12 14:46:37 -0700632static bool need_more_worker(struct worker_pool *pool)
David Howells365970a2006-11-22 14:54:49 +0000633{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700634 return !list_empty(&pool->worklist) && __need_more_worker(pool);
David Howells365970a2006-11-22 14:54:49 +0000635}
636
Tejun Heoe22bee72010-06-29 10:07:14 +0200637/* Can I start working? Called from busy but !running workers. */
/*
 * Can I start working?  Called from busy but !running workers.
 * True iff the pool has at least one idle worker available.
 */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}
642
643/* Do I need to keep working? Called from currently running workers. */
Tejun Heo7ef6a932012-07-12 14:46:37 -0700644static bool keep_working(struct worker_pool *pool)
Tejun Heoe22bee72010-06-29 10:07:14 +0200645{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700646 atomic_t *nr_running = get_pool_nr_running(pool);
Tejun Heoe22bee72010-06-29 10:07:14 +0200647
Tejun Heodcb32ee2012-07-13 22:16:45 -0700648 return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
Tejun Heoe22bee72010-06-29 10:07:14 +0200649}
650
651/* Do we need a new worker? Called from manager. */
Tejun Heo7ef6a932012-07-12 14:46:37 -0700652static bool need_to_create_worker(struct worker_pool *pool)
Tejun Heoe22bee72010-06-29 10:07:14 +0200653{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700654 return need_more_worker(pool) && !may_start_working(pool);
Tejun Heoe22bee72010-06-29 10:07:14 +0200655}
656
657/* Do I need to be the manager? */
Tejun Heo7ef6a932012-07-12 14:46:37 -0700658static bool need_to_manage_workers(struct worker_pool *pool)
Tejun Heoe22bee72010-06-29 10:07:14 +0200659{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700660 return need_to_create_worker(pool) ||
Tejun Heo22ad5642012-07-12 14:46:37 -0700661 (pool->flags & POOL_MANAGE_WORKERS);
Tejun Heoe22bee72010-06-29 10:07:14 +0200662}
663
664/* Do we have too many workers and should some go away? */
Tejun Heo7ef6a932012-07-12 14:46:37 -0700665static bool too_many_workers(struct worker_pool *pool)
Tejun Heoe22bee72010-06-29 10:07:14 +0200666{
Tejun Heo22ad5642012-07-12 14:46:37 -0700667 bool managing = pool->flags & POOL_MANAGING_WORKERS;
Tejun Heo7ef6a932012-07-12 14:46:37 -0700668 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
669 int nr_busy = pool->nr_workers - nr_idle;
Tejun Heoe22bee72010-06-29 10:07:14 +0200670
671 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
672}
673
674/*
675 * Wake up functions.
676 */
677
Tejun Heo7e116292010-06-29 10:07:13 +0200678/* Return the first worker. Safe with preemption disabled */
Tejun Heo7ef6a932012-07-12 14:46:37 -0700679static struct worker *first_worker(struct worker_pool *pool)
Tejun Heo7e116292010-06-29 10:07:13 +0200680{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700681 if (unlikely(list_empty(&pool->idle_list)))
Tejun Heo7e116292010-06-29 10:07:13 +0200682 return NULL;
683
Tejun Heo7ef6a932012-07-12 14:46:37 -0700684 return list_first_entry(&pool->idle_list, struct worker, entry);
Tejun Heo7e116292010-06-29 10:07:13 +0200685}
686
687/**
688 * wake_up_worker - wake up an idle worker
Tejun Heo7ef6a932012-07-12 14:46:37 -0700689 * @pool: worker pool to wake worker from
Tejun Heo7e116292010-06-29 10:07:13 +0200690 *
Tejun Heo7ef6a932012-07-12 14:46:37 -0700691 * Wake up the first idle worker of @pool.
Tejun Heo7e116292010-06-29 10:07:13 +0200692 *
693 * CONTEXT:
694 * spin_lock_irq(gcwq->lock).
695 */
Tejun Heo7ef6a932012-07-12 14:46:37 -0700696static void wake_up_worker(struct worker_pool *pool)
Tejun Heo7e116292010-06-29 10:07:13 +0200697{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700698 struct worker *worker = first_worker(pool);
Tejun Heo7e116292010-06-29 10:07:13 +0200699
700 if (likely(worker))
701 wake_up_process(worker->task);
702}
703
Tejun Heo4690c4a2010-06-29 10:07:10 +0200704/**
Tejun Heoe22bee72010-06-29 10:07:14 +0200705 * wq_worker_waking_up - a worker is waking up
706 * @task: task waking up
707 * @cpu: CPU @task is waking up to
708 *
709 * This function is called during try_to_wake_up() when a worker is
710 * being awoken.
711 *
712 * CONTEXT:
713 * spin_lock_irq(rq->lock)
714 */
715void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
716{
717 struct worker *worker = kthread_data(task);
718
Steven Rostedt2d646722010-12-03 23:12:33 -0500719 if (!(worker->flags & WORKER_NOT_RUNNING))
Tejun Heo7ef6a932012-07-12 14:46:37 -0700720 atomic_inc(get_pool_nr_running(worker->pool));
Tejun Heoe22bee72010-06-29 10:07:14 +0200721}
722
723/**
724 * wq_worker_sleeping - a worker is going to sleep
725 * @task: task going to sleep
726 * @cpu: CPU in question, must be the current CPU number
727 *
728 * This function is called during schedule() when a busy worker is
729 * going to sleep. Worker on the same cpu can be woken up by
730 * returning pointer to its task.
731 *
732 * CONTEXT:
733 * spin_lock_irq(rq->lock)
734 *
735 * RETURNS:
736 * Worker task on @cpu to wake up, %NULL if none.
737 */
738struct task_struct *wq_worker_sleeping(struct task_struct *task,
739 unsigned int cpu)
740{
741 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
Tejun Heo58658882012-07-12 14:46:37 -0700742 struct worker_pool *pool = worker->pool;
Tejun Heo7ef6a932012-07-12 14:46:37 -0700743 atomic_t *nr_running = get_pool_nr_running(pool);
Tejun Heoe22bee72010-06-29 10:07:14 +0200744
Steven Rostedt2d646722010-12-03 23:12:33 -0500745 if (worker->flags & WORKER_NOT_RUNNING)
Tejun Heoe22bee72010-06-29 10:07:14 +0200746 return NULL;
747
748 /* this can only happen on the local cpu */
749 BUG_ON(cpu != raw_smp_processor_id());
750
751 /*
752 * The counterpart of the following dec_and_test, implied mb,
753 * worklist not empty test sequence is in insert_work().
754 * Please read comment there.
755 *
756 * NOT_RUNNING is clear. This means that trustee is not in
757 * charge and we're running on the local cpu w/ rq lock held
758 * and preemption disabled, which in turn means that none else
759 * could be manipulating idle_list, so dereferencing idle_list
760 * without gcwq lock is safe.
761 */
Tejun Heo58658882012-07-12 14:46:37 -0700762 if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
Tejun Heo7ef6a932012-07-12 14:46:37 -0700763 to_wakeup = first_worker(pool);
Tejun Heoe22bee72010-06-29 10:07:14 +0200764 return to_wakeup ? to_wakeup->task : NULL;
765}
766
767/**
768 * worker_set_flags - set worker flags and adjust nr_running accordingly
Tejun Heocb444762010-07-02 10:03:50 +0200769 * @worker: self
Tejun Heod302f012010-06-29 10:07:13 +0200770 * @flags: flags to set
771 * @wakeup: wakeup an idle worker if necessary
772 *
Tejun Heoe22bee72010-06-29 10:07:14 +0200773 * Set @flags in @worker->flags and adjust nr_running accordingly. If
774 * nr_running becomes zero and @wakeup is %true, an idle worker is
775 * woken up.
Tejun Heod302f012010-06-29 10:07:13 +0200776 *
Tejun Heocb444762010-07-02 10:03:50 +0200777 * CONTEXT:
778 * spin_lock_irq(gcwq->lock)
Tejun Heod302f012010-06-29 10:07:13 +0200779 */
780static inline void worker_set_flags(struct worker *worker, unsigned int flags,
781 bool wakeup)
782{
Tejun Heo58658882012-07-12 14:46:37 -0700783 struct worker_pool *pool = worker->pool;
Tejun Heoe22bee72010-06-29 10:07:14 +0200784
Tejun Heocb444762010-07-02 10:03:50 +0200785 WARN_ON_ONCE(worker->task != current);
786
Tejun Heoe22bee72010-06-29 10:07:14 +0200787 /*
788 * If transitioning into NOT_RUNNING, adjust nr_running and
789 * wake up an idle worker as necessary if requested by
790 * @wakeup.
791 */
792 if ((flags & WORKER_NOT_RUNNING) &&
793 !(worker->flags & WORKER_NOT_RUNNING)) {
Tejun Heo7ef6a932012-07-12 14:46:37 -0700794 atomic_t *nr_running = get_pool_nr_running(pool);
Tejun Heoe22bee72010-06-29 10:07:14 +0200795
796 if (wakeup) {
797 if (atomic_dec_and_test(nr_running) &&
Tejun Heo58658882012-07-12 14:46:37 -0700798 !list_empty(&pool->worklist))
Tejun Heo7ef6a932012-07-12 14:46:37 -0700799 wake_up_worker(pool);
Tejun Heoe22bee72010-06-29 10:07:14 +0200800 } else
801 atomic_dec(nr_running);
802 }
803
Tejun Heod302f012010-06-29 10:07:13 +0200804 worker->flags |= flags;
805}
806
807/**
Tejun Heoe22bee72010-06-29 10:07:14 +0200808 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
Tejun Heocb444762010-07-02 10:03:50 +0200809 * @worker: self
Tejun Heod302f012010-06-29 10:07:13 +0200810 * @flags: flags to clear
811 *
Tejun Heoe22bee72010-06-29 10:07:14 +0200812 * Clear @flags in @worker->flags and adjust nr_running accordingly.
Tejun Heod302f012010-06-29 10:07:13 +0200813 *
Tejun Heocb444762010-07-02 10:03:50 +0200814 * CONTEXT:
815 * spin_lock_irq(gcwq->lock)
Tejun Heod302f012010-06-29 10:07:13 +0200816 */
817static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
818{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700819 struct worker_pool *pool = worker->pool;
Tejun Heoe22bee72010-06-29 10:07:14 +0200820 unsigned int oflags = worker->flags;
821
Tejun Heocb444762010-07-02 10:03:50 +0200822 WARN_ON_ONCE(worker->task != current);
823
Tejun Heod302f012010-06-29 10:07:13 +0200824 worker->flags &= ~flags;
Tejun Heoe22bee72010-06-29 10:07:14 +0200825
Tejun Heo42c025f2011-01-11 15:58:49 +0100826 /*
827 * If transitioning out of NOT_RUNNING, increment nr_running. Note
828 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
829 * of multiple flags, not a single flag.
830 */
Tejun Heoe22bee72010-06-29 10:07:14 +0200831 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
832 if (!(worker->flags & WORKER_NOT_RUNNING))
Tejun Heo7ef6a932012-07-12 14:46:37 -0700833 atomic_inc(get_pool_nr_running(pool));
Tejun Heod302f012010-06-29 10:07:13 +0200834}
835
836/**
Tejun Heoc8e55f32010-06-29 10:07:12 +0200837 * busy_worker_head - return the busy hash head for a work
838 * @gcwq: gcwq of interest
839 * @work: work to be hashed
840 *
841 * Return hash head of @gcwq for @work.
842 *
843 * CONTEXT:
844 * spin_lock_irq(gcwq->lock).
845 *
846 * RETURNS:
847 * Pointer to the hash head.
848 */
849static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
850 struct work_struct *work)
851{
852 const int base_shift = ilog2(sizeof(struct work_struct));
853 unsigned long v = (unsigned long)work;
854
855 /* simple shift and fold hash, do we need something better? */
856 v >>= base_shift;
857 v += v >> BUSY_WORKER_HASH_ORDER;
858 v &= BUSY_WORKER_HASH_MASK;
859
860 return &gcwq->busy_hash[v];
861}
862
863/**
Tejun Heo8cca0ee2010-06-29 10:07:13 +0200864 * __find_worker_executing_work - find worker which is executing a work
865 * @gcwq: gcwq of interest
866 * @bwh: hash head as returned by busy_worker_head()
867 * @work: work to find worker for
868 *
869 * Find a worker which is executing @work on @gcwq. @bwh should be
870 * the hash head obtained by calling busy_worker_head() with the same
871 * work.
872 *
873 * CONTEXT:
874 * spin_lock_irq(gcwq->lock).
875 *
876 * RETURNS:
877 * Pointer to worker which is executing @work if found, NULL
878 * otherwise.
879 */
880static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
881 struct hlist_head *bwh,
882 struct work_struct *work)
883{
884 struct worker *worker;
885 struct hlist_node *tmp;
886
887 hlist_for_each_entry(worker, tmp, bwh, hentry)
Tejun Heo55e3e1f2012-12-18 10:35:02 -0800888 if (worker->current_work == work &&
889 worker->current_func == work->func)
Tejun Heo8cca0ee2010-06-29 10:07:13 +0200890 return worker;
891 return NULL;
892}
893
894/**
895 * find_worker_executing_work - find worker which is executing a work
896 * @gcwq: gcwq of interest
897 * @work: work to find worker for
898 *
Tejun Heo55e3e1f2012-12-18 10:35:02 -0800899 * Find a worker which is executing @work on @gcwq by searching
900 * @gcwq->busy_hash which is keyed by the address of @work. For a worker
901 * to match, its current execution should match the address of @work and
902 * its work function. This is to avoid unwanted dependency between
903 * unrelated work executions through a work item being recycled while still
904 * being executed.
905 *
906 * This is a bit tricky. A work item may be freed once its execution
907 * starts and nothing prevents the freed area from being recycled for
908 * another work item. If the same work item address ends up being reused
909 * before the original execution finishes, workqueue will identify the
910 * recycled work item as currently executing and make it wait until the
911 * current execution finishes, introducing an unwanted dependency.
912 *
913 * This function checks the work item address, work function and workqueue
914 * to avoid false positives. Note that this isn't complete as one may
915 * construct a work function which can introduce dependency onto itself
916 * through a recycled work item. Well, if somebody wants to shoot oneself
917 * in the foot that badly, there's only so much we can do, and if such
918 * deadlock actually occurs, it should be easy to locate the culprit work
919 * function.
Tejun Heo8cca0ee2010-06-29 10:07:13 +0200920 *
921 * CONTEXT:
922 * spin_lock_irq(gcwq->lock).
923 *
924 * RETURNS:
925 * Pointer to worker which is executing @work if found, NULL
926 * otherwise.
927 */
928static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
929 struct work_struct *work)
930{
931 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
932 work);
933}
934
935/**
Tejun Heo7e116292010-06-29 10:07:13 +0200936 * insert_work - insert a work into gcwq
Tejun Heo4690c4a2010-06-29 10:07:10 +0200937 * @cwq: cwq @work belongs to
938 * @work: work to insert
939 * @head: insertion point
940 * @extra_flags: extra WORK_STRUCT_* flags to set
941 *
Tejun Heo7e116292010-06-29 10:07:13 +0200942 * Insert @work which belongs to @cwq into @gcwq after @head.
943 * @extra_flags is or'd to work_struct flags.
Tejun Heo4690c4a2010-06-29 10:07:10 +0200944 *
945 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +0200946 * spin_lock_irq(gcwq->lock).
Tejun Heo4690c4a2010-06-29 10:07:10 +0200947 */
Oleg Nesterovb89deed2007-05-09 02:33:52 -0700948static void insert_work(struct cpu_workqueue_struct *cwq,
Tejun Heo4690c4a2010-06-29 10:07:10 +0200949 struct work_struct *work, struct list_head *head,
950 unsigned int extra_flags)
Oleg Nesterovb89deed2007-05-09 02:33:52 -0700951{
Tejun Heo7ef6a932012-07-12 14:46:37 -0700952 struct worker_pool *pool = cwq->pool;
Frederic Weisbeckere1d8aa92009-01-12 23:15:46 +0100953
Tejun Heo4690c4a2010-06-29 10:07:10 +0200954 /* we own @work, set data and link */
Tejun Heo7a22ad72010-06-29 10:07:13 +0200955 set_work_cwq(work, cwq, extra_flags);
Tejun Heo4690c4a2010-06-29 10:07:10 +0200956
Oleg Nesterov6e84d642007-05-09 02:34:46 -0700957 /*
958 * Ensure that we get the right work->data if we see the
959 * result of list_add() below, see try_to_grab_pending().
960 */
961 smp_wmb();
Tejun Heo4690c4a2010-06-29 10:07:10 +0200962
Oleg Nesterov1a4d9b02008-07-25 01:47:47 -0700963 list_add_tail(&work->entry, head);
Tejun Heoe22bee72010-06-29 10:07:14 +0200964
965 /*
966 * Ensure either worker_sched_deactivated() sees the above
967 * list_add_tail() or we see zero nr_running to avoid workers
968 * lying around lazily while there are works to be processed.
969 */
970 smp_mb();
971
Tejun Heo7ef6a932012-07-12 14:46:37 -0700972 if (__need_more_worker(pool))
973 wake_up_worker(pool);
Oleg Nesterovb89deed2007-05-09 02:33:52 -0700974}
975
Tejun Heoc8efcc22010-12-20 19:32:04 +0100976/*
977 * Test whether @work is being queued from another work executing on the
978 * same workqueue. This is rather expensive and should only be used from
979 * cold paths.
980 */
981static bool is_chained_work(struct workqueue_struct *wq)
982{
983 unsigned long flags;
984 unsigned int cpu;
985
986 for_each_gcwq_cpu(cpu) {
987 struct global_cwq *gcwq = get_gcwq(cpu);
988 struct worker *worker;
989 struct hlist_node *pos;
990 int i;
991
992 spin_lock_irqsave(&gcwq->lock, flags);
993 for_each_busy_worker(worker, i, pos, gcwq) {
994 if (worker->task != current)
995 continue;
996 spin_unlock_irqrestore(&gcwq->lock, flags);
997 /*
998 * I'm @worker, no locking necessary. See if @work
999 * is headed to the same workqueue.
1000 */
1001 return worker->current_cwq->wq == wq;
1002 }
1003 spin_unlock_irqrestore(&gcwq->lock, flags);
1004 }
1005 return false;
1006}
1007
Tejun Heo4690c4a2010-06-29 10:07:10 +02001008static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009 struct work_struct *work)
1010{
Tejun Heo502ca9d2010-06-29 10:07:13 +02001011 struct global_cwq *gcwq;
1012 struct cpu_workqueue_struct *cwq;
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001013 struct list_head *worklist;
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001014 unsigned int work_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015 unsigned long flags;
1016
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09001017 debug_work_activate(work);
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001018
Tejun Heoc8efcc22010-12-20 19:32:04 +01001019 /* if dying, only works from the same workqueue are allowed */
Tejun Heo9c5a2ba2011-04-05 18:01:44 +02001020 if (unlikely(wq->flags & WQ_DRAINING) &&
Tejun Heoc8efcc22010-12-20 19:32:04 +01001021 WARN_ON_ONCE(!is_chained_work(wq)))
Tejun Heoe41e7042010-08-24 14:22:47 +02001022 return;
1023
Tejun Heoc7fc77f2010-07-02 10:03:51 +02001024 /* determine gcwq to use */
1025 if (!(wq->flags & WQ_UNBOUND)) {
Tejun Heo18aa9ef2010-06-29 10:07:13 +02001026 struct global_cwq *last_gcwq;
1027
Tejun Heoc7fc77f2010-07-02 10:03:51 +02001028 if (unlikely(cpu == WORK_CPU_UNBOUND))
1029 cpu = raw_smp_processor_id();
1030
Tejun Heo18aa9ef2010-06-29 10:07:13 +02001031 /*
1032 * It's multi cpu. If @wq is non-reentrant and @work
1033 * was previously on a different cpu, it might still
1034 * be running there, in which case the work needs to
1035 * be queued on that cpu to guarantee non-reentrance.
1036 */
Tejun Heo502ca9d2010-06-29 10:07:13 +02001037 gcwq = get_gcwq(cpu);
Tejun Heo18aa9ef2010-06-29 10:07:13 +02001038 if (wq->flags & WQ_NON_REENTRANT &&
1039 (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1040 struct worker *worker;
1041
1042 spin_lock_irqsave(&last_gcwq->lock, flags);
1043
1044 worker = find_worker_executing_work(last_gcwq, work);
1045
1046 if (worker && worker->current_cwq->wq == wq)
1047 gcwq = last_gcwq;
1048 else {
1049 /* meh... not running there, queue here */
1050 spin_unlock_irqrestore(&last_gcwq->lock, flags);
1051 spin_lock_irqsave(&gcwq->lock, flags);
1052 }
1053 } else
1054 spin_lock_irqsave(&gcwq->lock, flags);
Tejun Heof3421792010-07-02 10:03:51 +02001055 } else {
1056 gcwq = get_gcwq(WORK_CPU_UNBOUND);
1057 spin_lock_irqsave(&gcwq->lock, flags);
Tejun Heo502ca9d2010-06-29 10:07:13 +02001058 }
1059
1060 /* gcwq determined, get cwq and queue */
1061 cwq = get_cwq(gcwq->cpu, wq);
Tejun Heocdadf002010-10-05 10:49:55 +02001062 trace_workqueue_queue_work(cpu, cwq, work);
Tejun Heo502ca9d2010-06-29 10:07:13 +02001063
Tejun Heo4690c4a2010-06-29 10:07:10 +02001064 BUG_ON(!list_empty(&work->entry));
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001065
Tejun Heo73f53c42010-06-29 10:07:11 +02001066 cwq->nr_in_flight[cwq->work_color]++;
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001067 work_flags = work_color_to_flags(cwq->work_color);
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001068
1069 if (likely(cwq->nr_active < cwq->max_active)) {
Tejun Heocdadf002010-10-05 10:49:55 +02001070 trace_workqueue_activate_work(work);
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001071 cwq->nr_active++;
Tejun Heodcb32ee2012-07-13 22:16:45 -07001072 worklist = &cwq->pool->worklist;
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001073 } else {
1074 work_flags |= WORK_STRUCT_DELAYED;
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001075 worklist = &cwq->delayed_works;
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001076 }
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001077
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001078 insert_work(cwq, work, worklist, work_flags);
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001079
Tejun Heo8b03ae32010-06-29 10:07:12 +02001080 spin_unlock_irqrestore(&gcwq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081}
1082
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001083/**
1084 * queue_work - queue work on a workqueue
1085 * @wq: workqueue to use
1086 * @work: work to queue
1087 *
Alan Stern057647f2006-10-28 10:38:58 -07001088 * Returns 0 if @work was already on a queue, non-zero otherwise.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 *
Oleg Nesterov00dfcaf2008-04-29 01:00:27 -07001090 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1091 * it can be processed by another CPU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001093int queue_work(struct workqueue_struct *wq, struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094{
Oleg Nesterovef1ca232008-07-25 01:47:53 -07001095 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096
Oleg Nesterovef1ca232008-07-25 01:47:53 -07001097 ret = queue_work_on(get_cpu(), wq, work);
1098 put_cpu();
1099
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100 return ret;
1101}
Dave Jonesae90dd52006-06-30 01:40:45 -04001102EXPORT_SYMBOL_GPL(queue_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103
Zhang Ruic1a220e2008-07-23 21:28:39 -07001104/**
1105 * queue_work_on - queue work on specific cpu
1106 * @cpu: CPU number to execute work on
1107 * @wq: workqueue to use
1108 * @work: work to queue
1109 *
1110 * Returns 0 if @work was already on a queue, non-zero otherwise.
1111 *
1112 * We queue the work to a specific CPU, the caller must ensure it
1113 * can't go away.
1114 */
1115int
1116queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1117{
1118 int ret = 0;
1119
Tejun Heo22df02b2010-06-29 10:07:10 +02001120 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
Tejun Heo4690c4a2010-06-29 10:07:10 +02001121 __queue_work(cpu, wq, work);
Zhang Ruic1a220e2008-07-23 21:28:39 -07001122 ret = 1;
1123 }
1124 return ret;
1125}
1126EXPORT_SYMBOL_GPL(queue_work_on);
1127
Li Zefan6d141c32008-02-08 04:21:09 -08001128static void delayed_work_timer_fn(unsigned long __data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129{
David Howells52bad642006-11-22 14:54:01 +00001130 struct delayed_work *dwork = (struct delayed_work *)__data;
Tejun Heo7a22ad72010-06-29 10:07:13 +02001131 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132
Srinivasarao Pb6e586c2013-09-18 14:33:45 +05301133 if (cwq != NULL)
1134 __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135}
1136
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001137/**
1138 * queue_delayed_work - queue work on a workqueue after delay
1139 * @wq: workqueue to use
Randy Dunlapaf9997e2006-12-22 01:06:52 -08001140 * @dwork: delayable work to queue
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001141 * @delay: number of jiffies to wait before queueing
1142 *
Alan Stern057647f2006-10-28 10:38:58 -07001143 * Returns 0 if @work was already on a queue, non-zero otherwise.
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001144 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001145int queue_delayed_work(struct workqueue_struct *wq,
David Howells52bad642006-11-22 14:54:01 +00001146 struct delayed_work *dwork, unsigned long delay)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147{
David Howells52bad642006-11-22 14:54:01 +00001148 if (delay == 0)
Oleg Nesterov63bc0362007-05-09 02:34:16 -07001149 return queue_work(wq, &dwork->work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150
Oleg Nesterov63bc0362007-05-09 02:34:16 -07001151 return queue_delayed_work_on(-1, wq, dwork, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152}
Dave Jonesae90dd52006-06-30 01:40:45 -04001153EXPORT_SYMBOL_GPL(queue_delayed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001155/**
1156 * queue_delayed_work_on - queue work on specific CPU after delay
1157 * @cpu: CPU number to execute work on
1158 * @wq: workqueue to use
Randy Dunlapaf9997e2006-12-22 01:06:52 -08001159 * @dwork: work to queue
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001160 * @delay: number of jiffies to wait before queueing
1161 *
Alan Stern057647f2006-10-28 10:38:58 -07001162 * Returns 0 if @work was already on a queue, non-zero otherwise.
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07001163 */
Venkatesh Pallipadi7a6bc1c2006-06-28 13:50:33 -07001164int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
David Howells52bad642006-11-22 14:54:01 +00001165 struct delayed_work *dwork, unsigned long delay)
Venkatesh Pallipadi7a6bc1c2006-06-28 13:50:33 -07001166{
1167 int ret = 0;
David Howells52bad642006-11-22 14:54:01 +00001168 struct timer_list *timer = &dwork->timer;
1169 struct work_struct *work = &dwork->work;
Venkatesh Pallipadi7a6bc1c2006-06-28 13:50:33 -07001170
Tejun Heo22df02b2010-06-29 10:07:10 +02001171 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
Tejun Heoc7fc77f2010-07-02 10:03:51 +02001172 unsigned int lcpu;
Tejun Heo7a22ad72010-06-29 10:07:13 +02001173
Tejun Heo4afca922012-12-04 07:40:39 -08001174 WARN_ON_ONCE(timer_pending(timer));
1175 WARN_ON_ONCE(!list_empty(&work->entry));
Venkatesh Pallipadi7a6bc1c2006-06-28 13:50:33 -07001176
Andrew Liu8a3e77c2008-05-01 04:35:14 -07001177 timer_stats_timer_set_start_info(&dwork->timer);
1178
Tejun Heo7a22ad72010-06-29 10:07:13 +02001179 /*
1180 * This stores cwq for the moment, for the timer_fn.
1181 * Note that the work's gcwq is preserved to allow
1182 * reentrance detection for delayed works.
1183 */
Tejun Heoc7fc77f2010-07-02 10:03:51 +02001184 if (!(wq->flags & WQ_UNBOUND)) {
1185 struct global_cwq *gcwq = get_work_gcwq(work);
1186
1187 if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1188 lcpu = gcwq->cpu;
1189 else
1190 lcpu = raw_smp_processor_id();
1191 } else
1192 lcpu = WORK_CPU_UNBOUND;
1193
Tejun Heo7a22ad72010-06-29 10:07:13 +02001194 set_work_cwq(work, get_cwq(lcpu, wq), 0);
Tejun Heoc7fc77f2010-07-02 10:03:51 +02001195
Venkatesh Pallipadi7a6bc1c2006-06-28 13:50:33 -07001196 timer->expires = jiffies + delay;
David Howells52bad642006-11-22 14:54:01 +00001197 timer->data = (unsigned long)dwork;
Venkatesh Pallipadi7a6bc1c2006-06-28 13:50:33 -07001198 timer->function = delayed_work_timer_fn;
Oleg Nesterov63bc0362007-05-09 02:34:16 -07001199
1200 if (unlikely(cpu >= 0))
1201 add_timer_on(timer, cpu);
1202 else
1203 add_timer(timer);
Venkatesh Pallipadi7a6bc1c2006-06-28 13:50:33 -07001204 ret = 1;
1205 }
1206 return ret;
1207}
Dave Jonesae90dd52006-06-30 01:40:45 -04001208EXPORT_SYMBOL_GPL(queue_delayed_work_on);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209
Tejun Heoc8e55f32010-06-29 10:07:12 +02001210/**
1211 * worker_enter_idle - enter idle state
1212 * @worker: worker which is entering idle state
1213 *
1214 * @worker is entering idle state. Update stats and idle timer if
1215 * necessary.
1216 *
1217 * LOCKING:
1218 * spin_lock_irq(gcwq->lock).
1219 */
1220static void worker_enter_idle(struct worker *worker)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221{
Tejun Heo58658882012-07-12 14:46:37 -07001222 struct worker_pool *pool = worker->pool;
1223 struct global_cwq *gcwq = pool->gcwq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224
Tejun Heoc8e55f32010-06-29 10:07:12 +02001225 BUG_ON(worker->flags & WORKER_IDLE);
1226 BUG_ON(!list_empty(&worker->entry) &&
1227 (worker->hentry.next || worker->hentry.pprev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
Tejun Heocb444762010-07-02 10:03:50 +02001229 /* can't use worker_set_flags(), also called from start_worker() */
1230 worker->flags |= WORKER_IDLE;
Tejun Heo58658882012-07-12 14:46:37 -07001231 pool->nr_idle++;
Tejun Heoe22bee72010-06-29 10:07:14 +02001232 worker->last_active = jiffies;
Peter Zijlstrad5abe662006-12-06 20:37:26 -08001233
Tejun Heoc8e55f32010-06-29 10:07:12 +02001234 /* idle_list is LIFO */
Tejun Heo58658882012-07-12 14:46:37 -07001235 list_add(&worker->entry, &pool->idle_list);
Tejun Heodb7bccf2010-06-29 10:07:12 +02001236
Tejun Heoe22bee72010-06-29 10:07:14 +02001237 if (likely(!(worker->flags & WORKER_ROGUE))) {
Tejun Heo7ef6a932012-07-12 14:46:37 -07001238 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
Tejun Heo58658882012-07-12 14:46:37 -07001239 mod_timer(&pool->idle_timer,
Tejun Heoe22bee72010-06-29 10:07:14 +02001240 jiffies + IDLE_WORKER_TIMEOUT);
1241 } else
Tejun Heodb7bccf2010-06-29 10:07:12 +02001242 wake_up_all(&gcwq->trustee_wait);
Tejun Heocb444762010-07-02 10:03:50 +02001243
Tejun Heo24312d32012-05-14 15:04:50 -07001244 /*
1245 * Sanity check nr_running. Because trustee releases gcwq->lock
1246 * between setting %WORKER_ROGUE and zapping nr_running, the
1247 * warning may trigger spuriously. Check iff trustee is idle.
1248 */
1249 WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
Tejun Heo58658882012-07-12 14:46:37 -07001250 pool->nr_workers == pool->nr_idle &&
Tejun Heo7ef6a932012-07-12 14:46:37 -07001251 atomic_read(get_pool_nr_running(pool)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252}
1253
Tejun Heoc8e55f32010-06-29 10:07:12 +02001254/**
1255 * worker_leave_idle - leave idle state
1256 * @worker: worker which is leaving idle state
1257 *
1258 * @worker is leaving idle state. Update stats.
1259 *
1260 * LOCKING:
1261 * spin_lock_irq(gcwq->lock).
1262 */
1263static void worker_leave_idle(struct worker *worker)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264{
Tejun Heo58658882012-07-12 14:46:37 -07001265 struct worker_pool *pool = worker->pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266
Tejun Heoc8e55f32010-06-29 10:07:12 +02001267 BUG_ON(!(worker->flags & WORKER_IDLE));
Tejun Heod302f012010-06-29 10:07:13 +02001268 worker_clr_flags(worker, WORKER_IDLE);
Tejun Heo58658882012-07-12 14:46:37 -07001269 pool->nr_idle--;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001270 list_del_init(&worker->entry);
1271}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Tejun Heoe22bee72010-06-29 10:07:14 +02001273/**
1274 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1275 * @worker: self
1276 *
1277 * Works which are scheduled while the cpu is online must at least be
1278 * scheduled to a worker which is bound to the cpu so that if they are
1279 * flushed from cpu callbacks while cpu is going down, they are
1280 * guaranteed to execute on the cpu.
1281 *
1282 * This function is to be used by rogue workers and rescuers to bind
1283 * themselves to the target cpu and may race with cpu going down or
1284 * coming online. kthread_bind() can't be used because it may put the
1285 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1286 * verbatim as it's best effort and blocking and gcwq may be
1287 * [dis]associated in the meantime.
1288 *
1289 * This function tries set_cpus_allowed() and locks gcwq and verifies
1290 * the binding against GCWQ_DISASSOCIATED which is set during
1291 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1292 * idle state or fetches works without dropping lock, it can guarantee
1293 * the scheduling requirement described in the first paragraph.
1294 *
1295 * CONTEXT:
1296 * Might sleep. Called without any lock but returns with gcwq->lock
1297 * held.
1298 *
1299 * RETURNS:
1300 * %true if the associated gcwq is online (@worker is successfully
1301 * bound), %false if offline.
1302 */
1303static bool worker_maybe_bind_and_lock(struct worker *worker)
Namhyung Kim972fa1c2010-08-22 23:19:43 +09001304__acquires(&gcwq->lock)
Tejun Heoe22bee72010-06-29 10:07:14 +02001305{
Tejun Heo58658882012-07-12 14:46:37 -07001306 struct global_cwq *gcwq = worker->pool->gcwq;
Tejun Heoe22bee72010-06-29 10:07:14 +02001307 struct task_struct *task = worker->task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308
Tejun Heoe22bee72010-06-29 10:07:14 +02001309 while (true) {
1310 /*
1311 * The following call may fail, succeed or succeed
1312 * without actually migrating the task to the cpu if
1313 * it races with cpu hotunplug operation. Verify
1314 * against GCWQ_DISASSOCIATED.
1315 */
Tejun Heof3421792010-07-02 10:03:51 +02001316 if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1317 set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
Oleg Nesterov85f41862007-05-09 02:34:20 -07001318
Tejun Heoe22bee72010-06-29 10:07:14 +02001319 spin_lock_irq(&gcwq->lock);
1320 if (gcwq->flags & GCWQ_DISASSOCIATED)
1321 return false;
1322 if (task_cpu(task) == gcwq->cpu &&
1323 cpumask_equal(&current->cpus_allowed,
1324 get_cpu_mask(gcwq->cpu)))
1325 return true;
1326 spin_unlock_irq(&gcwq->lock);
Oleg Nesterov3af244332007-05-09 02:34:09 -07001327
Tejun Heo5035b202011-04-29 18:08:37 +02001328 /*
1329 * We've raced with CPU hot[un]plug. Give it a breather
1330 * and retry migration. cond_resched() is required here;
1331 * otherwise, we might deadlock against cpu_stop trying to
1332 * bring down the CPU on non-preemptive kernel.
1333 */
Tejun Heoe22bee72010-06-29 10:07:14 +02001334 cpu_relax();
Tejun Heo5035b202011-04-29 18:08:37 +02001335 cond_resched();
Tejun Heoe22bee72010-06-29 10:07:14 +02001336 }
1337}
1338
1339/*
1340 * Function for worker->rebind_work used to rebind rogue busy workers
1341 * to the associated cpu which is coming back online. This is
1342 * scheduled by cpu up but can race with other cpu hotplug operations
1343 * and may be executed twice without intervening cpu down.
1344 */
1345static void worker_rebind_fn(struct work_struct *work)
1346{
1347 struct worker *worker = container_of(work, struct worker, rebind_work);
Tejun Heo58658882012-07-12 14:46:37 -07001348 struct global_cwq *gcwq = worker->pool->gcwq;
Tejun Heoe22bee72010-06-29 10:07:14 +02001349
1350 if (worker_maybe_bind_and_lock(worker))
1351 worker_clr_flags(worker, WORKER_REBIND);
1352
1353 spin_unlock_irq(&gcwq->lock);
1354}
1355
Tejun Heoc34056a2010-06-29 10:07:11 +02001356static struct worker *alloc_worker(void)
1357{
1358 struct worker *worker;
1359
1360 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
Tejun Heoc8e55f32010-06-29 10:07:12 +02001361 if (worker) {
1362 INIT_LIST_HEAD(&worker->entry);
Tejun Heoaffee4b2010-06-29 10:07:12 +02001363 INIT_LIST_HEAD(&worker->scheduled);
Tejun Heoe22bee72010-06-29 10:07:14 +02001364 INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1365 /* on creation a worker is in !idle && prep state */
1366 worker->flags = WORKER_PREP;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001367 }
Tejun Heoc34056a2010-06-29 10:07:11 +02001368 return worker;
1369}
1370
1371/**
1372 * create_worker - create a new workqueue worker
Tejun Heo7ef6a932012-07-12 14:46:37 -07001373 * @pool: pool the new worker will belong to
Tejun Heoc34056a2010-06-29 10:07:11 +02001374 * @bind: whether to set affinity to @cpu or not
1375 *
Tejun Heo7ef6a932012-07-12 14:46:37 -07001376 * Create a new worker which is bound to @pool. The returned worker
Tejun Heoc34056a2010-06-29 10:07:11 +02001377 * can be started by calling start_worker() or destroyed using
1378 * destroy_worker().
1379 *
1380 * CONTEXT:
1381 * Might sleep. Does GFP_KERNEL allocations.
1382 *
1383 * RETURNS:
1384 * Pointer to the newly created worker.
1385 */
Tejun Heo7ef6a932012-07-12 14:46:37 -07001386static struct worker *create_worker(struct worker_pool *pool, bool bind)
Tejun Heoc34056a2010-06-29 10:07:11 +02001387{
Tejun Heo7ef6a932012-07-12 14:46:37 -07001388 struct global_cwq *gcwq = pool->gcwq;
Tejun Heof3421792010-07-02 10:03:51 +02001389 bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
Tejun Heodcb32ee2012-07-13 22:16:45 -07001390 const char *pri = worker_pool_pri(pool) ? "H" : "";
Tejun Heoc34056a2010-06-29 10:07:11 +02001391 struct worker *worker = NULL;
Tejun Heof3421792010-07-02 10:03:51 +02001392 int id = -1;
Tejun Heoc34056a2010-06-29 10:07:11 +02001393
Tejun Heo8b03ae32010-06-29 10:07:12 +02001394 spin_lock_irq(&gcwq->lock);
Tejun Heo58658882012-07-12 14:46:37 -07001395 while (ida_get_new(&pool->worker_ida, &id)) {
Tejun Heo8b03ae32010-06-29 10:07:12 +02001396 spin_unlock_irq(&gcwq->lock);
Tejun Heo58658882012-07-12 14:46:37 -07001397 if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
Tejun Heoc34056a2010-06-29 10:07:11 +02001398 goto fail;
Tejun Heo8b03ae32010-06-29 10:07:12 +02001399 spin_lock_irq(&gcwq->lock);
Tejun Heoc34056a2010-06-29 10:07:11 +02001400 }
Tejun Heo8b03ae32010-06-29 10:07:12 +02001401 spin_unlock_irq(&gcwq->lock);
Tejun Heoc34056a2010-06-29 10:07:11 +02001402
1403 worker = alloc_worker();
1404 if (!worker)
1405 goto fail;
1406
Tejun Heo58658882012-07-12 14:46:37 -07001407 worker->pool = pool;
Tejun Heoc34056a2010-06-29 10:07:11 +02001408 worker->id = id;
1409
Tejun Heof3421792010-07-02 10:03:51 +02001410 if (!on_unbound_cpu)
Eric Dumazet94dcf292011-03-22 16:30:45 -07001411 worker->task = kthread_create_on_node(worker_thread,
Tejun Heodcb32ee2012-07-13 22:16:45 -07001412 worker, cpu_to_node(gcwq->cpu),
1413 "kworker/%u:%d%s", gcwq->cpu, id, pri);
Tejun Heof3421792010-07-02 10:03:51 +02001414 else
1415 worker->task = kthread_create(worker_thread, worker,
Tejun Heodcb32ee2012-07-13 22:16:45 -07001416 "kworker/u:%d%s", id, pri);
Tejun Heoc34056a2010-06-29 10:07:11 +02001417 if (IS_ERR(worker->task))
1418 goto fail;
1419
Tejun Heodcb32ee2012-07-13 22:16:45 -07001420 if (worker_pool_pri(pool))
1421 set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
1422
Tejun Heodb7bccf2010-06-29 10:07:12 +02001423 /*
1424 * A rogue worker will become a regular one if CPU comes
1425 * online later on. Make sure every worker has
1426 * PF_THREAD_BOUND set.
1427 */
Tejun Heof3421792010-07-02 10:03:51 +02001428 if (bind && !on_unbound_cpu)
Tejun Heo8b03ae32010-06-29 10:07:12 +02001429 kthread_bind(worker->task, gcwq->cpu);
Tejun Heof3421792010-07-02 10:03:51 +02001430 else {
Tejun Heodb7bccf2010-06-29 10:07:12 +02001431 worker->task->flags |= PF_THREAD_BOUND;
Tejun Heof3421792010-07-02 10:03:51 +02001432 if (on_unbound_cpu)
1433 worker->flags |= WORKER_UNBOUND;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 }
Oleg Nesterov3af244332007-05-09 02:34:09 -07001435
Tejun Heoc34056a2010-06-29 10:07:11 +02001436 return worker;
1437fail:
1438 if (id >= 0) {
Tejun Heo8b03ae32010-06-29 10:07:12 +02001439 spin_lock_irq(&gcwq->lock);
Tejun Heo58658882012-07-12 14:46:37 -07001440 ida_remove(&pool->worker_ida, id);
Tejun Heo8b03ae32010-06-29 10:07:12 +02001441 spin_unlock_irq(&gcwq->lock);
Tejun Heoc34056a2010-06-29 10:07:11 +02001442 }
1443 kfree(worker);
1444 return NULL;
1445}
1446
1447/**
1448 * start_worker - start a newly created worker
1449 * @worker: worker to start
1450 *
Tejun Heoc8e55f32010-06-29 10:07:12 +02001451 * Make the gcwq aware of @worker and start it.
Tejun Heoc34056a2010-06-29 10:07:11 +02001452 *
1453 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001454 * spin_lock_irq(gcwq->lock).
Tejun Heoc34056a2010-06-29 10:07:11 +02001455 */
1456static void start_worker(struct worker *worker)
1457{
Tejun Heocb444762010-07-02 10:03:50 +02001458 worker->flags |= WORKER_STARTED;
Tejun Heo58658882012-07-12 14:46:37 -07001459 worker->pool->nr_workers++;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001460 worker_enter_idle(worker);
Tejun Heoc34056a2010-06-29 10:07:11 +02001461 wake_up_process(worker->task);
1462}
1463
1464/**
1465 * destroy_worker - destroy a workqueue worker
1466 * @worker: worker to be destroyed
1467 *
Tejun Heoc8e55f32010-06-29 10:07:12 +02001468 * Destroy @worker and adjust @gcwq stats accordingly.
1469 *
1470 * CONTEXT:
1471 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
Tejun Heoc34056a2010-06-29 10:07:11 +02001472 */
1473static void destroy_worker(struct worker *worker)
1474{
Tejun Heo58658882012-07-12 14:46:37 -07001475 struct worker_pool *pool = worker->pool;
1476 struct global_cwq *gcwq = pool->gcwq;
Tejun Heoc34056a2010-06-29 10:07:11 +02001477 int id = worker->id;
1478
1479 /* sanity check frenzy */
1480 BUG_ON(worker->current_work);
Tejun Heoaffee4b2010-06-29 10:07:12 +02001481 BUG_ON(!list_empty(&worker->scheduled));
Tejun Heoc34056a2010-06-29 10:07:11 +02001482
Tejun Heoc8e55f32010-06-29 10:07:12 +02001483 if (worker->flags & WORKER_STARTED)
Tejun Heo58658882012-07-12 14:46:37 -07001484 pool->nr_workers--;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001485 if (worker->flags & WORKER_IDLE)
Tejun Heo58658882012-07-12 14:46:37 -07001486 pool->nr_idle--;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001487
Lai Jiangshan23f09132014-02-15 22:02:28 +08001488 /*
1489 * Once WORKER_DIE is set, the kworker may destroy itself at any
1490 * point. Pin to ensure the task stays until we're done with it.
1491 */
1492 get_task_struct(worker->task);
1493
Tejun Heoc8e55f32010-06-29 10:07:12 +02001494 list_del_init(&worker->entry);
Tejun Heocb444762010-07-02 10:03:50 +02001495 worker->flags |= WORKER_DIE;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001496
1497 spin_unlock_irq(&gcwq->lock);
1498
Tejun Heoc34056a2010-06-29 10:07:11 +02001499 kthread_stop(worker->task);
Lai Jiangshan23f09132014-02-15 22:02:28 +08001500 put_task_struct(worker->task);
Tejun Heoc34056a2010-06-29 10:07:11 +02001501 kfree(worker);
1502
Tejun Heo8b03ae32010-06-29 10:07:12 +02001503 spin_lock_irq(&gcwq->lock);
Tejun Heo58658882012-07-12 14:46:37 -07001504 ida_remove(&pool->worker_ida, id);
Tejun Heoc34056a2010-06-29 10:07:11 +02001505}
1506
Tejun Heo7ef6a932012-07-12 14:46:37 -07001507static void idle_worker_timeout(unsigned long __pool)
Tejun Heoe22bee72010-06-29 10:07:14 +02001508{
Tejun Heo7ef6a932012-07-12 14:46:37 -07001509 struct worker_pool *pool = (void *)__pool;
1510 struct global_cwq *gcwq = pool->gcwq;
Tejun Heoe22bee72010-06-29 10:07:14 +02001511
1512 spin_lock_irq(&gcwq->lock);
1513
Tejun Heo7ef6a932012-07-12 14:46:37 -07001514 if (too_many_workers(pool)) {
Tejun Heoe22bee72010-06-29 10:07:14 +02001515 struct worker *worker;
1516 unsigned long expires;
1517
1518 /* idle_list is kept in LIFO order, check the last one */
Tejun Heo7ef6a932012-07-12 14:46:37 -07001519 worker = list_entry(pool->idle_list.prev, struct worker, entry);
Tejun Heoe22bee72010-06-29 10:07:14 +02001520 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1521
1522 if (time_before(jiffies, expires))
Tejun Heo7ef6a932012-07-12 14:46:37 -07001523 mod_timer(&pool->idle_timer, expires);
Tejun Heoe22bee72010-06-29 10:07:14 +02001524 else {
1525 /* it's been idle for too long, wake up manager */
Tejun Heo22ad5642012-07-12 14:46:37 -07001526 pool->flags |= POOL_MANAGE_WORKERS;
Tejun Heo7ef6a932012-07-12 14:46:37 -07001527 wake_up_worker(pool);
Tejun Heoe22bee72010-06-29 10:07:14 +02001528 }
1529 }
1530
1531 spin_unlock_irq(&gcwq->lock);
1532}
1533
1534static bool send_mayday(struct work_struct *work)
1535{
1536 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1537 struct workqueue_struct *wq = cwq->wq;
Tejun Heof3421792010-07-02 10:03:51 +02001538 unsigned int cpu;
Tejun Heoe22bee72010-06-29 10:07:14 +02001539
1540 if (!(wq->flags & WQ_RESCUER))
1541 return false;
1542
1543 /* mayday mayday mayday */
Tejun Heo58658882012-07-12 14:46:37 -07001544 cpu = cwq->pool->gcwq->cpu;
Tejun Heof3421792010-07-02 10:03:51 +02001545 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1546 if (cpu == WORK_CPU_UNBOUND)
1547 cpu = 0;
Tejun Heof2e005a2010-07-20 15:59:09 +02001548 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
Tejun Heoe22bee72010-06-29 10:07:14 +02001549 wake_up_process(wq->rescuer->task);
1550 return true;
1551}
1552
Tejun Heo7ef6a932012-07-12 14:46:37 -07001553static void gcwq_mayday_timeout(unsigned long __pool)
Tejun Heoe22bee72010-06-29 10:07:14 +02001554{
Tejun Heo7ef6a932012-07-12 14:46:37 -07001555 struct worker_pool *pool = (void *)__pool;
1556 struct global_cwq *gcwq = pool->gcwq;
Tejun Heoe22bee72010-06-29 10:07:14 +02001557 struct work_struct *work;
1558
1559 spin_lock_irq(&gcwq->lock);
1560
Tejun Heo7ef6a932012-07-12 14:46:37 -07001561 if (need_to_create_worker(pool)) {
Tejun Heoe22bee72010-06-29 10:07:14 +02001562 /*
1563 * We've been trying to create a new worker but
1564 * haven't been successful. We might be hitting an
1565 * allocation deadlock. Send distress signals to
1566 * rescuers.
1567 */
Tejun Heo7ef6a932012-07-12 14:46:37 -07001568 list_for_each_entry(work, &pool->worklist, entry)
Tejun Heoe22bee72010-06-29 10:07:14 +02001569 send_mayday(work);
1570 }
1571
1572 spin_unlock_irq(&gcwq->lock);
1573
Tejun Heo7ef6a932012-07-12 14:46:37 -07001574 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
Tejun Heoe22bee72010-06-29 10:07:14 +02001575}
1576
1577/**
1578 * maybe_create_worker - create a new worker if necessary
Tejun Heo7ef6a932012-07-12 14:46:37 -07001579 * @pool: pool to create a new worker for
Tejun Heoe22bee72010-06-29 10:07:14 +02001580 *
Tejun Heo7ef6a932012-07-12 14:46:37 -07001581 * Create a new worker for @pool if necessary. @pool is guaranteed to
Tejun Heoe22bee72010-06-29 10:07:14 +02001582 * have at least one idle worker on return from this function. If
1583 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
Tejun Heo7ef6a932012-07-12 14:46:37 -07001584 * sent to all rescuers with works scheduled on @pool to resolve
Tejun Heoe22bee72010-06-29 10:07:14 +02001585 * possible allocation deadlock.
1586 *
1587 * On return, need_to_create_worker() is guaranteed to be false and
1588 * may_start_working() true.
1589 *
1590 * LOCKING:
1591 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1592 * multiple times. Does GFP_KERNEL allocations. Called only from
1593 * manager.
1594 *
1595 * RETURNS:
1596 * false if no action was taken and gcwq->lock stayed locked, true
1597 * otherwise.
1598 */
Tejun Heo7ef6a932012-07-12 14:46:37 -07001599static bool maybe_create_worker(struct worker_pool *pool)
Namhyung Kim06bd6eb2010-08-22 23:19:42 +09001600__releases(&gcwq->lock)
1601__acquires(&gcwq->lock)
Tejun Heoe22bee72010-06-29 10:07:14 +02001602{
Tejun Heo7ef6a932012-07-12 14:46:37 -07001603 struct global_cwq *gcwq = pool->gcwq;
1604
1605 if (!need_to_create_worker(pool))
Tejun Heoe22bee72010-06-29 10:07:14 +02001606 return false;
1607restart:
Tejun Heo9f9c2362010-07-14 11:31:20 +02001608 spin_unlock_irq(&gcwq->lock);
1609
Tejun Heoe22bee72010-06-29 10:07:14 +02001610 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
Tejun Heo7ef6a932012-07-12 14:46:37 -07001611 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
Tejun Heoe22bee72010-06-29 10:07:14 +02001612
1613 while (true) {
1614 struct worker *worker;
1615
Tejun Heo7ef6a932012-07-12 14:46:37 -07001616 worker = create_worker(pool, true);
Tejun Heoe22bee72010-06-29 10:07:14 +02001617 if (worker) {
Tejun Heo7ef6a932012-07-12 14:46:37 -07001618 del_timer_sync(&pool->mayday_timer);
Tejun Heoe22bee72010-06-29 10:07:14 +02001619 spin_lock_irq(&gcwq->lock);
1620 start_worker(worker);
Tejun Heo7ef6a932012-07-12 14:46:37 -07001621 BUG_ON(need_to_create_worker(pool));
Tejun Heoe22bee72010-06-29 10:07:14 +02001622 return true;
1623 }
1624
Tejun Heo7ef6a932012-07-12 14:46:37 -07001625 if (!need_to_create_worker(pool))
Tejun Heoe22bee72010-06-29 10:07:14 +02001626 break;
1627
Tejun Heoe22bee72010-06-29 10:07:14 +02001628 __set_current_state(TASK_INTERRUPTIBLE);
1629 schedule_timeout(CREATE_COOLDOWN);
Tejun Heo9f9c2362010-07-14 11:31:20 +02001630
Tejun Heo7ef6a932012-07-12 14:46:37 -07001631 if (!need_to_create_worker(pool))
Tejun Heoe22bee72010-06-29 10:07:14 +02001632 break;
1633 }
1634
Tejun Heo7ef6a932012-07-12 14:46:37 -07001635 del_timer_sync(&pool->mayday_timer);
Tejun Heoe22bee72010-06-29 10:07:14 +02001636 spin_lock_irq(&gcwq->lock);
Tejun Heo7ef6a932012-07-12 14:46:37 -07001637 if (need_to_create_worker(pool))
Tejun Heoe22bee72010-06-29 10:07:14 +02001638 goto restart;
1639 return true;
1640}
1641
1642/**
1643 * maybe_destroy_worker - destroy workers which have been idle for a while
Tejun Heo7ef6a932012-07-12 14:46:37 -07001644 * @pool: pool to destroy workers for
Tejun Heoe22bee72010-06-29 10:07:14 +02001645 *
Tejun Heo7ef6a932012-07-12 14:46:37 -07001646 * Destroy @pool workers which have been idle for longer than
Tejun Heoe22bee72010-06-29 10:07:14 +02001647 * IDLE_WORKER_TIMEOUT.
1648 *
1649 * LOCKING:
1650 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1651 * multiple times. Called only from manager.
1652 *
1653 * RETURNS:
1654 * false if no action was taken and gcwq->lock stayed locked, true
1655 * otherwise.
1656 */
Tejun Heo7ef6a932012-07-12 14:46:37 -07001657static bool maybe_destroy_workers(struct worker_pool *pool)
Tejun Heoe22bee72010-06-29 10:07:14 +02001658{
1659 bool ret = false;
1660
Tejun Heo7ef6a932012-07-12 14:46:37 -07001661 while (too_many_workers(pool)) {
Tejun Heoe22bee72010-06-29 10:07:14 +02001662 struct worker *worker;
1663 unsigned long expires;
1664
Tejun Heo7ef6a932012-07-12 14:46:37 -07001665 worker = list_entry(pool->idle_list.prev, struct worker, entry);
Tejun Heoe22bee72010-06-29 10:07:14 +02001666 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1667
1668 if (time_before(jiffies, expires)) {
Tejun Heo7ef6a932012-07-12 14:46:37 -07001669 mod_timer(&pool->idle_timer, expires);
Tejun Heoe22bee72010-06-29 10:07:14 +02001670 break;
1671 }
1672
1673 destroy_worker(worker);
1674 ret = true;
1675 }
1676
1677 return ret;
1678}
1679
1680/**
1681 * manage_workers - manage worker pool
1682 * @worker: self
1683 *
1684 * Assume the manager role and manage gcwq worker pool @worker belongs
1685 * to. At any given time, there can be only zero or one manager per
1686 * gcwq. The exclusion is handled automatically by this function.
1687 *
1688 * The caller can safely start processing works on false return. On
1689 * true return, it's guaranteed that need_to_create_worker() is false
1690 * and may_start_working() is true.
1691 *
1692 * CONTEXT:
1693 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1694 * multiple times. Does GFP_KERNEL allocations.
1695 *
1696 * RETURNS:
1697 * false if no action was taken and gcwq->lock stayed locked, true if
1698 * some action was taken.
1699 */
1700static bool manage_workers(struct worker *worker)
1701{
Tejun Heo7ef6a932012-07-12 14:46:37 -07001702 struct worker_pool *pool = worker->pool;
1703 struct global_cwq *gcwq = pool->gcwq;
Tejun Heoe22bee72010-06-29 10:07:14 +02001704 bool ret = false;
1705
Tejun Heo22ad5642012-07-12 14:46:37 -07001706 if (pool->flags & POOL_MANAGING_WORKERS)
Tejun Heoe22bee72010-06-29 10:07:14 +02001707 return ret;
1708
Tejun Heo22ad5642012-07-12 14:46:37 -07001709 pool->flags &= ~POOL_MANAGE_WORKERS;
1710 pool->flags |= POOL_MANAGING_WORKERS;
Tejun Heoe22bee72010-06-29 10:07:14 +02001711
1712 /*
1713 * Destroy and then create so that may_start_working() is true
1714 * on return.
1715 */
Tejun Heo7ef6a932012-07-12 14:46:37 -07001716 ret |= maybe_destroy_workers(pool);
1717 ret |= maybe_create_worker(pool);
Tejun Heoe22bee72010-06-29 10:07:14 +02001718
Tejun Heo22ad5642012-07-12 14:46:37 -07001719 pool->flags &= ~POOL_MANAGING_WORKERS;
Tejun Heoe22bee72010-06-29 10:07:14 +02001720
1721 /*
1722 * The trustee might be waiting to take over the manager
1723 * position, tell it we're done.
1724 */
1725 if (unlikely(gcwq->trustee))
1726 wake_up_all(&gcwq->trustee_wait);
1727
1728 return ret;
1729}
1730
Tejun Heoa62428c2010-06-29 10:07:10 +02001731/**
Tejun Heoaffee4b2010-06-29 10:07:12 +02001732 * move_linked_works - move linked works to a list
1733 * @work: start of series of works to be scheduled
1734 * @head: target list to append @work to
1735 * @nextp: out paramter for nested worklist walking
1736 *
1737 * Schedule linked works starting from @work to @head. Work series to
1738 * be scheduled starts at @work and includes any consecutive work with
1739 * WORK_STRUCT_LINKED set in its predecessor.
1740 *
1741 * If @nextp is not NULL, it's updated to point to the next work of
1742 * the last scheduled work. This allows move_linked_works() to be
1743 * nested inside outer list_for_each_entry_safe().
1744 *
1745 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001746 * spin_lock_irq(gcwq->lock).
Tejun Heoaffee4b2010-06-29 10:07:12 +02001747 */
1748static void move_linked_works(struct work_struct *work, struct list_head *head,
1749 struct work_struct **nextp)
1750{
1751 struct work_struct *n;
1752
1753 /*
1754 * Linked worklist will always end before the end of the list,
1755 * use NULL for list head.
1756 */
1757 list_for_each_entry_safe_from(work, n, NULL, entry) {
1758 list_move_tail(&work->entry, head);
1759 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1760 break;
1761 }
1762
1763 /*
1764 * If we're already inside safe list traversal and have moved
1765 * multiple works to the scheduled queue, the next position
1766 * needs to be updated.
1767 */
1768 if (nextp)
1769 *nextp = n;
1770}
1771
Lai Jiangshan31eafff2012-09-18 10:40:00 -07001772static void cwq_activate_delayed_work(struct work_struct *work)
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001773{
Lai Jiangshan31eafff2012-09-18 10:40:00 -07001774 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001775
Tejun Heocdadf002010-10-05 10:49:55 +02001776 trace_workqueue_activate_work(work);
Tejun Heodcb32ee2012-07-13 22:16:45 -07001777 move_linked_works(work, &cwq->pool->worklist, NULL);
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001778 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001779 cwq->nr_active++;
1780}
1781
Lai Jiangshan31eafff2012-09-18 10:40:00 -07001782static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1783{
1784 struct work_struct *work = list_first_entry(&cwq->delayed_works,
1785 struct work_struct, entry);
1786
1787 cwq_activate_delayed_work(work);
1788}
1789
Tejun Heoaffee4b2010-06-29 10:07:12 +02001790/**
Tejun Heo73f53c42010-06-29 10:07:11 +02001791 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1792 * @cwq: cwq of interest
1793 * @color: color of work which left the queue
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001794 * @delayed: for a delayed work
Tejun Heo73f53c42010-06-29 10:07:11 +02001795 *
1796 * A work either has completed or is removed from pending queue,
1797 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1798 *
1799 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001800 * spin_lock_irq(gcwq->lock).
Tejun Heo73f53c42010-06-29 10:07:11 +02001801 */
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001802static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1803 bool delayed)
Tejun Heo73f53c42010-06-29 10:07:11 +02001804{
1805 /* ignore uncolored works */
1806 if (color == WORK_NO_COLOR)
1807 return;
1808
1809 cwq->nr_in_flight[color]--;
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001810
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001811 if (!delayed) {
1812 cwq->nr_active--;
1813 if (!list_empty(&cwq->delayed_works)) {
1814 /* one down, submit a delayed one */
1815 if (cwq->nr_active < cwq->max_active)
1816 cwq_activate_first_delayed(cwq);
1817 }
Tejun Heo502ca9d2010-06-29 10:07:13 +02001818 }
Tejun Heo73f53c42010-06-29 10:07:11 +02001819
1820 /* is flush in progress and are we at the flushing tip? */
1821 if (likely(cwq->flush_color != color))
1822 return;
1823
1824 /* are there still in-flight works? */
1825 if (cwq->nr_in_flight[color])
1826 return;
1827
1828 /* this cwq is done, clear flush_color */
1829 cwq->flush_color = -1;
1830
1831 /*
1832 * If this was the last cwq, wake up the first flusher. It
1833 * will handle the rest.
1834 */
1835 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1836 complete(&cwq->wq->first_flusher->done);
1837}
1838
1839/**
Tejun Heoa62428c2010-06-29 10:07:10 +02001840 * process_one_work - process single work
Tejun Heoc34056a2010-06-29 10:07:11 +02001841 * @worker: self
Tejun Heoa62428c2010-06-29 10:07:10 +02001842 * @work: work to process
1843 *
1844 * Process @work. This function contains all the logics necessary to
1845 * process a single work including synchronization against and
1846 * interaction with other workers on the same cpu, queueing and
1847 * flushing. As long as context requirement is met, any worker can
1848 * call this function to process a work.
1849 *
1850 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001851 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
Tejun Heoa62428c2010-06-29 10:07:10 +02001852 */
Tejun Heoc34056a2010-06-29 10:07:11 +02001853static void process_one_work(struct worker *worker, struct work_struct *work)
Namhyung Kim06bd6eb2010-08-22 23:19:42 +09001854__releases(&gcwq->lock)
1855__acquires(&gcwq->lock)
Tejun Heoa62428c2010-06-29 10:07:10 +02001856{
Tejun Heo7e116292010-06-29 10:07:13 +02001857 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
Tejun Heo58658882012-07-12 14:46:37 -07001858 struct worker_pool *pool = worker->pool;
1859 struct global_cwq *gcwq = pool->gcwq;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001860 struct hlist_head *bwh = busy_worker_head(gcwq, work);
Tejun Heofb0e7be2010-06-29 10:07:15 +02001861 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
Tejun Heo73f53c42010-06-29 10:07:11 +02001862 int work_color;
Tejun Heo7e116292010-06-29 10:07:13 +02001863 struct worker *collision;
Tejun Heoa62428c2010-06-29 10:07:10 +02001864#ifdef CONFIG_LOCKDEP
1865 /*
1866 * It is permissible to free the struct work_struct from
1867 * inside the function that is called from it, this we need to
1868 * take into account for lockdep too. To avoid bogus "held
1869 * lock freed" warnings as well as problems when looking into
1870 * work->lockdep_map, make a copy and use that here.
1871 */
1872 struct lockdep_map lockdep_map = work->lockdep_map;
1873#endif
Tejun Heo7e116292010-06-29 10:07:13 +02001874 /*
1875 * A single work shouldn't be executed concurrently by
1876 * multiple workers on a single cpu. Check whether anyone is
1877 * already processing the work. If so, defer the work to the
1878 * currently executing one.
1879 */
1880 collision = __find_worker_executing_work(gcwq, bwh, work);
1881 if (unlikely(collision)) {
1882 move_linked_works(work, &collision->scheduled, NULL);
1883 return;
1884 }
1885
Tejun Heoa62428c2010-06-29 10:07:10 +02001886 /* claim and process */
Tejun Heoa62428c2010-06-29 10:07:10 +02001887 debug_work_deactivate(work);
Tejun Heoc8e55f32010-06-29 10:07:12 +02001888 hlist_add_head(&worker->hentry, bwh);
Tejun Heoc34056a2010-06-29 10:07:11 +02001889 worker->current_work = work;
Tejun Heo55e3e1f2012-12-18 10:35:02 -08001890 worker->current_func = work->func;
Tejun Heo8cca0ee2010-06-29 10:07:13 +02001891 worker->current_cwq = cwq;
Tejun Heo73f53c42010-06-29 10:07:11 +02001892 work_color = get_work_color(work);
Tejun Heo7a22ad72010-06-29 10:07:13 +02001893
Tejun Heo7a22ad72010-06-29 10:07:13 +02001894 /* record the current cpu number in the work data and dequeue */
1895 set_work_cpu(work, gcwq->cpu);
Tejun Heoa62428c2010-06-29 10:07:10 +02001896 list_del_init(&work->entry);
1897
Tejun Heo649027d2010-06-29 10:07:14 +02001898 /*
Tejun Heofb0e7be2010-06-29 10:07:15 +02001899 * CPU intensive works don't participate in concurrency
1900 * management. They're the scheduler's responsibility.
1901 */
1902 if (unlikely(cpu_intensive))
1903 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1904
Tejun Heob7b5c682012-07-12 14:46:37 -07001905 /*
1906 * Unbound gcwq isn't concurrency managed and work items should be
1907 * executed ASAP. Wake up another worker if necessary.
1908 */
Tejun Heo7ef6a932012-07-12 14:46:37 -07001909 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
1910 wake_up_worker(pool);
Tejun Heob7b5c682012-07-12 14:46:37 -07001911
Tejun Heo8b03ae32010-06-29 10:07:12 +02001912 spin_unlock_irq(&gcwq->lock);
Tejun Heoa62428c2010-06-29 10:07:10 +02001913
Tejun Heo66307ae2012-08-03 10:30:45 -07001914 smp_wmb(); /* paired with test_and_set_bit(PENDING) */
Tejun Heoa62428c2010-06-29 10:07:10 +02001915 work_clear_pending(work);
Tejun Heo66307ae2012-08-03 10:30:45 -07001916
Tejun Heoe1594892011-01-09 23:32:15 +01001917 lock_map_acquire_read(&cwq->wq->lockdep_map);
Tejun Heoa62428c2010-06-29 10:07:10 +02001918 lock_map_acquire(&lockdep_map);
Arjan van de Vene36c8862010-08-21 13:07:26 -07001919 trace_workqueue_execute_start(work);
Tejun Heo55e3e1f2012-12-18 10:35:02 -08001920 worker->current_func(work);
Arjan van de Vene36c8862010-08-21 13:07:26 -07001921 /*
1922 * While we must be careful to not use "work" after this, the trace
1923 * point will only record its address.
1924 */
1925 trace_workqueue_execute_end(work);
Tejun Heoa62428c2010-06-29 10:07:10 +02001926 lock_map_release(&lockdep_map);
1927 lock_map_release(&cwq->wq->lockdep_map);
1928
1929 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
Tejun Heo55e3e1f2012-12-18 10:35:02 -08001930 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
1931 " last function: %pf\n",
1932 current->comm, preempt_count(), task_pid_nr(current),
1933 worker->current_func);
Tejun Heoa62428c2010-06-29 10:07:10 +02001934 debug_show_held_locks(current);
Syed Rameez Mustafa1bee7b92013-07-15 11:52:09 -07001935 BUG_ON(PANIC_CORRUPTION);
Tejun Heoa62428c2010-06-29 10:07:10 +02001936 dump_stack();
1937 }
1938
Tejun Heo00cef7a2013-08-28 17:33:37 -04001939 /*
1940 * The following prevents a kworker from hogging CPU on !PREEMPT
1941 * kernels, where a requeueing work item waiting for something to
1942 * happen could deadlock with stop_machine as such work item could
1943 * indefinitely requeue itself while all other CPUs are trapped in
1944 * stop_machine.
1945 */
1946 cond_resched();
1947
Tejun Heo8b03ae32010-06-29 10:07:12 +02001948 spin_lock_irq(&gcwq->lock);
Tejun Heoa62428c2010-06-29 10:07:10 +02001949
Tejun Heofb0e7be2010-06-29 10:07:15 +02001950 /* clear cpu intensive status */
1951 if (unlikely(cpu_intensive))
1952 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1953
Tejun Heoa62428c2010-06-29 10:07:10 +02001954 /* we're done with it, release */
Tejun Heoc8e55f32010-06-29 10:07:12 +02001955 hlist_del_init(&worker->hentry);
Tejun Heoc34056a2010-06-29 10:07:11 +02001956 worker->current_work = NULL;
Tejun Heo55e3e1f2012-12-18 10:35:02 -08001957 worker->current_func = NULL;
Tejun Heo8cca0ee2010-06-29 10:07:13 +02001958 worker->current_cwq = NULL;
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001959 cwq_dec_nr_in_flight(cwq, work_color, false);
Tejun Heoa62428c2010-06-29 10:07:10 +02001960}
1961
Tejun Heoaffee4b2010-06-29 10:07:12 +02001962/**
1963 * process_scheduled_works - process scheduled works
1964 * @worker: self
1965 *
1966 * Process all scheduled works. Please note that the scheduled list
1967 * may change while processing a work, so this function repeatedly
1968 * fetches a work from the top and executes it.
1969 *
1970 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001971 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
Tejun Heoaffee4b2010-06-29 10:07:12 +02001972 * multiple times.
1973 */
1974static void process_scheduled_works(struct worker *worker)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975{
Tejun Heoaffee4b2010-06-29 10:07:12 +02001976 while (!list_empty(&worker->scheduled)) {
1977 struct work_struct *work = list_first_entry(&worker->scheduled,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 struct work_struct, entry);
Tejun Heoc34056a2010-06-29 10:07:11 +02001979 process_one_work(worker, work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981}
1982
/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The gcwq worker thread function.  There's a single dynamic pool of
 * these per each cpu.  These workers process all works regardless of
 * their specific target workqueue.  The only exception is works which
 * belong to workqueues with a rescuer which will be explained in
 * rescuer_thread().
 *
 * Returns 0 only when the worker is told to die via WORKER_DIE.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;

	/* tell the scheduler that this is a workqueue worker */
	worker->task->flags |= PF_WQ_WORKER;
woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		/* no longer a workqueue worker as far as the scheduler goes */
		worker->task->flags &= ~PF_WQ_WORKER;
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/* no more worker necessary? */
	if (!need_more_worker(pool))
		goto sleep;

	/* do we need to manage? */
	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	/*
	 * When control reaches this point, we're guaranteed to have
	 * at least one idle worker or that someone else has already
	 * assumed the manager role.
	 */
	worker_clr_flags(worker, WORKER_PREP);

	do {
		struct work_struct *work =
			list_first_entry(&pool->worklist,
					 struct work_struct, entry);

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			/*
			 * @work has linked works (e.g. flush barriers)
			 * after it; move the whole chain to ->scheduled
			 * and process it in order.
			 */
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	} while (keep_working(pool));

	worker_set_flags(worker, WORKER_PREP, false);
sleep:
	if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * gcwq->lock is held and there's no work to process and no
	 * need to manage, sleep.  Workers are woken up only while
	 * holding gcwq->lock or from local cpu, so setting the
	 * current state before releasing gcwq->lock is enough to
	 * prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}
2069
/**
 * rescuer_thread - the rescuer thread function
 * @__wq: the associated workqueue
 *
 * Workqueue rescuer thread function.  There's one rescuer for each
 * workqueue which has WQ_RESCUER set.
 *
 * Regular work processing on a gcwq may block trying to create a new
 * worker which uses GFP_KERNEL allocation which has slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
 * the problem rescuer solves.
 *
 * When such condition is possible, the gcwq summons rescuers of all
 * workqueues which have works queued on the gcwq and let them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
 */
static int rescuer_thread(void *__wq)
{
	struct workqueue_struct *wq = __wq;
	struct worker *rescuer = wq->rescuer;
	struct list_head *scheduled = &rescuer->scheduled;
	bool is_unbound = wq->flags & WQ_UNBOUND;
	unsigned int cpu;

	set_user_nice(current, RESCUER_NICE_LEVEL);
repeat:
	set_current_state(TASK_INTERRUPTIBLE);

	/* only stopped when the workqueue is being destroyed */
	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * See whether any cpu is asking for help.  Unbounded
	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
	 */
	for_each_mayday_cpu(cpu, wq->mayday_mask) {
		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
		struct worker_pool *pool = cwq->pool;
		struct global_cwq *gcwq = pool->gcwq;
		struct work_struct *work, *n;

		__set_current_state(TASK_RUNNING);
		mayday_clear_cpu(cpu, wq->mayday_mask);

		/* migrate to the target cpu if possible */
		rescuer->pool = pool;
		/* returns with gcwq->lock held */
		worker_maybe_bind_and_lock(rescuer);

		/*
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
		BUG_ON(!list_empty(&rescuer->scheduled));
		list_for_each_entry_safe(work, n, &pool->worklist, entry)
			if (get_work_cwq(work) == cwq)
				move_linked_works(work, scheduled, &n);

		process_scheduled_works(rescuer);

		/*
		 * Leave this gcwq.  If keep_working() is %true, notify a
		 * regular worker; otherwise, we end up with 0 concurrency
		 * and stalling the execution.
		 */
		if (keep_working(pool))
			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);
	}

	/* no mayday pending, sleep until summoned again */
	schedule();
	goto repeat;
}
2149
/*
 * On-stack barrier work item used by the flush functions.  It is
 * queued behind a target work item and signals @done once executed,
 * which guarantees the target has finished.
 */
struct wq_barrier {
	struct work_struct work;	/* the barrier work item itself */
	struct completion done;		/* completed by wq_barrier_func() */
};
2154
2155static void wq_barrier_func(struct work_struct *work)
2156{
2157 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2158 complete(&barr->done);
2159}
2160
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	/* NO_COLOR keeps the barrier out of flush color accounting */
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
2221
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		/*
		 * Start from 1 as a self-reference; dropping it below
		 * arms the wakeup once every cwq has been visited.
		 */
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->pool->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				/* this cwq must signal when its color drains */
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	/* drop the self-reference; completes now if nothing was in flight */
	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
2293
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	/* let lockdep catch flush-from-inside-work deadlocks */
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 *
		 * NOTE: @next was left by the completion loop above
		 * pointing at the first flusher of the next color.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449
Tejun Heo9c5a2ba2011-04-05 18:01:44 +02002450/**
2451 * drain_workqueue - drain a workqueue
2452 * @wq: workqueue to drain
2453 *
2454 * Wait until the workqueue becomes empty. While draining is in progress,
2455 * only chain queueing is allowed. IOW, only currently pending or running
2456 * work items on @wq can queue further work items on it. @wq is flushed
2457 * repeatedly until it becomes empty. The number of flushing is detemined
2458 * by the depth of chaining and should be relatively short. Whine if it
2459 * takes too long.
2460 */
2461void drain_workqueue(struct workqueue_struct *wq)
2462{
2463 unsigned int flush_cnt = 0;
2464 unsigned int cpu;
2465
2466 /*
2467 * __queue_work() needs to test whether there are drainers, is much
2468 * hotter than drain_workqueue() and already looks at @wq->flags.
2469 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
2470 */
2471 spin_lock(&workqueue_lock);
2472 if (!wq->nr_drainers++)
2473 wq->flags |= WQ_DRAINING;
2474 spin_unlock(&workqueue_lock);
2475reflush:
2476 flush_workqueue(wq);
2477
2478 for_each_cwq_cpu(cpu, wq) {
2479 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
Thomas Tuttlefa2563e2011-09-14 16:22:28 -07002480 bool drained;
Tejun Heo9c5a2ba2011-04-05 18:01:44 +02002481
Tejun Heo58658882012-07-12 14:46:37 -07002482 spin_lock_irq(&cwq->pool->gcwq->lock);
Thomas Tuttlefa2563e2011-09-14 16:22:28 -07002483 drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
Tejun Heo58658882012-07-12 14:46:37 -07002484 spin_unlock_irq(&cwq->pool->gcwq->lock);
Thomas Tuttlefa2563e2011-09-14 16:22:28 -07002485
2486 if (drained)
Tejun Heo9c5a2ba2011-04-05 18:01:44 +02002487 continue;
2488
2489 if (++flush_cnt == 10 ||
2490 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2491 pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
2492 wq->name, flush_cnt);
2493 goto reflush;
2494 }
2495
2496 spin_lock(&workqueue_lock);
2497 if (!--wq->nr_drainers)
2498 wq->flags &= ~WQ_DRAINING;
2499 spin_unlock(&workqueue_lock);
2500}
2501EXPORT_SYMBOL_GPL(drain_workqueue);
2502
/*
 * start_flush_work - try to queue a flush barrier behind @work
 * @work: the work item to flush
 * @barr: on-stack wq_barrier to insert after @work
 * @wait_executing: also flush when @work is only executing, not queued
 *
 * RETURNS:
 * %true if @barr was queued and the caller should wait on @barr->done,
 * %false if @work is idle and there is nothing to wait for.
 */
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
			     bool wait_executing)
{
	struct worker *worker = NULL;
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;

	might_sleep();
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return false;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued to a different gcwq under us, we
		 * are not going to wait.
		 */
		smp_rmb();
		cwq = get_work_cwq(work);
		if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
			goto already_gone;
	} else if (wait_executing) {
		worker = find_worker_executing_work(gcwq, work);
		if (!worker)
			goto already_gone;
		cwq = worker->current_cwq;
	} else
		goto already_gone;

	insert_wq_barrier(cwq, barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	/*
	 * If @max_active is 1 or rescuer is in use, flushing another work
	 * item on the same workqueue may lead to deadlock.  Make sure the
	 * flusher is not running on the same workqueue by verifying write
	 * access.
	 */
	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
		lock_map_acquire(&cwq->wq->lockdep_map);
	else
		lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	return true;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return false;
}
2554
Oleg Nesterovdb700892008-07-25 01:47:49 -07002555/**
Tejun Heo401a8d02010-09-16 10:36:00 +02002556 * flush_work - wait for a work to finish executing the last queueing instance
2557 * @work: the work to flush
Oleg Nesterovdb700892008-07-25 01:47:49 -07002558 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002559 * Wait until @work has finished execution. This function considers
2560 * only the last queueing instance of @work. If @work has been
2561 * enqueued across different CPUs on a non-reentrant workqueue or on
2562 * multiple workqueues, @work might still be executing on return on
2563 * some of the CPUs from earlier queueing.
Oleg Nesterova67da702008-07-25 01:47:52 -07002564 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002565 * If @work was queued only on a non-reentrant, ordered or unbound
2566 * workqueue, @work is guaranteed to be idle on return if it hasn't
2567 * been requeued since flush started.
2568 *
2569 * RETURNS:
2570 * %true if flush_work() waited for the work to finish execution,
2571 * %false if it was already idle.
Oleg Nesterovdb700892008-07-25 01:47:49 -07002572 */
Tejun Heo401a8d02010-09-16 10:36:00 +02002573bool flush_work(struct work_struct *work)
Oleg Nesterovdb700892008-07-25 01:47:49 -07002574{
Oleg Nesterovdb700892008-07-25 01:47:49 -07002575 struct wq_barrier barr;
2576
Tejun Heobaf59022010-09-16 10:42:16 +02002577 if (start_flush_work(work, &barr, true)) {
2578 wait_for_completion(&barr.done);
2579 destroy_work_on_stack(&barr.work);
2580 return true;
2581 } else
2582 return false;
Oleg Nesterovdb700892008-07-25 01:47:49 -07002583}
2584EXPORT_SYMBOL_GPL(flush_work);
2585
Tejun Heo401a8d02010-09-16 10:36:00 +02002586static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2587{
2588 struct wq_barrier barr;
2589 struct worker *worker;
2590
2591 spin_lock_irq(&gcwq->lock);
2592
2593 worker = find_worker_executing_work(gcwq, work);
2594 if (unlikely(worker))
2595 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2596
2597 spin_unlock_irq(&gcwq->lock);
2598
2599 if (unlikely(worker)) {
2600 wait_for_completion(&barr.done);
2601 destroy_work_on_stack(&barr.work);
2602 return true;
2603 } else
2604 return false;
2605}
2606
2607static bool wait_on_work(struct work_struct *work)
2608{
2609 bool ret = false;
2610 int cpu;
2611
2612 might_sleep();
2613
2614 lock_map_acquire(&work->lockdep_map);
2615 lock_map_release(&work->lockdep_map);
2616
2617 for_each_gcwq_cpu(cpu)
2618 ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2619 return ret;
2620}
2621
Tejun Heo09383492010-09-16 10:48:29 +02002622/**
2623 * flush_work_sync - wait until a work has finished execution
2624 * @work: the work to flush
2625 *
2626 * Wait until @work has finished execution. On return, it's
2627 * guaranteed that all queueing instances of @work which happened
2628 * before this function is called are finished. In other words, if
2629 * @work hasn't been requeued since this function was called, @work is
2630 * guaranteed to be idle on return.
2631 *
2632 * RETURNS:
2633 * %true if flush_work_sync() waited for the work to finish execution,
2634 * %false if it was already idle.
2635 */
2636bool flush_work_sync(struct work_struct *work)
2637{
2638 struct wq_barrier barr;
2639 bool pending, waited;
2640
2641 /* we'll wait for executions separately, queue barr only if pending */
2642 pending = start_flush_work(work, &barr, false);
2643
2644 /* wait for executions to finish */
2645 waited = wait_on_work(work);
2646
2647 /* wait for the pending one */
2648 if (pending) {
2649 wait_for_completion(&barr.done);
2650 destroy_work_on_stack(&barr.work);
2651 }
2652
2653 return pending || waited;
2654}
2655EXPORT_SYMBOL_GPL(flush_work_sync);
2656
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 *
 * RETURNS:
 * 1 if @work was queued and has been stolen off its gcwq worklist,
 * 0 if @work was idle and PENDING is now owned by the caller,
 * -1 if queueing is still in progress and the caller must retry.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return ret;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong gcwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (gcwq == get_work_gcwq(work)) {
			debug_work_deactivate(work);

			/*
			 * A delayed work item cannot be grabbed directly
			 * because it might have linked NO_COLOR work items
			 * which, if left on the delayed_list, will confuse
			 * cwq->nr_active management later on and cause
			 * stall.  Make sure the work item is activated
			 * before grabbing.
			 */
			if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
				cwq_activate_delayed_work(work);

			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(get_work_cwq(work),
				get_work_color(work),
				*work_data_bits(work) & WORK_STRUCT_DELAYED);
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}
2710
Tejun Heo401a8d02010-09-16 10:36:00 +02002711static bool __cancel_work_timer(struct work_struct *work,
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002712 struct timer_list* timer)
2713{
2714 int ret;
2715
2716 do {
2717 ret = (timer && likely(del_timer(timer)));
2718 if (!ret)
2719 ret = try_to_grab_pending(work);
2720 wait_on_work(work);
2721 } while (unlikely(ret < 0));
2722
Tejun Heo7a22ad72010-06-29 10:07:13 +02002723 clear_work_data(work);
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002724 return ret;
2725}
2726
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002727/**
Tejun Heo401a8d02010-09-16 10:36:00 +02002728 * cancel_work_sync - cancel a work and wait for it to finish
2729 * @work: the work to cancel
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002730 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002731 * Cancel @work and wait for its execution to finish. This function
2732 * can be used even if the work re-queues itself or migrates to
2733 * another workqueue. On return from this function, @work is
2734 * guaranteed to be not pending or executing on any CPU.
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002735 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002736 * cancel_work_sync(&delayed_work->work) must not be used for
2737 * delayed_work's. Use cancel_delayed_work_sync() instead.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002738 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002739 * The caller must ensure that the workqueue on which @work was last
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002740 * queued can't be destroyed before this function returns.
Tejun Heo401a8d02010-09-16 10:36:00 +02002741 *
2742 * RETURNS:
2743 * %true if @work was pending, %false otherwise.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002744 */
Tejun Heo401a8d02010-09-16 10:36:00 +02002745bool cancel_work_sync(struct work_struct *work)
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002746{
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002747 return __cancel_work_timer(work, NULL);
Oleg Nesterovb89deed2007-05-09 02:33:52 -07002748}
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07002749EXPORT_SYMBOL_GPL(cancel_work_sync);
Oleg Nesterovb89deed2007-05-09 02:33:52 -07002750
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002751/**
Tejun Heo401a8d02010-09-16 10:36:00 +02002752 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2753 * @dwork: the delayed work to flush
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002754 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002755 * Delayed timer is cancelled and the pending work is queued for
2756 * immediate execution. Like flush_work(), this function only
2757 * considers the last queueing instance of @dwork.
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002758 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002759 * RETURNS:
2760 * %true if flush_work() waited for the work to finish execution,
2761 * %false if it was already idle.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002762 */
Tejun Heo401a8d02010-09-16 10:36:00 +02002763bool flush_delayed_work(struct delayed_work *dwork)
2764{
2765 if (del_timer_sync(&dwork->timer))
2766 __queue_work(raw_smp_processor_id(),
2767 get_work_cwq(&dwork->work)->wq, &dwork->work);
2768 return flush_work(&dwork->work);
2769}
2770EXPORT_SYMBOL(flush_delayed_work);
2771
/**
 * flush_delayed_work_sync - wait for a dwork to finish
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * execution immediately. Other than timer handling, its behavior
 * is identical to flush_work_sync().
 *
 * RETURNS:
 * %true if flush_work_sync() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work_sync(struct delayed_work *dwork)
{
	/* timer still pending - queue @dwork on the local CPU right away */
	if (del_timer_sync(&dwork->timer))
		__queue_work(raw_smp_processor_id(),
			     get_work_cwq(&dwork->work)->wq, &dwork->work);
	return flush_work_sync(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work_sync);
2792
/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work to cancel
 *
 * This is cancel_work_sync() for delayed works.
 *
 * RETURNS:
 * %true if @dwork was pending, %false otherwise.
 */
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
	/* also hand over the timer so a pending delayed queueing is killed */
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	/* queue on the kernel-global workqueue */
	return queue_work(system_wq, work);
}
EXPORT_SYMBOL(schedule_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	/* per-cpu flavor of schedule_work(), same kernel-global workqueue */
	return queue_work_on(cpu, system_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
2837
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	/* delayed flavor of schedule_work(), same kernel-global workqueue */
	return queue_delayed_work(system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			     struct delayed_work *dwork, unsigned long delay)
{
	/* per-cpu delayed flavor of schedule_work() */
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868
/**
 * schedule_on_each_cpu - execute a function synchronously on each online CPU
 * @func: the function to call
 *
 * schedule_on_each_cpu() executes @func on each online CPU using the
 * system workqueue and blocks until all CPUs have completed.
 * schedule_on_each_cpu() is very slow.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct __percpu *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	/* keep the set of online CPUs stable across both loops below */
	get_online_cpus();

	/* first pass: queue one per-cpu work item on every online CPU */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}

	/* second pass: wait for every queued item to finish */
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
2905
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function! It's very easy to get into
 * trouble if you don't take great care. Either of the following situations
 * will lead to deadlock:
 *
 * One of the work items currently on the workqueue needs to acquire
 * a lock held by your code or its caller.
 *
 * Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often. It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	/* system_wq is the workqueue used by schedule_work() and friends */
	flush_workqueue(system_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935
2936/**
James Bottomley1fa44ec2006-02-23 12:43:43 -06002937 * execute_in_process_context - reliably execute the routine with user context
2938 * @fn: the function to execute
James Bottomley1fa44ec2006-02-23 12:43:43 -06002939 * @ew: guaranteed storage for the execute work structure (must
2940 * be available when the work executes)
2941 *
2942 * Executes the function immediately if process context is available,
2943 * otherwise schedules the function for delayed execution.
2944 *
2945 * Returns: 0 - function was executed
2946 * 1 - function was scheduled for execution
2947 */
David Howells65f27f32006-11-22 14:55:48 +00002948int execute_in_process_context(work_func_t fn, struct execute_work *ew)
James Bottomley1fa44ec2006-02-23 12:43:43 -06002949{
2950 if (!in_interrupt()) {
David Howells65f27f32006-11-22 14:55:48 +00002951 fn(&ew->work);
James Bottomley1fa44ec2006-02-23 12:43:43 -06002952 return 0;
2953 }
2954
David Howells65f27f32006-11-22 14:55:48 +00002955 INIT_WORK(&ew->work, fn);
James Bottomley1fa44ec2006-02-23 12:43:43 -06002956 schedule_work(&ew->work);
2957
2958 return 1;
2959}
2960EXPORT_SYMBOL_GPL(execute_in_process_context);
2961
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962int keventd_up(void)
2963{
Tejun Heod320c032010-06-29 10:07:14 +02002964 return system_wq != NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965}
2966
/*
 * Allocate the cwq storage for @wq: per-cpu for bound workqueues,
 * a single aligned slot for unbound ones.  Returns 0 on success,
 * -ENOMEM on allocation failure.
 */
static int alloc_cwqs(struct workqueue_struct *wq)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));

	if (!(wq->flags & WQ_UNBOUND))
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
	else {
		void *ptr;

		/*
		 * Allocate enough room to align cwq and put an extra
		 * pointer at the end pointing back to the originally
		 * allocated pointer which will be used for free.
		 */
		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
		if (ptr) {
			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
			*(void **)(wq->cpu_wq.single + 1) = ptr;
		}
	}

	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
	return wq->cpu_wq.v ? 0 : -ENOMEM;
}
2999
Tejun Heobdbc5dd2010-07-02 10:03:51 +02003000static void free_cwqs(struct workqueue_struct *wq)
Oleg Nesterov06ba38a2007-05-09 02:34:15 -07003001{
Lai Jiangshane06ffa12012-03-09 18:03:20 +08003002 if (!(wq->flags & WQ_UNBOUND))
Tejun Heof3421792010-07-02 10:03:51 +02003003 free_percpu(wq->cpu_wq.pcpu);
3004 else if (wq->cpu_wq.single) {
3005 /* the pointer to free is stored right after the cwq */
Tejun Heobdbc5dd2010-07-02 10:03:51 +02003006 kfree(*(void **)(wq->cpu_wq.single + 1));
Oleg Nesterov06ba38a2007-05-09 02:34:15 -07003007 }
3008}
3009
Tejun Heof3421792010-07-02 10:03:51 +02003010static int wq_clamp_max_active(int max_active, unsigned int flags,
3011 const char *name)
Tejun Heob71ab8c2010-06-29 10:07:14 +02003012{
Tejun Heof3421792010-07-02 10:03:51 +02003013 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3014
3015 if (max_active < 1 || max_active > lim)
Tejun Heob71ab8c2010-06-29 10:07:14 +02003016 printk(KERN_WARNING "workqueue: max_active %d requested for %s "
3017 "is out of range, clamping between %d and %d\n",
Tejun Heof3421792010-07-02 10:03:51 +02003018 max_active, name, 1, lim);
Tejun Heob71ab8c2010-06-29 10:07:14 +02003019
Tejun Heof3421792010-07-02 10:03:51 +02003020 return clamp_val(max_active, 1, lim);
Tejun Heob71ab8c2010-06-29 10:07:14 +02003021}
3022
Tejun Heob196be82012-01-10 15:11:35 -08003023struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
Tejun Heod320c032010-06-29 10:07:14 +02003024 unsigned int flags,
3025 int max_active,
3026 struct lock_class_key *key,
Tejun Heob196be82012-01-10 15:11:35 -08003027 const char *lock_name, ...)
Oleg Nesterov3af244332007-05-09 02:34:09 -07003028{
Tejun Heob196be82012-01-10 15:11:35 -08003029 va_list args, args1;
Oleg Nesterov3af244332007-05-09 02:34:09 -07003030 struct workqueue_struct *wq;
Tejun Heoc34056a2010-06-29 10:07:11 +02003031 unsigned int cpu;
shumash7f490b22015-10-06 09:49:52 -06003032 size_t namelen;
shumashb2f60df2015-07-18 09:03:08 -06003033 /* see the comment above the definition of WQ_POWER_EFFICIENT */
3034 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
3035 flags |= WQ_UNBOUND;
3036
Tejun Heob196be82012-01-10 15:11:35 -08003037
3038 /* determine namelen, allocate wq and format name */
3039 va_start(args, lock_name);
3040 va_copy(args1, args);
3041 namelen = vsnprintf(NULL, 0, fmt, args) + 1;
3042
3043 wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
3044 if (!wq)
3045 goto err;
3046
3047 vsnprintf(wq->name, namelen, fmt, args1);
3048 va_end(args);
3049 va_end(args1);
Oleg Nesterov3af244332007-05-09 02:34:09 -07003050
Tejun Heof3421792010-07-02 10:03:51 +02003051 /*
Tejun Heo6370a6a2010-10-11 15:12:27 +02003052 * Workqueues which may be used during memory reclaim should
3053 * have a rescuer to guarantee forward progress.
3054 */
3055 if (flags & WQ_MEM_RECLAIM)
3056 flags |= WQ_RESCUER;
3057
Tejun Heod320c032010-06-29 10:07:14 +02003058 max_active = max_active ?: WQ_DFL_ACTIVE;
Tejun Heob196be82012-01-10 15:11:35 -08003059 max_active = wq_clamp_max_active(max_active, flags, wq->name);
Oleg Nesterov3af244332007-05-09 02:34:09 -07003060
Tejun Heob196be82012-01-10 15:11:35 -08003061 /* init wq */
Tejun Heo97e37d72010-06-29 10:07:10 +02003062 wq->flags = flags;
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003063 wq->saved_max_active = max_active;
Tejun Heo73f53c42010-06-29 10:07:11 +02003064 mutex_init(&wq->flush_mutex);
3065 atomic_set(&wq->nr_cwqs_to_flush, 0);
3066 INIT_LIST_HEAD(&wq->flusher_queue);
3067 INIT_LIST_HEAD(&wq->flusher_overflow);
Oleg Nesterov3af244332007-05-09 02:34:09 -07003068
Johannes Bergeb13ba82008-01-16 09:51:58 +01003069 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
Oleg Nesterovcce1a162007-05-09 02:34:13 -07003070 INIT_LIST_HEAD(&wq->list);
Oleg Nesterov3af244332007-05-09 02:34:09 -07003071
Tejun Heobdbc5dd2010-07-02 10:03:51 +02003072 if (alloc_cwqs(wq) < 0)
3073 goto err;
3074
Tejun Heof3421792010-07-02 10:03:51 +02003075 for_each_cwq_cpu(cpu, wq) {
Tejun Heo15376632010-06-29 10:07:11 +02003076 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
Tejun Heo8b03ae32010-06-29 10:07:12 +02003077 struct global_cwq *gcwq = get_gcwq(cpu);
Tejun Heodcb32ee2012-07-13 22:16:45 -07003078 int pool_idx = (bool)(flags & WQ_HIGHPRI);
Tejun Heo15376632010-06-29 10:07:11 +02003079
Tejun Heo0f900042010-06-29 10:07:11 +02003080 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
Tejun Heodcb32ee2012-07-13 22:16:45 -07003081 cwq->pool = &gcwq->pools[pool_idx];
Tejun Heoc34056a2010-06-29 10:07:11 +02003082 cwq->wq = wq;
Tejun Heo73f53c42010-06-29 10:07:11 +02003083 cwq->flush_color = -1;
Tejun Heo1e19ffc2010-06-29 10:07:12 +02003084 cwq->max_active = max_active;
Tejun Heo1e19ffc2010-06-29 10:07:12 +02003085 INIT_LIST_HEAD(&cwq->delayed_works);
Oleg Nesterov3af244332007-05-09 02:34:09 -07003086 }
3087
Tejun Heoe22bee72010-06-29 10:07:14 +02003088 if (flags & WQ_RESCUER) {
3089 struct worker *rescuer;
3090
Tejun Heof2e005a2010-07-20 15:59:09 +02003091 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
Tejun Heoe22bee72010-06-29 10:07:14 +02003092 goto err;
3093
3094 wq->rescuer = rescuer = alloc_worker();
3095 if (!rescuer)
3096 goto err;
3097
Tejun Heob196be82012-01-10 15:11:35 -08003098 rescuer->task = kthread_create(rescuer_thread, wq, "%s",
3099 wq->name);
Tejun Heoe22bee72010-06-29 10:07:14 +02003100 if (IS_ERR(rescuer->task))
3101 goto err;
3102
Tejun Heoe22bee72010-06-29 10:07:14 +02003103 rescuer->task->flags |= PF_THREAD_BOUND;
3104 wake_up_process(rescuer->task);
Oleg Nesterov3af244332007-05-09 02:34:09 -07003105 }
Oleg Nesterov3af244332007-05-09 02:34:09 -07003106
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003107 /*
3108 * workqueue_lock protects global freeze state and workqueues
3109 * list. Grab it, set max_active accordingly and add the new
3110 * workqueue to workqueues list.
3111 */
Tejun Heo15376632010-06-29 10:07:11 +02003112 spin_lock(&workqueue_lock);
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003113
Tejun Heo58a69cb2011-02-16 09:25:31 +01003114 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
Tejun Heof3421792010-07-02 10:03:51 +02003115 for_each_cwq_cpu(cpu, wq)
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003116 get_cwq(cpu, wq)->max_active = 0;
3117
Tejun Heo15376632010-06-29 10:07:11 +02003118 list_add(&wq->list, &workqueues);
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003119
Tejun Heo15376632010-06-29 10:07:11 +02003120 spin_unlock(&workqueue_lock);
3121
Oleg Nesterov3af244332007-05-09 02:34:09 -07003122 return wq;
Tejun Heo4690c4a2010-06-29 10:07:10 +02003123err:
3124 if (wq) {
Tejun Heobdbc5dd2010-07-02 10:03:51 +02003125 free_cwqs(wq);
Tejun Heof2e005a2010-07-20 15:59:09 +02003126 free_mayday_mask(wq->mayday_mask);
Tejun Heoe22bee72010-06-29 10:07:14 +02003127 kfree(wq->rescuer);
Tejun Heo4690c4a2010-06-29 10:07:10 +02003128 kfree(wq);
3129 }
3130 return NULL;
Oleg Nesterov3af244332007-05-09 02:34:09 -07003131}
Tejun Heod320c032010-06-29 10:07:14 +02003132EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
Oleg Nesterov3af244332007-05-09 02:34:09 -07003133
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	/* drain it before proceeding with destruction */
	drain_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	/* sanity check: after draining nothing may be in flight or queued */
	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	/* rescuer thread and mayday mask exist only for WQ_RESCUER wqs */
	if (wq->flags & WQ_RESCUER) {
		kthread_stop(wq->rescuer->task);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
	}

	free_cwqs(wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
3176
/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
	unsigned int cpu;

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	/* workqueue_lock protects saved_max_active and the freeze state */
	spin_lock(&workqueue_lock);

	wq->saved_max_active = max_active;

	for_each_cwq_cpu(cpu, wq) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		/*
		 * Don't touch the live max_active of a freezable wq whose
		 * gcwq is currently frozen; only saved_max_active changes.
		 */
		if (!(wq->flags & WQ_FREEZABLE) ||
		    !(gcwq->flags & GCWQ_FREEZING))
			get_cwq(gcwq->cpu, wq)->max_active = max_active;

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3212
3213/**
3214 * workqueue_congested - test whether a workqueue is congested
3215 * @cpu: CPU in question
3216 * @wq: target workqueue
3217 *
3218 * Test whether @wq's cpu workqueue for @cpu is congested. There is
3219 * no synchronization around this function and the test result is
3220 * unreliable and only useful as advisory hints or for debugging.
3221 *
3222 * RETURNS:
3223 * %true if congested, %false otherwise.
3224 */
3225bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3226{
3227 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3228
3229 return !list_empty(&cwq->delayed_works);
3230}
3231EXPORT_SYMBOL_GPL(workqueue_congested);
3232
3233/**
3234 * work_cpu - return the last known associated cpu for @work
3235 * @work: the work of interest
3236 *
3237 * RETURNS:
Tejun Heobdbc5dd2010-07-02 10:03:51 +02003238 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
Tejun Heodcd989c2010-06-29 10:07:14 +02003239 */
3240unsigned int work_cpu(struct work_struct *work)
3241{
3242 struct global_cwq *gcwq = get_work_gcwq(work);
3243
Tejun Heobdbc5dd2010-07-02 10:03:51 +02003244 return gcwq ? gcwq->cpu : WORK_CPU_NONE;
Tejun Heodcd989c2010-06-29 10:07:14 +02003245}
3246EXPORT_SYMBOL_GPL(work_cpu);
3247
3248/**
3249 * work_busy - test whether a work is currently pending or running
3250 * @work: the work to be tested
3251 *
3252 * Test whether @work is currently pending or running. There is no
3253 * synchronization around this function and the test result is
3254 * unreliable and only useful as advisory hints or for debugging.
3255 * Especially for reentrant wqs, the pending state might hide the
3256 * running state.
3257 *
3258 * RETURNS:
3259 * OR'd bitmask of WORK_BUSY_* bits.
3260 */
3261unsigned int work_busy(struct work_struct *work)
3262{
3263 struct global_cwq *gcwq = get_work_gcwq(work);
3264 unsigned long flags;
3265 unsigned int ret = 0;
3266
3267 if (!gcwq)
3268 return false;
3269
3270 spin_lock_irqsave(&gcwq->lock, flags);
3271
3272 if (work_pending(work))
3273 ret |= WORK_BUSY_PENDING;
3274 if (find_worker_executing_work(gcwq, work))
3275 ret |= WORK_BUSY_RUNNING;
3276
3277 spin_unlock_irqrestore(&gcwq->lock, flags);
3278
3279 return ret;
3280}
3281EXPORT_SYMBOL_GPL(work_busy);
3282
Tejun Heodb7bccf2010-06-29 10:07:12 +02003283/*
3284 * CPU hotplug.
3285 *
Tejun Heoe22bee72010-06-29 10:07:14 +02003286 * There are two challenges in supporting CPU hotplug. Firstly, there
3287 * are a lot of assumptions on strong associations among work, cwq and
3288 * gcwq which make migrating pending and scheduled works very
3289 * difficult to implement without impacting hot paths. Secondly,
3290 * gcwqs serve mix of short, long and very long running works making
3291 * blocked draining impractical.
3292 *
3293 * This is solved by allowing a gcwq to be detached from CPU, running
3294 * it with unbound (rogue) workers and allowing it to be reattached
3295 * later if the cpu comes back online. A separate thread is created
3296 * to govern a gcwq in such state and is called the trustee of the
3297 * gcwq.
Tejun Heodb7bccf2010-06-29 10:07:12 +02003298 *
3299 * Trustee states and their descriptions.
3300 *
3301 * START Command state used on startup. On CPU_DOWN_PREPARE, a
3302 * new trustee is started with this state.
3303 *
3304 * IN_CHARGE Once started, trustee will enter this state after
Tejun Heoe22bee72010-06-29 10:07:14 +02003305 * assuming the manager role and making all existing
3306 * workers rogue. DOWN_PREPARE waits for trustee to
3307 * enter this state. After reaching IN_CHARGE, trustee
3308 * tries to execute the pending worklist until it's empty
3309 * and the state is set to BUTCHER, or the state is set
3310 * to RELEASE.
Tejun Heodb7bccf2010-06-29 10:07:12 +02003311 *
3312 * BUTCHER Command state which is set by the cpu callback after
 *		the cpu has gone down. Once this state is set trustee
3314 * knows that there will be no new works on the worklist
3315 * and once the worklist is empty it can proceed to
3316 * killing idle workers.
3317 *
3318 * RELEASE Command state which is set by the cpu callback if the
3319 * cpu down has been canceled or it has come online
3320 * again. After recognizing this state, trustee stops
Tejun Heoe22bee72010-06-29 10:07:14 +02003321 * trying to drain or butcher and clears ROGUE, rebinds
3322 * all remaining workers back to the cpu and releases
3323 * manager role.
Tejun Heodb7bccf2010-06-29 10:07:12 +02003324 *
3325 * DONE Trustee will enter this state after BUTCHER or RELEASE
3326 * is complete.
3327 *
3328 * trustee CPU draining
3329 * took over down complete
3330 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3331 * | | ^
3332 * | CPU is back online v return workers |
3333 * ----------------> RELEASE --------------
3334 */
3335
/**
 * trustee_wait_event_timeout - timed event wait for trustee
 * @cond: condition to wait for
 * @timeout: timeout in jiffies
 *
 * wait_event_timeout() for trustee to use.  Handles locking and
 * checks for RELEASE request.
 *
 * NOTE: must be a macro - @cond is re-evaluated on each wakeup and the
 * expansion references the local variable gcwq at the call site.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * Positive indicating left time if @cond is satisfied, 0 if timed
 * out, -1 if canceled.
 */
#define trustee_wait_event_timeout(cond, timeout) ({			\
	long __ret = (timeout);						\
	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
	       __ret) {							\
		spin_unlock_irq(&gcwq->lock);				\
		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
			__ret);						\
		spin_lock_irq(&gcwq->lock);				\
	}								\
	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
})
3364
/**
 * trustee_wait_event - event wait for trustee
 * @cond: condition to wait for
 *
 * wait_event() for trustee to use.  Automatically handles locking and
 * checks for RELEASE request (reported as cancellation).
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * 0 if @cond is satisfied, -1 if canceled.
 */
#define trustee_wait_event(cond) ({					\
	long __ret1;							\
	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
	__ret1 < 0 ? -1 : 0;						\
})
3384
Tejun Heo9c6bae02012-07-13 22:16:44 -07003385static bool gcwq_is_managing_workers(struct global_cwq *gcwq)
3386{
3387 struct worker_pool *pool;
3388
3389 for_each_worker_pool(pool, gcwq)
3390 if (pool->flags & POOL_MANAGING_WORKERS)
3391 return true;
3392 return false;
3393}
3394
3395static bool gcwq_has_idle_workers(struct global_cwq *gcwq)
3396{
3397 struct worker_pool *pool;
3398
3399 for_each_worker_pool(pool, gcwq)
3400 if (!list_empty(&pool->idle_list))
3401 return true;
3402 return false;
3403}
3404
Tejun Heodb7bccf2010-06-29 10:07:12 +02003405static int __cpuinit trustee_thread(void *__gcwq)
3406{
3407 struct global_cwq *gcwq = __gcwq;
Tejun Heo9c6bae02012-07-13 22:16:44 -07003408 struct worker_pool *pool;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003409 struct worker *worker;
Tejun Heoe22bee72010-06-29 10:07:14 +02003410 struct work_struct *work;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003411 struct hlist_node *pos;
Tejun Heoe22bee72010-06-29 10:07:14 +02003412 long rc;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003413 int i;
3414
3415 BUG_ON(gcwq->cpu != smp_processor_id());
3416
3417 spin_lock_irq(&gcwq->lock);
3418 /*
Tejun Heoe22bee72010-06-29 10:07:14 +02003419 * Claim the manager position and make all workers rogue.
3420 * Trustee must be bound to the target cpu and can't be
3421 * cancelled.
Tejun Heodb7bccf2010-06-29 10:07:12 +02003422 */
3423 BUG_ON(gcwq->cpu != smp_processor_id());
Tejun Heo9c6bae02012-07-13 22:16:44 -07003424 rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq));
Tejun Heoe22bee72010-06-29 10:07:14 +02003425 BUG_ON(rc < 0);
3426
Tejun Heo9c6bae02012-07-13 22:16:44 -07003427 for_each_worker_pool(pool, gcwq) {
3428 pool->flags |= POOL_MANAGING_WORKERS;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003429
Tejun Heo9c6bae02012-07-13 22:16:44 -07003430 list_for_each_entry(worker, &pool->idle_list, entry)
3431 worker->flags |= WORKER_ROGUE;
3432 }
Tejun Heodb7bccf2010-06-29 10:07:12 +02003433
3434 for_each_busy_worker(worker, i, pos, gcwq)
Tejun Heocb444762010-07-02 10:03:50 +02003435 worker->flags |= WORKER_ROGUE;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003436
3437 /*
Tejun Heoe22bee72010-06-29 10:07:14 +02003438 * Call schedule() so that we cross rq->lock and thus can
3439 * guarantee sched callbacks see the rogue flag. This is
3440 * necessary as scheduler callbacks may be invoked from other
3441 * cpus.
3442 */
3443 spin_unlock_irq(&gcwq->lock);
3444 schedule();
3445 spin_lock_irq(&gcwq->lock);
3446
3447 /*
Tejun Heocb444762010-07-02 10:03:50 +02003448 * Sched callbacks are disabled now. Zap nr_running. After
3449 * this, nr_running stays zero and need_more_worker() and
3450 * keep_working() are always true as long as the worklist is
3451 * not empty.
Tejun Heoe22bee72010-06-29 10:07:14 +02003452 */
Tejun Heo9c6bae02012-07-13 22:16:44 -07003453 for_each_worker_pool(pool, gcwq)
3454 atomic_set(get_pool_nr_running(pool), 0);
Tejun Heoe22bee72010-06-29 10:07:14 +02003455
3456 spin_unlock_irq(&gcwq->lock);
Tejun Heo9c6bae02012-07-13 22:16:44 -07003457 for_each_worker_pool(pool, gcwq)
3458 del_timer_sync(&pool->idle_timer);
Tejun Heoe22bee72010-06-29 10:07:14 +02003459 spin_lock_irq(&gcwq->lock);
3460
3461 /*
Tejun Heodb7bccf2010-06-29 10:07:12 +02003462 * We're now in charge. Notify and proceed to drain. We need
3463 * to keep the gcwq running during the whole CPU down
3464 * procedure as other cpu hotunplug callbacks may need to
3465 * flush currently running tasks.
3466 */
3467 gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3468 wake_up_all(&gcwq->trustee_wait);
3469
3470 /*
3471 * The original cpu is in the process of dying and may go away
3472 * anytime now. When that happens, we and all workers would
Tejun Heoe22bee72010-06-29 10:07:14 +02003473 * be migrated to other cpus. Try draining any left work. We
3474 * want to get it over with ASAP - spam rescuers, wake up as
3475 * many idlers as necessary and create new ones till the
3476 * worklist is empty. Note that if the gcwq is frozen, there
Tejun Heo58a69cb2011-02-16 09:25:31 +01003477 * may be frozen works in freezable cwqs. Don't declare
Tejun Heoe22bee72010-06-29 10:07:14 +02003478 * completion while frozen.
Tejun Heodb7bccf2010-06-29 10:07:12 +02003479 */
Tejun Heo9c6bae02012-07-13 22:16:44 -07003480 while (true) {
3481 bool busy = false;
Tejun Heoe22bee72010-06-29 10:07:14 +02003482
Tejun Heo9c6bae02012-07-13 22:16:44 -07003483 for_each_worker_pool(pool, gcwq)
3484 busy |= pool->nr_workers != pool->nr_idle;
Tejun Heoe22bee72010-06-29 10:07:14 +02003485
Tejun Heo9c6bae02012-07-13 22:16:44 -07003486 if (!busy && !(gcwq->flags & GCWQ_FREEZING) &&
3487 gcwq->trustee_state != TRUSTEE_IN_CHARGE)
3488 break;
Tejun Heoe22bee72010-06-29 10:07:14 +02003489
Tejun Heo9c6bae02012-07-13 22:16:44 -07003490 for_each_worker_pool(pool, gcwq) {
3491 int nr_works = 0;
3492
3493 list_for_each_entry(work, &pool->worklist, entry) {
3494 send_mayday(work);
3495 nr_works++;
3496 }
3497
3498 list_for_each_entry(worker, &pool->idle_list, entry) {
3499 if (!nr_works--)
3500 break;
3501 wake_up_process(worker->task);
3502 }
3503
3504 if (need_to_create_worker(pool)) {
3505 spin_unlock_irq(&gcwq->lock);
3506 worker = create_worker(pool, false);
3507 spin_lock_irq(&gcwq->lock);
3508 if (worker) {
3509 worker->flags |= WORKER_ROGUE;
3510 start_worker(worker);
3511 }
Tejun Heoe22bee72010-06-29 10:07:14 +02003512 }
3513 }
3514
Tejun Heodb7bccf2010-06-29 10:07:12 +02003515 /* give a breather */
3516 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3517 break;
3518 }
3519
Tejun Heoe22bee72010-06-29 10:07:14 +02003520 /*
3521 * Either all works have been scheduled and cpu is down, or
3522 * cpu down has already been canceled. Wait for and butcher
3523 * all workers till we're canceled.
3524 */
3525 do {
Tejun Heo9c6bae02012-07-13 22:16:44 -07003526 rc = trustee_wait_event(gcwq_has_idle_workers(gcwq));
3527
3528 i = 0;
3529 for_each_worker_pool(pool, gcwq) {
3530 while (!list_empty(&pool->idle_list)) {
3531 worker = list_first_entry(&pool->idle_list,
3532 struct worker, entry);
3533 destroy_worker(worker);
3534 }
3535 i |= pool->nr_workers;
3536 }
3537 } while (i && rc >= 0);
Tejun Heoe22bee72010-06-29 10:07:14 +02003538
3539 /*
3540 * At this point, either draining has completed and no worker
3541 * is left, or cpu down has been canceled or the cpu is being
3542 * brought back up. There shouldn't be any idle one left.
3543 * Tell the remaining busy ones to rebind once it finishes the
3544 * currently scheduled works by scheduling the rebind_work.
3545 */
Tejun Heo9c6bae02012-07-13 22:16:44 -07003546 for_each_worker_pool(pool, gcwq)
3547 WARN_ON(!list_empty(&pool->idle_list));
Tejun Heoe22bee72010-06-29 10:07:14 +02003548
3549 for_each_busy_worker(worker, i, pos, gcwq) {
3550 struct work_struct *rebind_work = &worker->rebind_work;
Lai Jiangshan6adebb02012-09-02 00:28:19 +08003551 unsigned long worker_flags = worker->flags;
Tejun Heoe22bee72010-06-29 10:07:14 +02003552
3553 /*
3554 * Rebind_work may race with future cpu hotplug
3555 * operations. Use a separate flag to mark that
Lai Jiangshan6adebb02012-09-02 00:28:19 +08003556 * rebinding is scheduled. The morphing should
3557 * be atomic.
Tejun Heoe22bee72010-06-29 10:07:14 +02003558 */
Lai Jiangshan6adebb02012-09-02 00:28:19 +08003559 worker_flags |= WORKER_REBIND;
3560 worker_flags &= ~WORKER_ROGUE;
3561 ACCESS_ONCE(worker->flags) = worker_flags;
Tejun Heoe22bee72010-06-29 10:07:14 +02003562
3563 /* queue rebind_work, wq doesn't matter, use the default one */
3564 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3565 work_data_bits(rebind_work)))
3566 continue;
3567
3568 debug_work_activate(rebind_work);
Tejun Heod320c032010-06-29 10:07:14 +02003569 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
Tejun Heoe22bee72010-06-29 10:07:14 +02003570 worker->scheduled.next,
3571 work_color_to_flags(WORK_NO_COLOR));
3572 }
3573
3574 /* relinquish manager role */
Tejun Heo9c6bae02012-07-13 22:16:44 -07003575 for_each_worker_pool(pool, gcwq)
3576 pool->flags &= ~POOL_MANAGING_WORKERS;
Tejun Heoe22bee72010-06-29 10:07:14 +02003577
Tejun Heodb7bccf2010-06-29 10:07:12 +02003578 /* notify completion */
3579 gcwq->trustee = NULL;
3580 gcwq->trustee_state = TRUSTEE_DONE;
3581 wake_up_all(&gcwq->trustee_wait);
3582 spin_unlock_irq(&gcwq->lock);
3583 return 0;
3584}
3585
3586/**
3587 * wait_trustee_state - wait for trustee to enter the specified state
3588 * @gcwq: gcwq the trustee of interest belongs to
3589 * @state: target state to wait for
3590 *
3591 * Wait for the trustee to reach @state. DONE is already matched.
3592 *
3593 * CONTEXT:
3594 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3595 * multiple times. To be used by cpu_callback.
3596 */
3597static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
Namhyung Kim06bd6eb2010-08-22 23:19:42 +09003598__releases(&gcwq->lock)
3599__acquires(&gcwq->lock)
Tejun Heodb7bccf2010-06-29 10:07:12 +02003600{
3601 if (!(gcwq->trustee_state == state ||
3602 gcwq->trustee_state == TRUSTEE_DONE)) {
3603 spin_unlock_irq(&gcwq->lock);
3604 __wait_event(gcwq->trustee_wait,
3605 gcwq->trustee_state == state ||
3606 gcwq->trustee_state == TRUSTEE_DONE);
3607 spin_lock_irq(&gcwq->lock);
3608 }
3609}
3610
Oleg Nesterov3af244332007-05-09 02:34:09 -07003611static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3612 unsigned long action,
3613 void *hcpu)
3614{
3615 unsigned int cpu = (unsigned long)hcpu;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003616 struct global_cwq *gcwq = get_gcwq(cpu);
3617 struct task_struct *new_trustee = NULL;
Tejun Heo9c6bae02012-07-13 22:16:44 -07003618 struct worker *new_workers[NR_WORKER_POOLS] = { };
3619 struct worker_pool *pool;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003620 unsigned long flags;
Tejun Heo9c6bae02012-07-13 22:16:44 -07003621 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003623 action &= ~CPU_TASKS_FROZEN;
3624
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 switch (action) {
Tejun Heodb7bccf2010-06-29 10:07:12 +02003626 case CPU_DOWN_PREPARE:
3627 new_trustee = kthread_create(trustee_thread, gcwq,
3628 "workqueue_trustee/%d\n", cpu);
3629 if (IS_ERR(new_trustee))
3630 return notifier_from_errno(PTR_ERR(new_trustee));
3631 kthread_bind(new_trustee, cpu);
Tejun Heoe22bee72010-06-29 10:07:14 +02003632 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633 case CPU_UP_PREPARE:
Tejun Heo9c6bae02012-07-13 22:16:44 -07003634 i = 0;
3635 for_each_worker_pool(pool, gcwq) {
3636 BUG_ON(pool->first_idle);
3637 new_workers[i] = create_worker(pool, false);
3638 if (!new_workers[i++])
3639 goto err_destroy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 }
3642
Tejun Heodb7bccf2010-06-29 10:07:12 +02003643 /* some are called w/ irq disabled, don't disturb irq status */
3644 spin_lock_irqsave(&gcwq->lock, flags);
3645
Oleg Nesterov00dfcaf2008-04-29 01:00:27 -07003646 switch (action) {
Tejun Heodb7bccf2010-06-29 10:07:12 +02003647 case CPU_DOWN_PREPARE:
3648 /* initialize trustee and tell it to acquire the gcwq */
3649 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3650 gcwq->trustee = new_trustee;
3651 gcwq->trustee_state = TRUSTEE_START;
3652 wake_up_process(gcwq->trustee);
3653 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
Tejun Heoe22bee72010-06-29 10:07:14 +02003654 /* fall through */
3655 case CPU_UP_PREPARE:
Tejun Heo9c6bae02012-07-13 22:16:44 -07003656 i = 0;
3657 for_each_worker_pool(pool, gcwq) {
3658 BUG_ON(pool->first_idle);
3659 pool->first_idle = new_workers[i++];
3660 }
Tejun Heoe22bee72010-06-29 10:07:14 +02003661 break;
3662
3663 case CPU_DYING:
3664 /*
3665 * Before this, the trustee and all workers except for
3666 * the ones which are still executing works from
3667 * before the last CPU down must be on the cpu. After
3668 * this, they'll all be diasporas.
3669 */
3670 gcwq->flags |= GCWQ_DISASSOCIATED;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003671 break;
3672
Oleg Nesterov3da1c842008-07-25 01:47:50 -07003673 case CPU_POST_DEAD:
Tejun Heodb7bccf2010-06-29 10:07:12 +02003674 gcwq->trustee_state = TRUSTEE_BUTCHER;
Tejun Heoe22bee72010-06-29 10:07:14 +02003675 /* fall through */
3676 case CPU_UP_CANCELED:
Tejun Heo9c6bae02012-07-13 22:16:44 -07003677 for_each_worker_pool(pool, gcwq) {
3678 destroy_worker(pool->first_idle);
3679 pool->first_idle = NULL;
3680 }
Tejun Heodb7bccf2010-06-29 10:07:12 +02003681 break;
3682
3683 case CPU_DOWN_FAILED:
3684 case CPU_ONLINE:
Tejun Heoe22bee72010-06-29 10:07:14 +02003685 gcwq->flags &= ~GCWQ_DISASSOCIATED;
Tejun Heodb7bccf2010-06-29 10:07:12 +02003686 if (gcwq->trustee_state != TRUSTEE_DONE) {
3687 gcwq->trustee_state = TRUSTEE_RELEASE;
3688 wake_up_process(gcwq->trustee);
3689 wait_trustee_state(gcwq, TRUSTEE_DONE);
3690 }
3691
Tejun Heoe22bee72010-06-29 10:07:14 +02003692 /*
3693 * Trustee is done and there might be no worker left.
3694 * Put the first_idle in and request a real manager to
3695 * take a look.
3696 */
Tejun Heo9c6bae02012-07-13 22:16:44 -07003697 for_each_worker_pool(pool, gcwq) {
3698 spin_unlock_irq(&gcwq->lock);
3699 kthread_bind(pool->first_idle->task, cpu);
3700 spin_lock_irq(&gcwq->lock);
3701 pool->flags |= POOL_MANAGE_WORKERS;
3702 start_worker(pool->first_idle);
3703 pool->first_idle = NULL;
3704 }
Tejun Heodb7bccf2010-06-29 10:07:12 +02003705 break;
Oleg Nesterov00dfcaf2008-04-29 01:00:27 -07003706 }
3707
Tejun Heodb7bccf2010-06-29 10:07:12 +02003708 spin_unlock_irqrestore(&gcwq->lock, flags);
3709
Tejun Heo15376632010-06-29 10:07:11 +02003710 return notifier_from_errno(0);
Tejun Heo9c6bae02012-07-13 22:16:44 -07003711
3712err_destroy:
3713 if (new_trustee)
3714 kthread_stop(new_trustee);
3715
3716 spin_lock_irqsave(&gcwq->lock, flags);
3717 for (i = 0; i < NR_WORKER_POOLS; i++)
3718 if (new_workers[i])
3719 destroy_worker(new_workers[i]);
3720 spin_unlock_irqrestore(&gcwq->lock, flags);
3721
3722 return NOTIFY_BAD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724
Tejun Heod3b42542012-07-17 12:39:26 -07003725/*
3726 * Workqueues should be brought up before normal priority CPU notifiers.
3727 * This will be registered high priority CPU notifier.
3728 */
3729static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3730 unsigned long action,
3731 void *hcpu)
3732{
3733 switch (action & ~CPU_TASKS_FROZEN) {
3734 case CPU_UP_PREPARE:
3735 case CPU_UP_CANCELED:
3736 case CPU_DOWN_FAILED:
3737 case CPU_ONLINE:
3738 return workqueue_cpu_callback(nfb, action, hcpu);
3739 }
3740 return NOTIFY_OK;
3741}
3742
3743/*
3744 * Workqueues should be brought down after normal priority CPU notifiers.
3745 * This will be registered as low priority CPU notifier.
3746 */
3747static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3748 unsigned long action,
3749 void *hcpu)
3750{
3751 switch (action & ~CPU_TASKS_FROZEN) {
3752 case CPU_DOWN_PREPARE:
3753 case CPU_DYING:
3754 case CPU_POST_DEAD:
3755 return workqueue_cpu_callback(nfb, action, hcpu);
3756 }
3757 return NOTIFY_OK;
3758}
3759
Rusty Russell2d3854a2008-11-05 13:39:10 +11003760#ifdef CONFIG_SMP
Rusty Russell8ccad402009-01-16 15:31:15 -08003761
/*
 * Context shuttled from work_on_cpu() to work_for_cpu_fn() so that an
 * arbitrary function can be run through the workqueue machinery on a
 * specific CPU.  Lives on the caller's stack (see INIT_WORK_ONSTACK).
 */
struct work_for_cpu {
	struct work_struct work;	/* embedded work item */
	long (*fn)(void *);		/* function to invoke */
	void *arg;			/* argument passed to @fn */
	long ret;			/* return value of @fn */
};
3768
Tejun Heofc7da7e2012-09-18 12:48:43 -07003769static void work_for_cpu_fn(struct work_struct *work)
Rusty Russell2d3854a2008-11-05 13:39:10 +11003770{
Tejun Heofc7da7e2012-09-18 12:48:43 -07003771 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
3772
Rusty Russell2d3854a2008-11-05 13:39:10 +11003773 wfc->ret = wfc->fn(wfc->arg);
3774}
3775
3776/**
3777 * work_on_cpu - run a function in user context on a particular cpu
3778 * @cpu: the cpu to run on
3779 * @fn: the function to run
3780 * @arg: the function arg
3781 *
Rusty Russell31ad9082009-01-16 15:31:15 -08003782 * This will return the value @fn returns.
3783 * It is up to the caller to ensure that the cpu doesn't go offline.
Andrew Morton6b440032009-04-09 09:50:37 -06003784 * The caller must not hold any locks which would prevent @fn from completing.
Rusty Russell2d3854a2008-11-05 13:39:10 +11003785 */
3786long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3787{
Tejun Heofc7da7e2012-09-18 12:48:43 -07003788 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
Rusty Russell2d3854a2008-11-05 13:39:10 +11003789
Tejun Heofc7da7e2012-09-18 12:48:43 -07003790 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
3791 schedule_work_on(cpu, &wfc.work);
3792 flush_work(&wfc.work);
Rusty Russell2d3854a2008-11-05 13:39:10 +11003793 return wfc.ret;
3794}
3795EXPORT_SYMBOL_GPL(work_on_cpu);
3796#endif /* CONFIG_SMP */
3797
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003798#ifdef CONFIG_FREEZER
Rusty Russelle7577c52009-01-01 10:12:25 +10303799
/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all freezable
 * workqueues will queue new works to their frozen_works list instead of
 * gcwq->worklist.  (Forcing max_active to zero makes new works land on
 * the delayed list until thaw_workqueues() restores max_active — see
 * thaw_workqueues().)
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	/* freezing must not already be in progress */
	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			/* max_active == 0 stops new works from starting */
			if (cwq && wq->flags & WQ_FREEZABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003840
3841/**
Tejun Heo58a69cb2011-02-16 09:25:31 +01003842 * freeze_workqueues_busy - are freezable workqueues still busy?
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003843 *
3844 * Check whether freezing is complete. This function must be called
3845 * between freeze_workqueues_begin() and thaw_workqueues().
3846 *
3847 * CONTEXT:
3848 * Grabs and releases workqueue_lock.
3849 *
3850 * RETURNS:
Tejun Heo58a69cb2011-02-16 09:25:31 +01003851 * %true if some freezable workqueues are still busy. %false if freezing
3852 * is complete.
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003853 */
3854bool freeze_workqueues_busy(void)
3855{
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003856 unsigned int cpu;
3857 bool busy = false;
3858
3859 spin_lock(&workqueue_lock);
3860
3861 BUG_ON(!workqueue_freezing);
3862
Tejun Heof3421792010-07-02 10:03:51 +02003863 for_each_gcwq_cpu(cpu) {
Tejun Heobdbc5dd2010-07-02 10:03:51 +02003864 struct workqueue_struct *wq;
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003865 /*
3866 * nr_active is monotonically decreasing. It's safe
3867 * to peek without lock.
3868 */
3869 list_for_each_entry(wq, &workqueues, list) {
3870 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3871
Tejun Heo58a69cb2011-02-16 09:25:31 +01003872 if (!cwq || !(wq->flags & WQ_FREEZABLE))
Tejun Heoa0a1a5f2010-06-29 10:07:12 +02003873 continue;
3874
3875 BUG_ON(cwq->nr_active < 0);
3876 if (cwq->nr_active) {
3877 busy = true;
3878 goto out_unlock;
3879 }
3880 }
3881 }
3882out_unlock:
3883 spin_unlock(&workqueue_lock);
3884 return busy;
3885}
3886
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective gcwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	/* nothing to do if freezing never started (or already thawed) */
	if (!workqueue_freezing)
		goto out_unlock;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			/* move works delayed by the freeze back onto the worklist */
			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}

		/* kick the pools so the re-activated works get processed */
		for_each_worker_pool(pool, gcwq)
			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
3939#endif /* CONFIG_FREEZER */
3940
Suresh Siddha6ee05782010-07-30 14:57:37 -07003941static int __init init_workqueues(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942{
Tejun Heoc34056a2010-06-29 10:07:11 +02003943 unsigned int cpu;
Tejun Heoc8e55f32010-06-29 10:07:12 +02003944 int i;
Tejun Heoc34056a2010-06-29 10:07:11 +02003945
Tejun Heod3b42542012-07-17 12:39:26 -07003946 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
3947 cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
Tejun Heo8b03ae32010-06-29 10:07:12 +02003948
3949 /* initialize gcwqs */
Tejun Heof3421792010-07-02 10:03:51 +02003950 for_each_gcwq_cpu(cpu) {
Tejun Heo8b03ae32010-06-29 10:07:12 +02003951 struct global_cwq *gcwq = get_gcwq(cpu);
Tejun Heo9c6bae02012-07-13 22:16:44 -07003952 struct worker_pool *pool;
Tejun Heo8b03ae32010-06-29 10:07:12 +02003953
3954 spin_lock_init(&gcwq->lock);
3955 gcwq->cpu = cpu;
Tejun Heo477a3c32010-08-31 10:54:35 +02003956 gcwq->flags |= GCWQ_DISASSOCIATED;
Tejun Heo8b03ae32010-06-29 10:07:12 +02003957
Tejun Heoc8e55f32010-06-29 10:07:12 +02003958 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3959 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3960
Tejun Heo9c6bae02012-07-13 22:16:44 -07003961 for_each_worker_pool(pool, gcwq) {
3962 pool->gcwq = gcwq;
3963 INIT_LIST_HEAD(&pool->worklist);
3964 INIT_LIST_HEAD(&pool->idle_list);
Tejun Heoe22bee72010-06-29 10:07:14 +02003965
Tejun Heo9c6bae02012-07-13 22:16:44 -07003966 init_timer_deferrable(&pool->idle_timer);
3967 pool->idle_timer.function = idle_worker_timeout;
3968 pool->idle_timer.data = (unsigned long)pool;
Tejun Heoe22bee72010-06-29 10:07:14 +02003969
Tejun Heo9c6bae02012-07-13 22:16:44 -07003970 setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
3971 (unsigned long)pool);
3972
3973 ida_init(&pool->worker_ida);
3974 }
Tejun Heodb7bccf2010-06-29 10:07:12 +02003975
3976 gcwq->trustee_state = TRUSTEE_DONE;
3977 init_waitqueue_head(&gcwq->trustee_wait);
Tejun Heo8b03ae32010-06-29 10:07:12 +02003978 }
3979
Tejun Heoe22bee72010-06-29 10:07:14 +02003980 /* create the initial worker */
Tejun Heof3421792010-07-02 10:03:51 +02003981 for_each_online_gcwq_cpu(cpu) {
Tejun Heoe22bee72010-06-29 10:07:14 +02003982 struct global_cwq *gcwq = get_gcwq(cpu);
Tejun Heo9c6bae02012-07-13 22:16:44 -07003983 struct worker_pool *pool;
Tejun Heoe22bee72010-06-29 10:07:14 +02003984
Tejun Heo477a3c32010-08-31 10:54:35 +02003985 if (cpu != WORK_CPU_UNBOUND)
3986 gcwq->flags &= ~GCWQ_DISASSOCIATED;
Tejun Heo9c6bae02012-07-13 22:16:44 -07003987
3988 for_each_worker_pool(pool, gcwq) {
3989 struct worker *worker;
3990
3991 worker = create_worker(pool, true);
3992 BUG_ON(!worker);
3993 spin_lock_irq(&gcwq->lock);
3994 start_worker(worker);
3995 spin_unlock_irq(&gcwq->lock);
3996 }
Tejun Heoe22bee72010-06-29 10:07:14 +02003997 }
3998
Tejun Heod320c032010-06-29 10:07:14 +02003999 system_wq = alloc_workqueue("events", 0, 0);
4000 system_long_wq = alloc_workqueue("events_long", 0, 0);
4001 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
Tejun Heof3421792010-07-02 10:03:51 +02004002 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
4003 WQ_UNBOUND_MAX_ACTIVE);
Tejun Heo24d51ad2011-02-21 09:52:50 +01004004 system_freezable_wq = alloc_workqueue("events_freezable",
4005 WQ_FREEZABLE, 0);
Alan Stern62d3c542012-03-02 10:51:00 +01004006 system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
4007 WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
shumash157ecab2015-07-18 09:12:19 -06004008 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
4009 WQ_POWER_EFFICIENT, 0);
4010 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
4011 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
4012 0);
Hitoshi Mitakee5cba242010-11-26 12:06:44 +01004013 BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
Alan Stern62d3c542012-03-02 10:51:00 +01004014 !system_unbound_wq || !system_freezable_wq ||
4015 !system_nrt_freezable_wq);
Suresh Siddha6ee05782010-07-30 14:57:37 -07004016 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017}
Suresh Siddha6ee05782010-07-30 14:57:37 -07004018early_initcall(init_workqueues);