| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
| Tejun Heo | c54fce6 | 2010-09-10 16:51:36 +0200 | [diff] [blame] | 2 |  * kernel/workqueue.c - generic async execution with shared worker pool | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 |  * | 
| Tejun Heo | c54fce6 | 2010-09-10 16:51:36 +0200 | [diff] [blame] | 4 |  * Copyright (C) 2002		Ingo Molnar | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 |  * | 
| Tejun Heo | c54fce6 | 2010-09-10 16:51:36 +0200 | [diff] [blame] | 6 |  *   Derived from the taskqueue/keventd code by: | 
 | 7 |  *     David Woodhouse <dwmw2@infradead.org> | 
 | 8 |  *     Andrew Morton | 
 | 9 |  *     Kai Petzke <wpp@marie.physik.tu-berlin.de> | 
 | 10 |  *     Theodore Ts'o <tytso@mit.edu> | 
| Christoph Lameter | 89ada67 | 2005-10-30 15:01:59 -0800 | [diff] [blame] | 11 |  * | 
| Christoph Lameter | cde5353 | 2008-07-04 09:59:22 -0700 | [diff] [blame] | 12 |  * Made to use alloc_percpu by Christoph Lameter. | 
| Tejun Heo | c54fce6 | 2010-09-10 16:51:36 +0200 | [diff] [blame] | 13 |  * | 
 | 14 |  * Copyright (C) 2010		SUSE Linux Products GmbH | 
 | 15 |  * Copyright (C) 2010		Tejun Heo <tj@kernel.org> | 
 | 16 |  * | 
 | 17 |  * This is the generic async execution mechanism.  Work items are | 
 | 18 |  * executed in process context.  The worker pool is shared and | 
 | 19 |  * automatically managed.  There is one worker pool for each CPU and | 
 | 20 |  * one extra for works which are better served by workers not | 
 | 21 |  * bound to any specific CPU. | 
 | 22 |  * | 
 | 23 |  * Please read Documentation/workqueue.txt for details. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 24 |  */ | 
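/*
 * Illustrative usage sketch (not part of the original file): a typical
 * user of this mechanism declares a work item and hands it to the shared
 * pool, after which the callback runs in process context on one of the
 * pool's workers.  example_fn/example_work are made-up names:
 *
 *	static void example_fn(struct work_struct *work)
 *	{
 *		pr_info("running on a pool worker in process context\n");
 *	}
 *	static DECLARE_WORK(example_work, example_fn);
 *
 *	schedule_work(&example_work);
 */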
 | 25 |  | 
 | 26 | #include <linux/module.h> | 
 | 27 | #include <linux/kernel.h> | 
 | 28 | #include <linux/sched.h> | 
 | 29 | #include <linux/init.h> | 
 | 30 | #include <linux/signal.h> | 
 | 31 | #include <linux/completion.h> | 
 | 32 | #include <linux/workqueue.h> | 
 | 33 | #include <linux/slab.h> | 
 | 34 | #include <linux/cpu.h> | 
 | 35 | #include <linux/notifier.h> | 
 | 36 | #include <linux/kthread.h> | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 37 | #include <linux/hardirq.h> | 
| Christoph Lameter | 4693402 | 2006-10-11 01:21:26 -0700 | [diff] [blame] | 38 | #include <linux/mempolicy.h> | 
| Rafael J. Wysocki | 341a595 | 2006-12-06 20:34:49 -0800 | [diff] [blame] | 39 | #include <linux/freezer.h> | 
| Peter Zijlstra | d5abe66 | 2006-12-06 20:37:26 -0800 | [diff] [blame] | 40 | #include <linux/kallsyms.h> | 
 | 41 | #include <linux/debug_locks.h> | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 42 | #include <linux/lockdep.h> | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 43 | #include <linux/idr.h> | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 44 |  | 
 | 45 | #include "workqueue_sched.h" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 46 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 47 | enum { | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 48 | 	/* global_cwq flags */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 49 | 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */ | 
 | 50 | 	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */ | 
 | 51 | 	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */ | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 52 | 	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */ | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 53 | 	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */ | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 54 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 55 | 	/* worker flags */ | 
 | 56 | 	WORKER_STARTED		= 1 << 0,	/* started */ | 
 | 57 | 	WORKER_DIE		= 1 << 1,	/* die die die */ | 
 | 58 | 	WORKER_IDLE		= 1 << 2,	/* is idle */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 59 | 	WORKER_PREP		= 1 << 3,	/* preparing to run works */ | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 60 | 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 61 | 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */ | 
| Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 62 | 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 63 | 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 64 |  | 
| Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 65 | 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND | | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 66 | 				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND, | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 67 |  | 
 | 68 | 	/* gcwq->trustee_state */ | 
 | 69 | 	TRUSTEE_START		= 0,		/* start */ | 
 | 70 | 	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */ | 
 | 71 | 	TRUSTEE_BUTCHER		= 2,		/* butcher workers */ | 
 | 72 | 	TRUSTEE_RELEASE		= 3,		/* release workers */ | 
 | 73 | 	TRUSTEE_DONE		= 4,		/* trustee is done */ | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 74 |  | 
 | 75 | 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */ | 
 | 76 | 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER, | 
 | 77 | 	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1, | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 78 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 79 | 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */ | 
 | 80 | 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */ | 
 | 81 |  | 
| Tejun Heo | 3233cdb | 2011-02-16 18:10:19 +0100 | [diff] [blame] | 82 | 	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2, | 
 | 83 | 						/* call for help after 10ms | 
 | 84 | 						   (min two ticks) */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 85 | 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */ | 
 | 86 | 	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */ | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 87 | 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 88 |  | 
 | 89 | 	/* | 
 | 90 | 	 * Rescue workers are used only in emergencies and shared by | 
 | 91 | 	 * all cpus.  Give -20. | 
 | 92 | 	 */ | 
 | 93 | 	RESCUER_NICE_LEVEL	= -20, | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 94 | }; | 
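/*
 * Worked numbers for the constants above (illustrative only): with
 * HZ=1000, MAYDAY_INITIAL_TIMEOUT is HZ/100 = 10 ticks = 10ms; with
 * HZ=100, HZ/100 = 1 tick falls below the minimum, so the expression
 * clamps it to 2 ticks = 20ms.  BUSY_WORKER_HASH_ORDER of 6 gives a
 * 1 << 6 = 64 bucket busy hash with mask 0x3f.
 */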
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 95 |  | 
 | 96 | /* | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 97 |  * Structure fields follow one of the following exclusion rules. | 
 | 98 |  * | 
| Tejun Heo | e41e704 | 2010-08-24 14:22:47 +0200 | [diff] [blame] | 99 |  * I: Modifiable by initialization/destruction paths and read-only for | 
 | 100 |  *    everyone else. | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 101 |  * | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 102 |  * P: Preemption protected.  Disabling preemption is enough and should | 
 | 103 |  *    only be modified and accessed from the local cpu. | 
 | 104 |  * | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 105 |  * L: gcwq->lock protected.  Access with gcwq->lock held. | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 106 |  * | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 107 |  * X: During normal operation, modification requires gcwq->lock and | 
 | 108 |  *    should be done only from local cpu.  Either disabling preemption | 
 | 109 |  *    on local cpu or grabbing gcwq->lock is enough for read access. | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 110 |  *    If GCWQ_DISASSOCIATED is set, it's identical to L. | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 111 |  * | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 112 |  * F: wq->flush_mutex protected. | 
 | 113 |  * | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 114 |  * W: workqueue_lock protected. | 
 | 115 |  */ | 
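/*
 * For example (illustrative sketch), an "L:" field such as gcwq->nr_idle
 * may only be touched with the gcwq lock held:
 *
 *	spin_lock_irq(&gcwq->lock);
 *	gcwq->nr_idle++;
 *	spin_unlock_irq(&gcwq->lock);
 *
 * while an "I:" field such as worker->task is written once during
 * initialization and only read afterwards, so readers need no locking.
 */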
 | 116 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 117 | struct global_cwq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 118 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 119 | /* | 
 | 120 |  * The poor guys doing the actual heavy lifting.  All on-duty workers | 
 | 121 |  * are either serving the manager role, on idle list or on busy hash. | 
 | 122 |  */ | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 123 | struct worker { | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 124 | 	/* on idle list while idle, on busy hash table while busy */ | 
 | 125 | 	union { | 
 | 126 | 		struct list_head	entry;	/* L: while idle */ | 
 | 127 | 		struct hlist_node	hentry;	/* L: while busy */ | 
 | 128 | 	}; | 
 | 129 |  | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 130 | 	struct work_struct	*current_work;	/* L: work being processed */ | 
| Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 131 | 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */ | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 132 | 	struct list_head	scheduled;	/* L: scheduled works */ | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 133 | 	struct task_struct	*task;		/* I: worker task */ | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 134 | 	struct global_cwq	*gcwq;		/* I: the associated gcwq */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 135 | 	/* 64 bytes boundary on 64bit, 32 on 32bit */ | 
 | 136 | 	unsigned long		last_active;	/* L: last active timestamp */ | 
 | 137 | 	unsigned int		flags;		/* X: flags */ | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 138 | 	int			id;		/* I: worker id */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 139 | 	struct work_struct	rebind_work;	/* L: rebind worker to cpu */ | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 140 | }; | 
 | 141 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 142 | /* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 143 |  * Global per-cpu workqueue.  There's one and only one for each cpu | 
 | 144 |  * and all works are queued and processed here regardless of their | 
 | 145 |  * target workqueues. | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 146 |  */ | 
 | 147 | struct global_cwq { | 
 | 148 | 	spinlock_t		lock;		/* the gcwq lock */ | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 149 | 	struct list_head	worklist;	/* L: list of pending works */ | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 150 | 	unsigned int		cpu;		/* I: the associated cpu */ | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 151 | 	unsigned int		flags;		/* L: GCWQ_* flags */ | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 152 |  | 
 | 153 | 	int			nr_workers;	/* L: total number of workers */ | 
 | 154 | 	int			nr_idle;	/* L: currently idle ones */ | 
 | 155 |  | 
 | 156 | 	/* workers are chained either in the idle_list or busy_hash */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 157 | 	struct list_head	idle_list;	/* X: list of idle workers */ | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 158 | 	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE]; | 
 | 159 | 						/* L: hash of busy workers */ | 
 | 160 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 161 | 	struct timer_list	idle_timer;	/* L: worker idle timeout */ | 
 | 162 | 	struct timer_list	mayday_timer;	/* L: SOS timer for workers */ | 
 | 163 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 164 | 	struct ida		worker_ida;	/* L: for worker IDs */ | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 165 |  | 
 | 166 | 	struct task_struct	*trustee;	/* L: for gcwq shutdown */ | 
 | 167 | 	unsigned int		trustee_state;	/* L: trustee state */ | 
 | 168 | 	wait_queue_head_t	trustee_wait;	/* trustee wait */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 169 | 	struct worker		*first_idle;	/* L: first idle worker */ | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 170 | } ____cacheline_aligned_in_smp; | 
 | 171 |  | 
 | 172 | /* | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 173 |  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of | 
| Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 174 |  * work_struct->data are used for flags and thus cwqs need to be | 
 | 175 |  * aligned on a (1 << WORK_STRUCT_FLAG_BITS) byte boundary. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 176 |  */ | 
 | 177 | struct cpu_workqueue_struct { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 178 | 	struct global_cwq	*gcwq;		/* I: the associated gcwq */ | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 179 | 	struct workqueue_struct *wq;		/* I: the owning workqueue */ | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 180 | 	int			work_color;	/* L: current color */ | 
 | 181 | 	int			flush_color;	/* L: flushing color */ | 
 | 182 | 	int			nr_in_flight[WORK_NR_COLORS]; | 
 | 183 | 						/* L: nr of in_flight works */ | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 184 | 	int			nr_active;	/* L: nr of active works */ | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 185 | 	int			max_active;	/* L: max active works */ | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 186 | 	struct list_head	delayed_works;	/* L: delayed works */ | 
| Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 187 | }; | 
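/*
 * Illustrative note: because the low WORK_STRUCT_FLAG_BITS of
 * work_struct->data double as flag bits, a cwq pointer stored there must
 * have zeros in those bits, hence the alignment requirement above.
 * Recovering the pointer is then a plain mask, as get_work_cwq() below
 * does:
 *
 *	cwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */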
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 188 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 189 | /* | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 190 |  * Structure used to wait for workqueue flush. | 
 | 191 |  */ | 
 | 192 | struct wq_flusher { | 
 | 193 | 	struct list_head	list;		/* F: list of flushers */ | 
 | 194 | 	int			flush_color;	/* F: flush color waiting for */ | 
 | 195 | 	struct completion	done;		/* flush completion */ | 
 | 196 | }; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 197 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 198 | /* | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 199 |  * All cpumasks are assumed to be always set on UP and thus can't be | 
 | 200 |  * used to determine whether there's something to be done. | 
 | 201 |  */ | 
 | 202 | #ifdef CONFIG_SMP | 
 | 203 | typedef cpumask_var_t mayday_mask_t; | 
 | 204 | #define mayday_test_and_set_cpu(cpu, mask)	\ | 
 | 205 | 	cpumask_test_and_set_cpu((cpu), (mask)) | 
 | 206 | #define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask)) | 
 | 207 | #define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask)) | 
| Tejun Heo | 9c37547 | 2010-08-31 11:18:34 +0200 | [diff] [blame] | 208 | #define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp)) | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 209 | #define free_mayday_mask(mask)			free_cpumask_var((mask)) | 
 | 210 | #else | 
 | 211 | typedef unsigned long mayday_mask_t; | 
 | 212 | #define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask)) | 
 | 213 | #define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask)) | 
 | 214 | #define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask)) | 
 | 215 | #define alloc_mayday_mask(maskp, gfp)		true | 
 | 216 | #define free_mayday_mask(mask)			do { } while (0) | 
 | 217 | #endif | 
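/*
 * Illustrative note on the UP variants above: the whole mask degenerates
 * to a single bit in an unsigned long.  for_each_mayday_cpu() expands to
 * "if ((cpu) = 0, (mask))" - the comma expression assigns cpu 0 and runs
 * the body at most once, only when the bit is set.
 */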
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 218 |  | 
 | 219 | /* | 
 | 220 |  * The externally visible workqueue abstraction is an array of | 
 | 221 |  * per-CPU workqueues: | 
 | 222 |  */ | 
 | 223 | struct workqueue_struct { | 
| Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 224 | 	unsigned int		flags;		/* W: WQ_* flags */ | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 225 | 	union { | 
 | 226 | 		struct cpu_workqueue_struct __percpu	*pcpu; | 
 | 227 | 		struct cpu_workqueue_struct		*single; | 
 | 228 | 		unsigned long				v; | 
 | 229 | 	} cpu_wq;				/* I: cwq's */ | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 230 | 	struct list_head	list;		/* W: list of all workqueues */ | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 231 |  | 
 | 232 | 	struct mutex		flush_mutex;	/* protects wq flushing */ | 
 | 233 | 	int			work_color;	/* F: current work color */ | 
 | 234 | 	int			flush_color;	/* F: current flush color */ | 
 | 235 | 	atomic_t		nr_cwqs_to_flush; /* flush in progress */ | 
 | 236 | 	struct wq_flusher	*first_flusher;	/* F: first flusher */ | 
 | 237 | 	struct list_head	flusher_queue;	/* F: flush waiters */ | 
 | 238 | 	struct list_head	flusher_overflow; /* F: flush overflow list */ | 
 | 239 |  | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 240 | 	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 241 | 	struct worker		*rescuer;	/* I: rescue worker */ | 
 | 242 |  | 
| Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 243 | 	int			nr_drainers;	/* W: drain in progress */ | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 244 | 	int			saved_max_active; /* W: saved cwq max_active */ | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 245 | 	const char		*name;		/* I: workqueue name */ | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 246 | #ifdef CONFIG_LOCKDEP | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 247 | 	struct lockdep_map	lockdep_map; | 
| Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 248 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 249 | }; | 
 | 250 |  | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 251 | struct workqueue_struct *system_wq __read_mostly; | 
 | 252 | struct workqueue_struct *system_long_wq __read_mostly; | 
 | 253 | struct workqueue_struct *system_nrt_wq __read_mostly; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 254 | struct workqueue_struct *system_unbound_wq __read_mostly; | 
| Tejun Heo | 24d51ad | 2011-02-21 09:52:50 +0100 | [diff] [blame] | 255 | struct workqueue_struct *system_freezable_wq __read_mostly; | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 256 | EXPORT_SYMBOL_GPL(system_wq); | 
 | 257 | EXPORT_SYMBOL_GPL(system_long_wq); | 
 | 258 | EXPORT_SYMBOL_GPL(system_nrt_wq); | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 259 | EXPORT_SYMBOL_GPL(system_unbound_wq); | 
| Tejun Heo | 24d51ad | 2011-02-21 09:52:50 +0100 | [diff] [blame] | 260 | EXPORT_SYMBOL_GPL(system_freezable_wq); | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 261 |  | 
| Tejun Heo | 97bd234 | 2010-10-05 10:41:14 +0200 | [diff] [blame] | 262 | #define CREATE_TRACE_POINTS | 
 | 263 | #include <trace/events/workqueue.h> | 
 | 264 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 265 | #define for_each_busy_worker(worker, i, pos, gcwq)			\ | 
 | 266 | 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\ | 
 | 267 | 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) | 
 | 268 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 269 | static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask, | 
 | 270 | 				  unsigned int sw) | 
 | 271 | { | 
 | 272 | 	if (cpu < nr_cpu_ids) { | 
 | 273 | 		if (sw & 1) { | 
 | 274 | 			cpu = cpumask_next(cpu, mask); | 
 | 275 | 			if (cpu < nr_cpu_ids) | 
 | 276 | 				return cpu; | 
 | 277 | 		} | 
 | 278 | 		if (sw & 2) | 
 | 279 | 			return WORK_CPU_UNBOUND; | 
 | 280 | 	} | 
 | 281 | 	return WORK_CPU_NONE; | 
 | 282 | } | 
 | 283 |  | 
 | 284 | static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, | 
 | 285 | 				struct workqueue_struct *wq) | 
 | 286 | { | 
 | 287 | 	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); | 
 | 288 | } | 
 | 289 |  | 
| Tejun Heo | 0988495 | 2010-08-01 11:50:12 +0200 | [diff] [blame] | 290 | /* | 
 | 291 |  * CPU iterators | 
 | 292 |  * | 
 | 293 |  * An extra gcwq is defined for an invalid cpu number | 
 | 294 |  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any | 
 | 295 |  * specific CPU.  The following iterators are similar to | 
 | 296 |  * for_each_*_cpu() iterators but also consider the unbound gcwq. | 
 | 297 |  * | 
 | 298 |  * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND | 
 | 299 |  * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND | 
 | 300 |  * for_each_cwq_cpu()		: possible CPUs for bound workqueues, | 
 | 301 |  *				  WORK_CPU_UNBOUND for unbound workqueues | 
 | 302 |  */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 303 | #define for_each_gcwq_cpu(cpu)						\ | 
 | 304 | 	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\ | 
 | 305 | 	     (cpu) < WORK_CPU_NONE;					\ | 
 | 306 | 	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3)) | 
 | 307 |  | 
 | 308 | #define for_each_online_gcwq_cpu(cpu)					\ | 
 | 309 | 	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\ | 
 | 310 | 	     (cpu) < WORK_CPU_NONE;					\ | 
 | 311 | 	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3)) | 
 | 312 |  | 
 | 313 | #define for_each_cwq_cpu(cpu, wq)					\ | 
 | 314 | 	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\ | 
 | 315 | 	     (cpu) < WORK_CPU_NONE;					\ | 
 | 316 | 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) | 
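/*
 * Illustrative example: on a machine with two possible CPUs,
 * for_each_gcwq_cpu() visits 0, 1 and then WORK_CPU_UNBOUND before
 * __next_gcwq_cpu() returns WORK_CPU_NONE and the loop stops.  With
 * for_each_cwq_cpu(), a bound workqueue sees only 0 and 1 while an
 * unbound one sees only WORK_CPU_UNBOUND.
 */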
 | 317 |  | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 318 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 
 | 319 |  | 
 | 320 | static struct debug_obj_descr work_debug_descr; | 
 | 321 |  | 
| Stanislaw Gruszka | 9977728 | 2011-03-07 09:58:33 +0100 | [diff] [blame] | 322 | static void *work_debug_hint(void *addr) | 
 | 323 | { | 
 | 324 | 	return ((struct work_struct *) addr)->func; | 
 | 325 | } | 
 | 326 |  | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 327 | /* | 
 | 328 |  * fixup_init is called when: | 
 | 329 |  * - an active object is initialized | 
 | 330 |  */ | 
 | 331 | static int work_fixup_init(void *addr, enum debug_obj_state state) | 
 | 332 | { | 
 | 333 | 	struct work_struct *work = addr; | 
 | 334 |  | 
 | 335 | 	switch (state) { | 
 | 336 | 	case ODEBUG_STATE_ACTIVE: | 
 | 337 | 		cancel_work_sync(work); | 
 | 338 | 		debug_object_init(work, &work_debug_descr); | 
 | 339 | 		return 1; | 
 | 340 | 	default: | 
 | 341 | 		return 0; | 
 | 342 | 	} | 
 | 343 | } | 
 | 344 |  | 
 | 345 | /* | 
 | 346 |  * fixup_activate is called when: | 
 | 347 |  * - an active object is activated | 
 | 348 |  * - an unknown object is activated (might be a statically initialized object) | 
 | 349 |  */ | 
 | 350 | static int work_fixup_activate(void *addr, enum debug_obj_state state) | 
 | 351 | { | 
 | 352 | 	struct work_struct *work = addr; | 
 | 353 |  | 
 | 354 | 	switch (state) { | 
 | 355 |  | 
 | 356 | 	case ODEBUG_STATE_NOTAVAILABLE: | 
 | 357 | 		/* | 
 | 358 | 		 * This is not really a fixup. The work struct was | 
 | 359 | 		 * statically initialized. We just make sure that it | 
 | 360 | 		 * is tracked in the object tracker. | 
 | 361 | 		 */ | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 362 | 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 363 | 			debug_object_init(work, &work_debug_descr); | 
 | 364 | 			debug_object_activate(work, &work_debug_descr); | 
 | 365 | 			return 0; | 
 | 366 | 		} | 
 | 367 | 		WARN_ON_ONCE(1); | 
 | 368 | 		return 0; | 
 | 369 |  | 
 | 370 | 	case ODEBUG_STATE_ACTIVE: | 
 | 371 | 		WARN_ON(1); | 
 | 372 |  | 
 | 373 | 	default: | 
 | 374 | 		return 0; | 
 | 375 | 	} | 
 | 376 | } | 
 | 377 |  | 
 | 378 | /* | 
 | 379 |  * fixup_free is called when: | 
 | 380 |  * - an active object is freed | 
 | 381 |  */ | 
 | 382 | static int work_fixup_free(void *addr, enum debug_obj_state state) | 
 | 383 | { | 
 | 384 | 	struct work_struct *work = addr; | 
 | 385 |  | 
 | 386 | 	switch (state) { | 
 | 387 | 	case ODEBUG_STATE_ACTIVE: | 
 | 388 | 		cancel_work_sync(work); | 
 | 389 | 		debug_object_free(work, &work_debug_descr); | 
 | 390 | 		return 1; | 
 | 391 | 	default: | 
 | 392 | 		return 0; | 
 | 393 | 	} | 
 | 394 | } | 
 | 395 |  | 
 | 396 | static struct debug_obj_descr work_debug_descr = { | 
 | 397 | 	.name		= "work_struct", | 
| Stanislaw Gruszka | 9977728 | 2011-03-07 09:58:33 +0100 | [diff] [blame] | 398 | 	.debug_hint	= work_debug_hint, | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 399 | 	.fixup_init	= work_fixup_init, | 
 | 400 | 	.fixup_activate	= work_fixup_activate, | 
 | 401 | 	.fixup_free	= work_fixup_free, | 
 | 402 | }; | 
 | 403 |  | 
 | 404 | static inline void debug_work_activate(struct work_struct *work) | 
 | 405 | { | 
 | 406 | 	debug_object_activate(work, &work_debug_descr); | 
 | 407 | } | 
 | 408 |  | 
 | 409 | static inline void debug_work_deactivate(struct work_struct *work) | 
 | 410 | { | 
 | 411 | 	debug_object_deactivate(work, &work_debug_descr); | 
 | 412 | } | 
 | 413 |  | 
 | 414 | void __init_work(struct work_struct *work, int onstack) | 
 | 415 | { | 
 | 416 | 	if (onstack) | 
 | 417 | 		debug_object_init_on_stack(work, &work_debug_descr); | 
 | 418 | 	else | 
 | 419 | 		debug_object_init(work, &work_debug_descr); | 
 | 420 | } | 
 | 421 | EXPORT_SYMBOL_GPL(__init_work); | 
 | 422 |  | 
 | 423 | void destroy_work_on_stack(struct work_struct *work) | 
 | 424 | { | 
 | 425 | 	debug_object_free(work, &work_debug_descr); | 
 | 426 | } | 
 | 427 | EXPORT_SYMBOL_GPL(destroy_work_on_stack); | 
 | 428 |  | 
 | 429 | #else | 
 | 430 | static inline void debug_work_activate(struct work_struct *work) { } | 
 | 431 | static inline void debug_work_deactivate(struct work_struct *work) { } | 
 | 432 | #endif | 
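/*
 * Illustrative sketch: code which places a work item on its stack is
 * expected to pair the on-stack initializer with destroy_work_on_stack()
 * so that the debug object tracking above stays balanced.  example_fn is
 * a placeholder callback:
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, example_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */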
 | 433 |  | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 434 | /* Serializes the accesses to the list of workqueues. */ | 
 | 435 | static DEFINE_SPINLOCK(workqueue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 436 | static LIST_HEAD(workqueues); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 437 | static bool workqueue_freezing;		/* W: have wqs started freezing? */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 438 |  | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 439 | /* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 440 |  * The almighty global cpu workqueues.  nr_running is the only field | 
 | 441 |  * which is expected to be used frequently by other cpus via | 
 | 442 |  * try_to_wake_up().  Put it in a separate cacheline. | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 443 |  */ | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 444 | static DEFINE_PER_CPU(struct global_cwq, global_cwq); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 445 | static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running); | 
| Nathan Lynch | f756d5e | 2006-01-08 01:05:12 -0800 | [diff] [blame] | 446 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 447 | /* | 
 | 448 |  * Global cpu workqueue and nr_running counter for unbound gcwq.  The | 
 | 449 |  * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its | 
 | 450 |  * workers have WORKER_UNBOUND set. | 
 | 451 |  */ | 
 | 452 | static struct global_cwq unbound_global_cwq; | 
 | 453 | static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */ | 
 | 454 |  | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 455 | static int worker_thread(void *__worker); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 456 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 457 | static struct global_cwq *get_gcwq(unsigned int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 458 | { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 459 | 	if (cpu != WORK_CPU_UNBOUND) | 
 | 460 | 		return &per_cpu(global_cwq, cpu); | 
 | 461 | 	else | 
 | 462 | 		return &unbound_global_cwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 463 | } | 
 | 464 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 465 | static atomic_t *get_gcwq_nr_running(unsigned int cpu) | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 466 | { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 467 | 	if (cpu != WORK_CPU_UNBOUND) | 
 | 468 | 		return &per_cpu(gcwq_nr_running, cpu); | 
 | 469 | 	else | 
 | 470 | 		return &unbound_gcwq_nr_running; | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 471 | } | 
 | 472 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 473 | static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, | 
 | 474 | 					    struct workqueue_struct *wq) | 
| Oleg Nesterov | a848e3b | 2007-05-09 02:34:17 -0700 | [diff] [blame] | 475 | { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 476 | 	if (!(wq->flags & WQ_UNBOUND)) { | 
 | 477 | 		if (likely(cpu < nr_cpu_ids)) { | 
 | 478 | #ifdef CONFIG_SMP | 
 | 479 | 			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 480 | #else | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 481 | 			return wq->cpu_wq.single; | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 482 | #endif | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 483 | 		} | 
 | 484 | 	} else if (likely(cpu == WORK_CPU_UNBOUND)) | 
 | 485 | 		return wq->cpu_wq.single; | 
 | 486 | 	return NULL; | 
| Oleg Nesterov | a848e3b | 2007-05-09 02:34:17 -0700 | [diff] [blame] | 487 | } | 
 | 488 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 489 | static unsigned int work_color_to_flags(int color) | 
 | 490 | { | 
 | 491 | 	return color << WORK_STRUCT_COLOR_SHIFT; | 
 | 492 | } | 
 | 493 |  | 
 | 494 | static int get_work_color(struct work_struct *work) | 
 | 495 | { | 
 | 496 | 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & | 
 | 497 | 		((1 << WORK_STRUCT_COLOR_BITS) - 1); | 
 | 498 | } | 
 | 499 |  | 
 | 500 | static int work_next_color(int color) | 
 | 501 | { | 
 | 502 | 	return (color + 1) % WORK_NR_COLORS; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 503 | } | 
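/*
 * Illustrative note: work_color_to_flags() stores a flush color in the
 * WORK_STRUCT_COLOR_BITS wide field of work->data, get_work_color()
 * extracts it again, and work_next_color() cycles through
 * 0, 1, ..., WORK_NR_COLORS - 1 and back to 0 so that successive flushes
 * can be told apart.
 */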
 | 504 |  | 
| David Howells | 4594bf1 | 2006-12-07 11:33:26 +0000 | [diff] [blame] | 505 | /* | 
| Tejun Heo | e120153 | 2010-07-22 14:14:25 +0200 | [diff] [blame] | 506 |  * A work's data points to the cwq with WORK_STRUCT_CWQ set while the | 
 | 507 |  * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is | 
 | 508 |  * cleared and the work data contains the cpu number it was last on. | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 509 |  * | 
 | 510 |  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the | 
 | 511 |  * cwq, cpu or clear work->data.  These functions should only be | 
 | 512 |  * called while the work is owned - ie. while the PENDING bit is set. | 
 | 513 |  * | 
 | 514 |  * get_work_[g]cwq() can be used to obtain the gcwq or cwq | 
 | 515 |  * corresponding to a work.  gcwq is available once the work has been | 
 | 516 |  * queued anywhere after initialization.  cwq is available only from | 
 | 517 |  * queueing until execution starts. | 
| David Howells | 4594bf1 | 2006-12-07 11:33:26 +0000 | [diff] [blame] | 518 |  */ | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 519 | static inline void set_work_data(struct work_struct *work, unsigned long data, | 
 | 520 | 				 unsigned long flags) | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 521 | { | 
| David Howells | 4594bf1 | 2006-12-07 11:33:26 +0000 | [diff] [blame] | 522 | 	BUG_ON(!work_pending(work)); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 523 | 	atomic_long_set(&work->data, data | flags | work_static(work)); | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 524 | } | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 525 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 526 | static void set_work_cwq(struct work_struct *work, | 
 | 527 | 			 struct cpu_workqueue_struct *cwq, | 
 | 528 | 			 unsigned long extra_flags) | 
| Oleg Nesterov | 4d707b9 | 2010-04-23 17:40:40 +0200 | [diff] [blame] | 529 | { | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 530 | 	set_work_data(work, (unsigned long)cwq, | 
| Tejun Heo | e120153 | 2010-07-22 14:14:25 +0200 | [diff] [blame] | 531 | 		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); | 
| Oleg Nesterov | 4d707b9 | 2010-04-23 17:40:40 +0200 | [diff] [blame] | 532 | } | 
 | 533 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 534 | static void set_work_cpu(struct work_struct *work, unsigned int cpu) | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 535 | { | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 536 | 	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING); | 
 | 537 | } | 
 | 538 |  | 
 | 539 | static void clear_work_data(struct work_struct *work) | 
 | 540 | { | 
 | 541 | 	set_work_data(work, WORK_STRUCT_NO_CPU, 0); | 
 | 542 | } | 
 | 543 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 544 | static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) | 
 | 545 | { | 
| Tejun Heo | e120153 | 2010-07-22 14:14:25 +0200 | [diff] [blame] | 546 | 	unsigned long data = atomic_long_read(&work->data); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 547 |  | 
| Tejun Heo | e120153 | 2010-07-22 14:14:25 +0200 | [diff] [blame] | 548 | 	if (data & WORK_STRUCT_CWQ) | 
 | 549 | 		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); | 
 | 550 | 	else | 
 | 551 | 		return NULL; | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 552 | } | 
 | 553 |  | 
 | 554 | static struct global_cwq *get_work_gcwq(struct work_struct *work) | 
 | 555 | { | 
| Tejun Heo | e120153 | 2010-07-22 14:14:25 +0200 | [diff] [blame] | 556 | 	unsigned long data = atomic_long_read(&work->data); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 557 | 	unsigned int cpu; | 
 | 558 |  | 
| Tejun Heo | e120153 | 2010-07-22 14:14:25 +0200 | [diff] [blame] | 559 | 	if (data & WORK_STRUCT_CWQ) | 
 | 560 | 		return ((struct cpu_workqueue_struct *) | 
 | 561 | 			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq; | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 562 |  | 
 | 563 | 	cpu = data >> WORK_STRUCT_FLAG_BITS; | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 564 | 	if (cpu == WORK_CPU_NONE) | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 565 | 		return NULL; | 
 | 566 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 567 | 	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 568 | 	return get_gcwq(cpu); | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 569 | } | 
 | 570 |  | 
 | 571 | /* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 572 |  * Policy functions.  These define the policies on how the global | 
 | 573 |  * worker pool is managed.  Unless noted otherwise, these functions | 
 | 574 |  * assume that they're being called with gcwq->lock held. | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 575 |  */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 576 |  | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 577 | static bool __need_more_worker(struct global_cwq *gcwq) | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 578 | { | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 579 | 	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) || | 
 | 580 | 		gcwq->flags & GCWQ_HIGHPRI_PENDING; | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 581 | } | 
 | 582 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 583 | /* | 
 | 584 |  * Need to wake up a worker?  Called from anything but currently | 
 | 585 |  * running workers. | 
 | 586 |  */ | 
 | 587 | static bool need_more_worker(struct global_cwq *gcwq) | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 588 | { | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 589 | 	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq); | 
| David Howells | 365970a | 2006-11-22 14:54:49 +0000 | [diff] [blame] | 590 | } | 
 | 591 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 592 | /* Can I start working?  Called from busy but !running workers. */ | 
 | 593 | static bool may_start_working(struct global_cwq *gcwq) | 
 | 594 | { | 
 | 595 | 	return gcwq->nr_idle; | 
 | 596 | } | 
 | 597 |  | 
 | 598 | /* Do I need to keep working?  Called from currently running workers. */ | 
 | 599 | static bool keep_working(struct global_cwq *gcwq) | 
 | 600 | { | 
 | 601 | 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); | 
 | 602 |  | 
| Tejun Heo | 3031004 | 2010-10-11 11:51:57 +0200 | [diff] [blame] | 603 | 	return !list_empty(&gcwq->worklist) && | 
 | 604 | 		(atomic_read(nr_running) <= 1 || | 
 | 605 | 		 gcwq->flags & GCWQ_HIGHPRI_PENDING); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 606 | } | 
 | 607 |  | 
 | 608 | /* Do we need a new worker?  Called from manager. */ | 
 | 609 | static bool need_to_create_worker(struct global_cwq *gcwq) | 
 | 610 | { | 
 | 611 | 	return need_more_worker(gcwq) && !may_start_working(gcwq); | 
 | 612 | } | 
 | 613 |  | 
 | 614 | /* Do I need to be the manager? */ | 
 | 615 | static bool need_to_manage_workers(struct global_cwq *gcwq) | 
 | 616 | { | 
 | 617 | 	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS; | 
 | 618 | } | 
 | 619 |  | 
 | 620 | /* Do we have too many workers and should some go away? */ | 
 | 621 | static bool too_many_workers(struct global_cwq *gcwq) | 
 | 622 | { | 
 | 623 | 	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS; | 
 | 624 | 	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */ | 
 | 625 | 	int nr_busy = gcwq->nr_workers - nr_idle; | 
 | 626 |  | 
 | 627 | 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; | 
 | 628 | } | 
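/*
 * Worked example (illustrative): with MAX_IDLE_WORKERS_RATIO == 4 and 16
 * busy workers, too_many_workers() starts returning true once nr_idle
 * (the manager counts as idle) reaches 6, since (6 - 2) * 4 >= 16; up to
 * five idle workers are tolerated in that case.
 */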
 | 629 |  | 
 | 630 | /* | 
 | 631 |  * Wake up functions. | 
 | 632 |  */ | 
 | 633 |  | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 634 | /* Return the first worker.  Safe with preemption disabled */ | 
 | 635 | static struct worker *first_worker(struct global_cwq *gcwq) | 
 | 636 | { | 
 | 637 | 	if (unlikely(list_empty(&gcwq->idle_list))) | 
 | 638 | 		return NULL; | 
 | 639 |  | 
 | 640 | 	return list_first_entry(&gcwq->idle_list, struct worker, entry); | 
 | 641 | } | 
 | 642 |  | 
 | 643 | /** | 
 | 644 |  * wake_up_worker - wake up an idle worker | 
 | 645 |  * @gcwq: gcwq to wake worker for | 
 | 646 |  * | 
 | 647 |  * Wake up the first idle worker of @gcwq. | 
 | 648 |  * | 
 | 649 |  * CONTEXT: | 
 | 650 |  * spin_lock_irq(gcwq->lock). | 
 | 651 |  */ | 
 | 652 | static void wake_up_worker(struct global_cwq *gcwq) | 
 | 653 | { | 
 | 654 | 	struct worker *worker = first_worker(gcwq); | 
 | 655 |  | 
 | 656 | 	if (likely(worker)) | 
 | 657 | 		wake_up_process(worker->task); | 
 | 658 | } | 
 | 659 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 660 | /** | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 661 |  * wq_worker_waking_up - a worker is waking up | 
 | 662 |  * @task: task waking up | 
 | 663 |  * @cpu: CPU @task is waking up to | 
 | 664 |  * | 
 | 665 |  * This function is called during try_to_wake_up() when a worker is | 
 | 666 |  * being awoken. | 
 | 667 |  * | 
 | 668 |  * CONTEXT: | 
 | 669 |  * spin_lock_irq(rq->lock) | 
 | 670 |  */ | 
 | 671 | void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) | 
 | 672 | { | 
 | 673 | 	struct worker *worker = kthread_data(task); | 
 | 674 |  | 
| Steven Rostedt | 2d64672 | 2010-12-03 23:12:33 -0500 | [diff] [blame] | 675 | 	if (!(worker->flags & WORKER_NOT_RUNNING)) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 676 | 		atomic_inc(get_gcwq_nr_running(cpu)); | 
 | 677 | } | 
 | 678 |  | 
 | 679 | /** | 
 | 680 |  * wq_worker_sleeping - a worker is going to sleep | 
 | 681 |  * @task: task going to sleep | 
 | 682 |  * @cpu: CPU in question, must be the current CPU number | 
 | 683 |  * | 
 | 684 |  * This function is called during schedule() when a busy worker is | 
 | 685 |  * going to sleep.  A worker on the same cpu can be woken up by | 
 | 686 |  * returning a pointer to its task. | 
 | 687 |  * | 
 | 688 |  * CONTEXT: | 
 | 689 |  * spin_lock_irq(rq->lock) | 
 | 690 |  * | 
 | 691 |  * RETURNS: | 
 | 692 |  * Worker task on @cpu to wake up, %NULL if none. | 
 | 693 |  */ | 
 | 694 | struct task_struct *wq_worker_sleeping(struct task_struct *task, | 
 | 695 | 				       unsigned int cpu) | 
 | 696 | { | 
 | 697 | 	struct worker *worker = kthread_data(task), *to_wakeup = NULL; | 
 | 698 | 	struct global_cwq *gcwq = get_gcwq(cpu); | 
 | 699 | 	atomic_t *nr_running = get_gcwq_nr_running(cpu); | 
 | 700 |  | 
| Steven Rostedt | 2d64672 | 2010-12-03 23:12:33 -0500 | [diff] [blame] | 701 | 	if (worker->flags & WORKER_NOT_RUNNING) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 702 | 		return NULL; | 
 | 703 |  | 
 | 704 | 	/* this can only happen on the local cpu */ | 
 | 705 | 	BUG_ON(cpu != raw_smp_processor_id()); | 
 | 706 |  | 
 | 707 | 	/* | 
 | 708 | 	 * The counterpart of the following dec_and_test, implied mb, | 
 | 709 | 	 * worklist not empty test sequence is in insert_work(). | 
 | 710 | 	 * Please read comment there. | 
 | 711 | 	 * | 
 | 712 | 	 * NOT_RUNNING is clear.  This means that trustee is not in | 
 | 713 | 	 * charge and we're running on the local cpu w/ rq lock held | 
 | 714 | 	 * and preemption disabled, which in turn means that no one else | 
 | 715 | 	 * could be manipulating idle_list, so dereferencing idle_list | 
 | 716 | 	 * without gcwq lock is safe. | 
 | 717 | 	 */ | 
 | 718 | 	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist)) | 
 | 719 | 		to_wakeup = first_worker(gcwq); | 
 | 720 | 	return to_wakeup ? to_wakeup->task : NULL; | 
 | 721 | } | 
 | 722 |  | 
 | 723 | /** | 
 | 724 |  * worker_set_flags - set worker flags and adjust nr_running accordingly | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 725 |  * @worker: self | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 726 |  * @flags: flags to set | 
 | 727 |  * @wakeup: wakeup an idle worker if necessary | 
 | 728 |  * | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 729 |  * Set @flags in @worker->flags and adjust nr_running accordingly.  If | 
 | 730 |  * nr_running becomes zero and @wakeup is %true, an idle worker is | 
 | 731 |  * woken up. | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 732 |  * | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 733 |  * CONTEXT: | 
 | 734 |  * spin_lock_irq(gcwq->lock) | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 735 |  */ | 
 | 736 | static inline void worker_set_flags(struct worker *worker, unsigned int flags, | 
 | 737 | 				    bool wakeup) | 
 | 738 | { | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 739 | 	struct global_cwq *gcwq = worker->gcwq; | 
 | 740 |  | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 741 | 	WARN_ON_ONCE(worker->task != current); | 
 | 742 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 743 | 	/* | 
 | 744 | 	 * If transitioning into NOT_RUNNING, adjust nr_running and | 
 | 745 | 	 * wake up an idle worker as necessary if requested by | 
 | 746 | 	 * @wakeup. | 
 | 747 | 	 */ | 
 | 748 | 	if ((flags & WORKER_NOT_RUNNING) && | 
 | 749 | 	    !(worker->flags & WORKER_NOT_RUNNING)) { | 
 | 750 | 		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); | 
 | 751 |  | 
 | 752 | 		if (wakeup) { | 
 | 753 | 			if (atomic_dec_and_test(nr_running) && | 
 | 754 | 			    !list_empty(&gcwq->worklist)) | 
 | 755 | 				wake_up_worker(gcwq); | 
 | 756 | 		} else | 
 | 757 | 			atomic_dec(nr_running); | 
 | 758 | 	} | 
 | 759 |  | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 760 | 	worker->flags |= flags; | 
 | 761 | } | 
 | 762 |  | 
 | 763 | /** | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 764 |  * worker_clr_flags - clear worker flags and adjust nr_running accordingly | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 765 |  * @worker: self | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 766 |  * @flags: flags to clear | 
 | 767 |  * | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 768 |  * Clear @flags in @worker->flags and adjust nr_running accordingly. | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 769 |  * | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 770 |  * CONTEXT: | 
 | 771 |  * spin_lock_irq(gcwq->lock) | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 772 |  */ | 
 | 773 | static inline void worker_clr_flags(struct worker *worker, unsigned int flags) | 
 | 774 | { | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 775 | 	struct global_cwq *gcwq = worker->gcwq; | 
 | 776 | 	unsigned int oflags = worker->flags; | 
 | 777 |  | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 778 | 	WARN_ON_ONCE(worker->task != current); | 
 | 779 |  | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 780 | 	worker->flags &= ~flags; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 781 |  | 
| Tejun Heo | 42c025f | 2011-01-11 15:58:49 +0100 | [diff] [blame] | 782 | 	/* | 
 | 783 | 	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note | 
 | 784 | 	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask | 
 | 785 | 	 * of multiple flags, not a single flag. | 
 | 786 | 	 */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 787 | 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) | 
 | 788 | 		if (!(worker->flags & WORKER_NOT_RUNNING)) | 
 | 789 | 			atomic_inc(get_gcwq_nr_running(gcwq->cpu)); | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 790 | } | 
 | 791 |  | 
 | 792 | /** | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 793 |  * busy_worker_head - return the busy hash head for a work | 
 | 794 |  * @gcwq: gcwq of interest | 
 | 795 |  * @work: work to be hashed | 
 | 796 |  * | 
 | 797 |  * Return hash head of @gcwq for @work. | 
 | 798 |  * | 
 | 799 |  * CONTEXT: | 
 | 800 |  * spin_lock_irq(gcwq->lock). | 
 | 801 |  * | 
 | 802 |  * RETURNS: | 
 | 803 |  * Pointer to the hash head. | 
 | 804 |  */ | 
 | 805 | static struct hlist_head *busy_worker_head(struct global_cwq *gcwq, | 
 | 806 | 					   struct work_struct *work) | 
 | 807 | { | 
 | 808 | 	const int base_shift = ilog2(sizeof(struct work_struct)); | 
 | 809 | 	unsigned long v = (unsigned long)work; | 
 | 810 |  | 
 | 811 | 	/* simple shift and fold hash, do we need something better? */ | 
 | 812 | 	v >>= base_shift; | 
 | 813 | 	v += v >> BUSY_WORKER_HASH_ORDER; | 
 | 814 | 	v &= BUSY_WORKER_HASH_MASK; | 
 | 815 |  | 
 | 816 | 	return &gcwq->busy_hash[v]; | 
 | 817 | } | 
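/*
 * Illustrative note: the hash above divides the work's address by roughly
 * sizeof(struct work_struct) via the ilog2() shift, folds the upper bits
 * in once, and masks the result down to the 64-entry busy_hash table.
 */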
 | 818 |  | 
 | 819 | /** | 
| Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 820 |  * __find_worker_executing_work - find worker which is executing a work | 
 | 821 |  * @gcwq: gcwq of interest | 
 | 822 |  * @bwh: hash head as returned by busy_worker_head() | 
 | 823 |  * @work: work to find worker for | 
 | 824 |  * | 
 | 825 |  * Find a worker which is executing @work on @gcwq.  @bwh should be | 
 | 826 |  * the hash head obtained by calling busy_worker_head() with the same | 
 | 827 |  * work. | 
 | 828 |  * | 
 | 829 |  * CONTEXT: | 
 | 830 |  * spin_lock_irq(gcwq->lock). | 
 | 831 |  * | 
 | 832 |  * RETURNS: | 
 | 833 |  * Pointer to worker which is executing @work if found, NULL | 
 | 834 |  * otherwise. | 
 | 835 |  */ | 
 | 836 | static struct worker *__find_worker_executing_work(struct global_cwq *gcwq, | 
 | 837 | 						   struct hlist_head *bwh, | 
 | 838 | 						   struct work_struct *work) | 
 | 839 | { | 
 | 840 | 	struct worker *worker; | 
 | 841 | 	struct hlist_node *tmp; | 
 | 842 |  | 
 | 843 | 	hlist_for_each_entry(worker, tmp, bwh, hentry) | 
 | 844 | 		if (worker->current_work == work) | 
 | 845 | 			return worker; | 
 | 846 | 	return NULL; | 
 | 847 | } | 
 | 848 |  | 
 | 849 | /** | 
 | 850 |  * find_worker_executing_work - find worker which is executing a work | 
 | 851 |  * @gcwq: gcwq of interest | 
 | 852 |  * @work: work to find worker for | 
 | 853 |  * | 
 | 854 |  * Find a worker which is executing @work on @gcwq.  This function is | 
 | 855 |  * identical to __find_worker_executing_work() except that this | 
 | 856 |  * function calculates @bwh itself. | 
 | 857 |  * | 
 | 858 |  * CONTEXT: | 
 | 859 |  * spin_lock_irq(gcwq->lock). | 
 | 860 |  * | 
 | 861 |  * RETURNS: | 
 | 862 |  * Pointer to worker which is executing @work if found, NULL | 
 | 863 |  * otherwise. | 
 | 864 |  */ | 
 | 865 | static struct worker *find_worker_executing_work(struct global_cwq *gcwq, | 
 | 866 | 						 struct work_struct *work) | 
 | 867 | { | 
 | 868 | 	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work), | 
 | 869 | 					    work); | 
 | 870 | } | 
 | 871 |  | 
 | 872 | /** | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 873 |  * gcwq_determine_ins_pos - find insertion position | 
 | 874 |  * @gcwq: gcwq of interest | 
 | 875 |  * @cwq: cwq a work is being queued for | 
 | 876 |  * | 
 | 877 |  * A work for @cwq is about to be queued on @gcwq, determine insertion | 
 | 878 |  * position for the work.  If @cwq is for HIGHPRI wq, the work is | 
 | 879 |  * queued at the head of the queue but in FIFO order with respect to | 
 | 880 |  * other HIGHPRI works; otherwise, at the end of the queue.  This | 
 | 881 |  * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that | 
 | 882 |  * there are HIGHPRI works pending. | 
 | 883 |  * | 
 | 884 |  * CONTEXT: | 
 | 885 |  * spin_lock_irq(gcwq->lock). | 
 | 886 |  * | 
 | 887 |  * RETURNS: | 
 | 888 |  * Pointer to insertion position. | 
 | 889 |  */ | 
 | 890 | static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq, | 
 | 891 | 					       struct cpu_workqueue_struct *cwq) | 
 | 892 | { | 
 | 893 | 	struct work_struct *twork; | 
 | 894 |  | 
 | 895 | 	if (likely(!(cwq->wq->flags & WQ_HIGHPRI))) | 
 | 896 | 		return &gcwq->worklist; | 
 | 897 |  | 
 | 898 | 	list_for_each_entry(twork, &gcwq->worklist, entry) { | 
 | 899 | 		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork); | 
 | 900 |  | 
 | 901 | 		if (!(tcwq->wq->flags & WQ_HIGHPRI)) | 
 | 902 | 			break; | 
 | 903 | 	} | 
 | 904 |  | 
 | 905 | 	gcwq->flags |= GCWQ_HIGHPRI_PENDING; | 
 | 906 | 	return &twork->entry; | 
 | 907 | } | 
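/*
 * Illustrative example: if gcwq->worklist currently holds
 * [H1, H2, N1, N2], where H* are HIGHPRI works and N* normal ones, a new
 * HIGHPRI work is linked between H2 and N1 - FIFO among HIGHPRI works but
 * ahead of all normal ones - while a new normal work simply goes to the
 * tail after N2.
 */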
 | 908 |  | 
 | 909 | /** | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 910 |  * insert_work - insert a work into gcwq | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 911 |  * @cwq: cwq @work belongs to | 
 | 912 |  * @work: work to insert | 
 | 913 |  * @head: insertion point | 
 | 914 |  * @extra_flags: extra WORK_STRUCT_* flags to set | 
 | 915 |  * | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 916 |  * Insert @work which belongs to @cwq into @gcwq after @head. | 
 | 917 |  * @extra_flags is or'd to work_struct flags. | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 918 |  * | 
 | 919 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 920 |  * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 921 |  */ | 
| Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 922 | static void insert_work(struct cpu_workqueue_struct *cwq, | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 923 | 			struct work_struct *work, struct list_head *head, | 
 | 924 | 			unsigned int extra_flags) | 
| Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 925 | { | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 926 | 	struct global_cwq *gcwq = cwq->gcwq; | 
| Frederic Weisbecker | e1d8aa9 | 2009-01-12 23:15:46 +0100 | [diff] [blame] | 927 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 928 | 	/* we own @work, set data and link */ | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 929 | 	set_work_cwq(work, cwq, extra_flags); | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 930 |  | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 931 | 	/* | 
 | 932 | 	 * Ensure that we get the right work->data if we see the | 
 | 933 | 	 * result of list_add() below, see try_to_grab_pending(). | 
 | 934 | 	 */ | 
 | 935 | 	smp_wmb(); | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 936 |  | 
| Oleg Nesterov | 1a4d9b0 | 2008-07-25 01:47:47 -0700 | [diff] [blame] | 937 | 	list_add_tail(&work->entry, head); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 938 |  | 
 | 939 | 	/* | 
 | 940 | 	 * Ensure either worker_sched_deactivated() sees the above | 
 | 941 | 	 * list_add_tail() or we see zero nr_running to avoid workers | 
 | 942 | 	 * lying around lazily while there are works to be processed. | 
 | 943 | 	 */ | 
 | 944 | 	smp_mb(); | 
 | 945 |  | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 946 | 	if (__need_more_worker(gcwq)) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 947 | 		wake_up_worker(gcwq); | 
| Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 948 | } | 
 | 949 |  | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 950 | /* | 
 | 951 |  * Test whether @work is being queued from another work executing on the | 
 | 952 |  * same workqueue.  This is rather expensive and should only be used from | 
 | 953 |  * cold paths. | 
 | 954 |  */ | 
 | 955 | static bool is_chained_work(struct workqueue_struct *wq) | 
 | 956 | { | 
 | 957 | 	unsigned long flags; | 
 | 958 | 	unsigned int cpu; | 
 | 959 |  | 
 | 960 | 	for_each_gcwq_cpu(cpu) { | 
 | 961 | 		struct global_cwq *gcwq = get_gcwq(cpu); | 
 | 962 | 		struct worker *worker; | 
 | 963 | 		struct hlist_node *pos; | 
 | 964 | 		int i; | 
 | 965 |  | 
 | 966 | 		spin_lock_irqsave(&gcwq->lock, flags); | 
 | 967 | 		for_each_busy_worker(worker, i, pos, gcwq) { | 
 | 968 | 			if (worker->task != current) | 
 | 969 | 				continue; | 
 | 970 | 			spin_unlock_irqrestore(&gcwq->lock, flags); | 
 | 971 | 			/* | 
 | 972 | 			 * I'm @worker, no locking necessary.  See if @work | 
 | 973 | 			 * is headed to the same workqueue. | 
 | 974 | 			 */ | 
 | 975 | 			return worker->current_cwq->wq == wq; | 
 | 976 | 		} | 
 | 977 | 		spin_unlock_irqrestore(&gcwq->lock, flags); | 
 | 978 | 	} | 
 | 979 | 	return false; | 
 | 980 | } | 
 | 981 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 982 | static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | 			 struct work_struct *work) | 
 | 984 | { | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 985 | 	struct global_cwq *gcwq; | 
 | 986 | 	struct cpu_workqueue_struct *cwq; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 987 | 	struct list_head *worklist; | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 988 | 	unsigned int work_flags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 989 | 	unsigned long flags; | 
 | 990 |  | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 991 | 	debug_work_activate(work); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 992 |  | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 993 | 	/* if dying, only works from the same workqueue are allowed */ | 
| Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 994 | 	if (unlikely(wq->flags & WQ_DRAINING) && | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 995 | 	    WARN_ON_ONCE(!is_chained_work(wq))) | 
| Tejun Heo | e41e704 | 2010-08-24 14:22:47 +0200 | [diff] [blame] | 996 | 		return; | 
 | 997 |  | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 998 | 	/* determine gcwq to use */ | 
 | 999 | 	if (!(wq->flags & WQ_UNBOUND)) { | 
| Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1000 | 		struct global_cwq *last_gcwq; | 
 | 1001 |  | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1002 | 		if (unlikely(cpu == WORK_CPU_UNBOUND)) | 
 | 1003 | 			cpu = raw_smp_processor_id(); | 
 | 1004 |  | 
| Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1005 | 		/* | 
 | 1006 | 		 * It's a multi-cpu workqueue.  If @wq is non-reentrant and @work | 
 | 1007 | 		 * was previously on a different cpu, it might still | 
 | 1008 | 		 * be running there, in which case the work needs to | 
 | 1009 | 		 * be queued on that cpu to guarantee non-reentrance. | 
 | 1010 | 		 */ | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1011 | 		gcwq = get_gcwq(cpu); | 
| Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1012 | 		if (wq->flags & WQ_NON_REENTRANT && | 
 | 1013 | 		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) { | 
 | 1014 | 			struct worker *worker; | 
 | 1015 |  | 
 | 1016 | 			spin_lock_irqsave(&last_gcwq->lock, flags); | 
 | 1017 |  | 
 | 1018 | 			worker = find_worker_executing_work(last_gcwq, work); | 
 | 1019 |  | 
 | 1020 | 			if (worker && worker->current_cwq->wq == wq) | 
 | 1021 | 				gcwq = last_gcwq; | 
 | 1022 | 			else { | 
 | 1023 | 				/* meh... not running there, queue here */ | 
 | 1024 | 				spin_unlock_irqrestore(&last_gcwq->lock, flags); | 
 | 1025 | 				spin_lock_irqsave(&gcwq->lock, flags); | 
 | 1026 | 			} | 
 | 1027 | 		} else | 
 | 1028 | 			spin_lock_irqsave(&gcwq->lock, flags); | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1029 | 	} else { | 
 | 1030 | 		gcwq = get_gcwq(WORK_CPU_UNBOUND); | 
 | 1031 | 		spin_lock_irqsave(&gcwq->lock, flags); | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1032 | 	} | 
 | 1033 |  | 
 | 1034 | 	/* gcwq determined, get cwq and queue */ | 
 | 1035 | 	cwq = get_cwq(gcwq->cpu, wq); | 
| Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1036 | 	trace_workqueue_queue_work(cpu, cwq, work); | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1037 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1038 | 	BUG_ON(!list_empty(&work->entry)); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1039 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1040 | 	cwq->nr_in_flight[cwq->work_color]++; | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1041 | 	work_flags = work_color_to_flags(cwq->work_color); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1042 |  | 
 | 1043 | 	if (likely(cwq->nr_active < cwq->max_active)) { | 
| Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1044 | 		trace_workqueue_activate_work(work); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1045 | 		cwq->nr_active++; | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1046 | 		worklist = gcwq_determine_ins_pos(gcwq, cwq); | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1047 | 	} else { | 
 | 1048 | 		work_flags |= WORK_STRUCT_DELAYED; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1049 | 		worklist = &cwq->delayed_works; | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1050 | 	} | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1051 |  | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1052 | 	insert_work(cwq, work, worklist, work_flags); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1053 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1054 | 	spin_unlock_irqrestore(&gcwq->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1055 | } | 
 | 1056 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1057 | /** | 
 | 1058 |  * queue_work - queue work on a workqueue | 
 | 1059 |  * @wq: workqueue to use | 
 | 1060 |  * @work: work to queue | 
 | 1061 |  * | 
| Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1062 |  * Returns 0 if @work was already on a queue, non-zero otherwise. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 |  * | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 1064 |  * We queue the work to the CPU on which it was submitted, but if the CPU dies | 
 | 1065 |  * it can be processed by another CPU. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 |  */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1067 | int queue_work(struct workqueue_struct *wq, struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | { | 
| Oleg Nesterov | ef1ca23 | 2008-07-25 01:47:53 -0700 | [diff] [blame] | 1069 | 	int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1070 |  | 
| Oleg Nesterov | ef1ca23 | 2008-07-25 01:47:53 -0700 | [diff] [blame] | 1071 | 	ret = queue_work_on(get_cpu(), wq, work); | 
 | 1072 | 	put_cpu(); | 
 | 1073 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1074 | 	return ret; | 
 | 1075 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1076 | EXPORT_SYMBOL_GPL(queue_work); | 
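As an illustrative usage sketch (not part of this file; the names my_work_fn/my_work and the choice of system_wq are hypothetical), a caller typically declares a work item statically and hands it to queue_work():

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	static void my_work_fn(struct work_struct *work)
	{
		pr_info("my_work executed\n");
	}
	static DECLARE_WORK(my_work, my_work_fn);

	static void my_submit(void)
	{
		/* non-zero return: newly queued; 0: @my_work was already pending */
		if (!queue_work(system_wq, &my_work))
			pr_info("my_work already pending\n");
	}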
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 |  | 
| Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 1078 | /** | 
 | 1079 |  * queue_work_on - queue work on specific cpu | 
 | 1080 |  * @cpu: CPU number to execute work on | 
 | 1081 |  * @wq: workqueue to use | 
 | 1082 |  * @work: work to queue | 
 | 1083 |  * | 
 | 1084 |  * Returns 0 if @work was already on a queue, non-zero otherwise. | 
 | 1085 |  * | 
 | 1086 |  * We queue the work to a specific CPU; the caller must ensure that | 
 | 1087 |  * CPU can't go away. | 
 | 1088 |  */ | 
 | 1089 | int | 
 | 1090 | queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) | 
 | 1091 | { | 
 | 1092 | 	int ret = 0; | 
 | 1093 |  | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1094 | 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1095 | 		__queue_work(cpu, wq, work); | 
| Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 1096 | 		ret = 1; | 
 | 1097 | 	} | 
 | 1098 | 	return ret; | 
 | 1099 | } | 
 | 1100 | EXPORT_SYMBOL_GPL(queue_work_on); | 
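A hedged sketch of pinning work to a particular CPU, reusing the hypothetical my_work item from the sketch above; the caller holds off CPU hot-unplug with get_online_cpus() for the duration of the call:

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static void my_submit_on(int cpu)
	{
		get_online_cpus();	/* keep @cpu from going away */
		if (cpu_online(cpu))
			queue_work_on(cpu, system_wq, &my_work);
		put_online_cpus();
	}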
 | 1101 |  | 
| Li Zefan | 6d141c3 | 2008-02-08 04:21:09 -0800 | [diff] [blame] | 1102 | static void delayed_work_timer_fn(unsigned long __data) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | { | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1104 | 	struct delayed_work *dwork = (struct delayed_work *)__data; | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1105 | 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1106 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1107 | 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | } | 
 | 1109 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1110 | /** | 
 | 1111 |  * queue_delayed_work - queue work on a workqueue after delay | 
 | 1112 |  * @wq: workqueue to use | 
| Randy Dunlap | af9997e | 2006-12-22 01:06:52 -0800 | [diff] [blame] | 1113 |  * @dwork: delayable work to queue | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1114 |  * @delay: number of jiffies to wait before queueing | 
 | 1115 |  * | 
| Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1116 |  * Returns 0 if @work was already on a queue, non-zero otherwise. | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1117 |  */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1118 | int queue_delayed_work(struct workqueue_struct *wq, | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1119 | 			struct delayed_work *dwork, unsigned long delay) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | { | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1121 | 	if (delay == 0) | 
| Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1122 | 		return queue_work(wq, &dwork->work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1123 |  | 
| Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1124 | 	return queue_delayed_work_on(-1, wq, dwork, delay); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1125 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1126 | EXPORT_SYMBOL_GPL(queue_delayed_work); | 
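An illustrative sketch of delayed submission (hypothetical names, not from this file); note that a zero delay falls straight through to queue_work(), as the code above shows:

	#include <linux/jiffies.h>

	static void my_delayed_fn(struct work_struct *work)
	{
		/* to_delayed_work() recovers the containing delayed_work */
		struct delayed_work *dwork = to_delayed_work(work);

		pr_info("delayed work %p ran\n", dwork);
	}
	static DECLARE_DELAYED_WORK(my_dwork, my_delayed_fn);

	static void my_submit_delayed(void)
	{
		/* fire roughly 200ms from now */
		queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(200));
	}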
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1128 | /** | 
 | 1129 |  * queue_delayed_work_on - queue work on specific CPU after delay | 
 | 1130 |  * @cpu: CPU number to execute work on | 
 | 1131 |  * @wq: workqueue to use | 
| Randy Dunlap | af9997e | 2006-12-22 01:06:52 -0800 | [diff] [blame] | 1132 |  * @dwork: work to queue | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1133 |  * @delay: number of jiffies to wait before queueing | 
 | 1134 |  * | 
| Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1135 |  * Returns 0 if @work was already on a queue, non-zero otherwise. | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1136 |  */ | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1137 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1138 | 			struct delayed_work *dwork, unsigned long delay) | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1139 | { | 
 | 1140 | 	int ret = 0; | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1141 | 	struct timer_list *timer = &dwork->timer; | 
 | 1142 | 	struct work_struct *work = &dwork->work; | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1143 |  | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1144 | 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1145 | 		unsigned int lcpu; | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1146 |  | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1147 | 		BUG_ON(timer_pending(timer)); | 
 | 1148 | 		BUG_ON(!list_empty(&work->entry)); | 
 | 1149 |  | 
| Andrew Liu | 8a3e77c | 2008-05-01 04:35:14 -0700 | [diff] [blame] | 1150 | 		timer_stats_timer_set_start_info(&dwork->timer); | 
 | 1151 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1152 | 		/* | 
 | 1153 | 		 * This stores cwq for the moment, for the timer_fn. | 
 | 1154 | 		 * Note that the work's gcwq is preserved to allow | 
 | 1155 | 		 * reentrance detection for delayed works. | 
 | 1156 | 		 */ | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1157 | 		if (!(wq->flags & WQ_UNBOUND)) { | 
 | 1158 | 			struct global_cwq *gcwq = get_work_gcwq(work); | 
 | 1159 |  | 
 | 1160 | 			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND) | 
 | 1161 | 				lcpu = gcwq->cpu; | 
 | 1162 | 			else | 
 | 1163 | 				lcpu = raw_smp_processor_id(); | 
 | 1164 | 		} else | 
 | 1165 | 			lcpu = WORK_CPU_UNBOUND; | 
 | 1166 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1167 | 		set_work_cwq(work, get_cwq(lcpu, wq), 0); | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1168 |  | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1169 | 		timer->expires = jiffies + delay; | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1170 | 		timer->data = (unsigned long)dwork; | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1171 | 		timer->function = delayed_work_timer_fn; | 
| Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1172 |  | 
 | 1173 | 		if (unlikely(cpu >= 0)) | 
 | 1174 | 			add_timer_on(timer, cpu); | 
 | 1175 | 		else | 
 | 1176 | 			add_timer(timer); | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1177 | 		ret = 1; | 
 | 1178 | 	} | 
 | 1179 | 	return ret; | 
 | 1180 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1181 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | 
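Finally, a short sketch of delaying onto a specific CPU, again with the hypothetical my_dwork item from above; as the function shows, the timer is armed with add_timer_on() when a valid cpu is given:

	static void my_submit_delayed_on(int cpu)
	{
		/* caller must ensure @cpu stays valid until the timer fires */
		queue_delayed_work_on(cpu, system_wq, &my_dwork, HZ / 10);
	}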
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1183 | /** | 
 | 1184 |  * worker_enter_idle - enter idle state | 
 | 1185 |  * @worker: worker which is entering idle state | 
 | 1186 |  * | 
 | 1187 |  * @worker is entering idle state.  Update stats and idle timer if | 
 | 1188 |  * necessary. | 
 | 1189 |  * | 
 | 1190 |  * LOCKING: | 
 | 1191 |  * spin_lock_irq(gcwq->lock). | 
 | 1192 |  */ | 
 | 1193 | static void worker_enter_idle(struct worker *worker) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | { | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1195 | 	struct global_cwq *gcwq = worker->gcwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1197 | 	BUG_ON(worker->flags & WORKER_IDLE); | 
 | 1198 | 	BUG_ON(!list_empty(&worker->entry) && | 
 | 1199 | 	       (worker->hentry.next || worker->hentry.pprev)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 |  | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1201 | 	/* can't use worker_set_flags(), also called from start_worker() */ | 
 | 1202 | 	worker->flags |= WORKER_IDLE; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1203 | 	gcwq->nr_idle++; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1204 | 	worker->last_active = jiffies; | 
| Peter Zijlstra | d5abe66 | 2006-12-06 20:37:26 -0800 | [diff] [blame] | 1205 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1206 | 	/* idle_list is LIFO */ | 
 | 1207 | 	list_add(&worker->entry, &gcwq->idle_list); | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1208 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1209 | 	if (likely(!(worker->flags & WORKER_ROGUE))) { | 
 | 1210 | 		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) | 
 | 1211 | 			mod_timer(&gcwq->idle_timer, | 
 | 1212 | 				  jiffies + IDLE_WORKER_TIMEOUT); | 
 | 1213 | 	} else | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1214 | 		wake_up_all(&gcwq->trustee_wait); | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1215 |  | 
 | 1216 | 	/* sanity check nr_running */ | 
 | 1217 | 	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle && | 
 | 1218 | 		     atomic_read(get_gcwq_nr_running(gcwq->cpu))); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1219 | } | 
 | 1220 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1221 | /** | 
 | 1222 |  * worker_leave_idle - leave idle state | 
 | 1223 |  * @worker: worker which is leaving idle state | 
 | 1224 |  * | 
 | 1225 |  * @worker is leaving idle state.  Update stats. | 
 | 1226 |  * | 
 | 1227 |  * LOCKING: | 
 | 1228 |  * spin_lock_irq(gcwq->lock). | 
 | 1229 |  */ | 
 | 1230 | static void worker_leave_idle(struct worker *worker) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | { | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1232 | 	struct global_cwq *gcwq = worker->gcwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1233 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1234 | 	BUG_ON(!(worker->flags & WORKER_IDLE)); | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1235 | 	worker_clr_flags(worker, WORKER_IDLE); | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1236 | 	gcwq->nr_idle--; | 
 | 1237 | 	list_del_init(&worker->entry); | 
 | 1238 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1239 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1240 | /** | 
 | 1241 |  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq | 
 | 1242 |  * @worker: self | 
 | 1243 |  * | 
 | 1244 |  * Works which are scheduled while the cpu is online must at least be | 
 | 1245 |  * scheduled to a worker which is bound to the cpu so that if they are | 
 | 1246 |  * flushed from cpu callbacks while cpu is going down, they are | 
 | 1247 |  * guaranteed to execute on the cpu. | 
 | 1248 |  * | 
 | 1249 |  * This function is to be used by rogue workers and rescuers to bind | 
 | 1250 |  * themselves to the target cpu and may race with cpu going down or | 
 | 1251 |  * coming online.  kthread_bind() can't be used because it may put the | 
 | 1252 |  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used | 
 | 1253 |  * verbatim as it's best effort and blocking and gcwq may be | 
 | 1254 |  * [dis]associated in the meantime. | 
 | 1255 |  * | 
 | 1256 |  * This function tries set_cpus_allowed() and locks gcwq and verifies | 
 | 1257 |  * the binding against GCWQ_DISASSOCIATED which is set during | 
 | 1258 |  * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters | 
 | 1259 |  * idle state or fetches works without dropping lock, it can guarantee | 
 | 1260 |  * the scheduling requirement described in the first paragraph. | 
 | 1261 |  * | 
 | 1262 |  * CONTEXT: | 
 | 1263 |  * Might sleep.  Called without any lock but returns with gcwq->lock | 
 | 1264 |  * held. | 
 | 1265 |  * | 
 | 1266 |  * RETURNS: | 
 | 1267 |  * %true if the associated gcwq is online (@worker is successfully | 
 | 1268 |  * bound), %false if offline. | 
 | 1269 |  */ | 
 | 1270 | static bool worker_maybe_bind_and_lock(struct worker *worker) | 
| Namhyung Kim | 972fa1c | 2010-08-22 23:19:43 +0900 | [diff] [blame] | 1271 | __acquires(&gcwq->lock) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1272 | { | 
 | 1273 | 	struct global_cwq *gcwq = worker->gcwq; | 
 | 1274 | 	struct task_struct *task = worker->task; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1276 | 	while (true) { | 
 | 1277 | 		/* | 
 | 1278 | 		 * The following call may fail, succeed or succeed | 
 | 1279 | 		 * without actually migrating the task to the cpu if | 
 | 1280 | 		 * it races with cpu hotunplug operation.  Verify | 
 | 1281 | 		 * against GCWQ_DISASSOCIATED. | 
 | 1282 | 		 */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1283 | 		if (!(gcwq->flags & GCWQ_DISASSOCIATED)) | 
 | 1284 | 			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); | 
| Oleg Nesterov | 85f4186 | 2007-05-09 02:34:20 -0700 | [diff] [blame] | 1285 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1286 | 		spin_lock_irq(&gcwq->lock); | 
 | 1287 | 		if (gcwq->flags & GCWQ_DISASSOCIATED) | 
 | 1288 | 			return false; | 
 | 1289 | 		if (task_cpu(task) == gcwq->cpu && | 
 | 1290 | 		    cpumask_equal(&current->cpus_allowed, | 
 | 1291 | 				  get_cpu_mask(gcwq->cpu))) | 
 | 1292 | 			return true; | 
 | 1293 | 		spin_unlock_irq(&gcwq->lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1294 |  | 
| Tejun Heo | 5035b20 | 2011-04-29 18:08:37 +0200 | [diff] [blame] | 1295 | 		/* | 
 | 1296 | 		 * We've raced with CPU hot[un]plug.  Give it a breather | 
 | 1297 | 		 * and retry migration.  cond_resched() is required here; | 
 | 1298 | 		 * otherwise, we might deadlock against cpu_stop trying to | 
 | 1299 | 		 * bring down the CPU on a non-preemptive kernel. | 
 | 1300 | 		 */ | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1301 | 		cpu_relax(); | 
| Tejun Heo | 5035b20 | 2011-04-29 18:08:37 +0200 | [diff] [blame] | 1302 | 		cond_resched(); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1303 | 	} | 
 | 1304 | } | 
 | 1305 |  | 
 | 1306 | /* | 
 | 1307 |  * Function for worker->rebind_work used to rebind rogue busy workers | 
 | 1308 |  * to the associated cpu which is coming back online.  This is | 
 | 1309 |  * scheduled by cpu up but can race with other cpu hotplug operations | 
 | 1310 |  * and may be executed twice without intervening cpu down. | 
 | 1311 |  */ | 
 | 1312 | static void worker_rebind_fn(struct work_struct *work) | 
 | 1313 | { | 
 | 1314 | 	struct worker *worker = container_of(work, struct worker, rebind_work); | 
 | 1315 | 	struct global_cwq *gcwq = worker->gcwq; | 
 | 1316 |  | 
 | 1317 | 	if (worker_maybe_bind_and_lock(worker)) | 
 | 1318 | 		worker_clr_flags(worker, WORKER_REBIND); | 
 | 1319 |  | 
 | 1320 | 	spin_unlock_irq(&gcwq->lock); | 
 | 1321 | } | 
 | 1322 |  | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1323 | static struct worker *alloc_worker(void) | 
 | 1324 | { | 
 | 1325 | 	struct worker *worker; | 
 | 1326 |  | 
 | 1327 | 	worker = kzalloc(sizeof(*worker), GFP_KERNEL); | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1328 | 	if (worker) { | 
 | 1329 | 		INIT_LIST_HEAD(&worker->entry); | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1330 | 		INIT_LIST_HEAD(&worker->scheduled); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1331 | 		INIT_WORK(&worker->rebind_work, worker_rebind_fn); | 
 | 1332 | 		/* on creation a worker is in !idle && prep state */ | 
 | 1333 | 		worker->flags = WORKER_PREP; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1334 | 	} | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1335 | 	return worker; | 
 | 1336 | } | 
 | 1337 |  | 
 | 1338 | /** | 
 | 1339 |  * create_worker - create a new workqueue worker | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1340 |  * @gcwq: gcwq the new worker will belong to | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1341 |  * @bind: whether to bind the new worker to @gcwq's CPU or not | 
 | 1342 |  * | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1343 |  * Create a new worker which is bound to @gcwq.  The returned worker | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1344 |  * can be started by calling start_worker() or destroyed using | 
 | 1345 |  * destroy_worker(). | 
 | 1346 |  * | 
 | 1347 |  * CONTEXT: | 
 | 1348 |  * Might sleep.  Does GFP_KERNEL allocations. | 
 | 1349 |  * | 
 | 1350 |  * RETURNS: | 
 | 1351 |  * Pointer to the newly created worker. | 
 | 1352 |  */ | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1353 | static struct worker *create_worker(struct global_cwq *gcwq, bool bind) | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1354 | { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1355 | 	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1356 | 	struct worker *worker = NULL; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1357 | 	int id = -1; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1358 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1359 | 	spin_lock_irq(&gcwq->lock); | 
 | 1360 | 	while (ida_get_new(&gcwq->worker_ida, &id)) { | 
 | 1361 | 		spin_unlock_irq(&gcwq->lock); | 
 | 1362 | 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL)) | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1363 | 			goto fail; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1364 | 		spin_lock_irq(&gcwq->lock); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1365 | 	} | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1366 | 	spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1367 |  | 
 | 1368 | 	worker = alloc_worker(); | 
 | 1369 | 	if (!worker) | 
 | 1370 | 		goto fail; | 
 | 1371 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1372 | 	worker->gcwq = gcwq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1373 | 	worker->id = id; | 
 | 1374 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1375 | 	if (!on_unbound_cpu) | 
| Eric Dumazet | 94dcf29 | 2011-03-22 16:30:45 -0700 | [diff] [blame] | 1376 | 		worker->task = kthread_create_on_node(worker_thread, | 
 | 1377 | 						      worker, | 
 | 1378 | 						      cpu_to_node(gcwq->cpu), | 
 | 1379 | 						      "kworker/%u:%d", gcwq->cpu, id); | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1380 | 	else | 
 | 1381 | 		worker->task = kthread_create(worker_thread, worker, | 
 | 1382 | 					      "kworker/u:%d", id); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1383 | 	if (IS_ERR(worker->task)) | 
 | 1384 | 		goto fail; | 
 | 1385 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1386 | 	/* | 
 | 1387 | 	 * A rogue worker will become a regular one if CPU comes | 
 | 1388 | 	 * online later on.  Make sure every worker has | 
 | 1389 | 	 * PF_THREAD_BOUND set. | 
 | 1390 | 	 */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1391 | 	if (bind && !on_unbound_cpu) | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1392 | 		kthread_bind(worker->task, gcwq->cpu); | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1393 | 	else { | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1394 | 		worker->task->flags |= PF_THREAD_BOUND; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1395 | 		if (on_unbound_cpu) | 
 | 1396 | 			worker->flags |= WORKER_UNBOUND; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 | 	} | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1398 |  | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1399 | 	return worker; | 
 | 1400 | fail: | 
 | 1401 | 	if (id >= 0) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1402 | 		spin_lock_irq(&gcwq->lock); | 
 | 1403 | 		ida_remove(&gcwq->worker_ida, id); | 
 | 1404 | 		spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1405 | 	} | 
 | 1406 | 	kfree(worker); | 
 | 1407 | 	return NULL; | 
 | 1408 | } | 
 | 1409 |  | 
 | 1410 | /** | 
 | 1411 |  * start_worker - start a newly created worker | 
 | 1412 |  * @worker: worker to start | 
 | 1413 |  * | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1414 |  * Make the gcwq aware of @worker and start it. | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1415 |  * | 
 | 1416 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1417 |  * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1418 |  */ | 
 | 1419 | static void start_worker(struct worker *worker) | 
 | 1420 | { | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1421 | 	worker->flags |= WORKER_STARTED; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1422 | 	worker->gcwq->nr_workers++; | 
 | 1423 | 	worker_enter_idle(worker); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1424 | 	wake_up_process(worker->task); | 
 | 1425 | } | 
 | 1426 |  | 
 | 1427 | /** | 
 | 1428 |  * destroy_worker - destroy a workqueue worker | 
 | 1429 |  * @worker: worker to be destroyed | 
 | 1430 |  * | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1431 |  * Destroy @worker and adjust @gcwq stats accordingly. | 
 | 1432 |  * | 
 | 1433 |  * CONTEXT: | 
 | 1434 |  * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1435 |  */ | 
 | 1436 | static void destroy_worker(struct worker *worker) | 
 | 1437 | { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1438 | 	struct global_cwq *gcwq = worker->gcwq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1439 | 	int id = worker->id; | 
 | 1440 |  | 
 | 1441 | 	/* sanity check frenzy */ | 
 | 1442 | 	BUG_ON(worker->current_work); | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1443 | 	BUG_ON(!list_empty(&worker->scheduled)); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1444 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1445 | 	if (worker->flags & WORKER_STARTED) | 
 | 1446 | 		gcwq->nr_workers--; | 
 | 1447 | 	if (worker->flags & WORKER_IDLE) | 
 | 1448 | 		gcwq->nr_idle--; | 
 | 1449 |  | 
 | 1450 | 	list_del_init(&worker->entry); | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1451 | 	worker->flags |= WORKER_DIE; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1452 |  | 
 | 1453 | 	spin_unlock_irq(&gcwq->lock); | 
 | 1454 |  | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1455 | 	kthread_stop(worker->task); | 
 | 1456 | 	kfree(worker); | 
 | 1457 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1458 | 	spin_lock_irq(&gcwq->lock); | 
 | 1459 | 	ida_remove(&gcwq->worker_ida, id); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1460 | } | 
 | 1461 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1462 | static void idle_worker_timeout(unsigned long __gcwq) | 
 | 1463 | { | 
 | 1464 | 	struct global_cwq *gcwq = (void *)__gcwq; | 
 | 1465 |  | 
 | 1466 | 	spin_lock_irq(&gcwq->lock); | 
 | 1467 |  | 
 | 1468 | 	if (too_many_workers(gcwq)) { | 
 | 1469 | 		struct worker *worker; | 
 | 1470 | 		unsigned long expires; | 
 | 1471 |  | 
 | 1472 | 		/* idle_list is kept in LIFO order, check the last one */ | 
 | 1473 | 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry); | 
 | 1474 | 		expires = worker->last_active + IDLE_WORKER_TIMEOUT; | 
 | 1475 |  | 
 | 1476 | 		if (time_before(jiffies, expires)) | 
 | 1477 | 			mod_timer(&gcwq->idle_timer, expires); | 
 | 1478 | 		else { | 
 | 1479 | 			/* it's been idle for too long, wake up manager */ | 
 | 1480 | 			gcwq->flags |= GCWQ_MANAGE_WORKERS; | 
 | 1481 | 			wake_up_worker(gcwq); | 
 | 1482 | 		} | 
 | 1483 | 	} | 
 | 1484 |  | 
 | 1485 | 	spin_unlock_irq(&gcwq->lock); | 
 | 1486 | } | 
 | 1487 |  | 
 | 1488 | static bool send_mayday(struct work_struct *work) | 
 | 1489 | { | 
 | 1490 | 	struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 
 | 1491 | 	struct workqueue_struct *wq = cwq->wq; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1492 | 	unsigned int cpu; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1493 |  | 
 | 1494 | 	if (!(wq->flags & WQ_RESCUER)) | 
 | 1495 | 		return false; | 
 | 1496 |  | 
 | 1497 | 	/* mayday mayday mayday */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1498 | 	cpu = cwq->gcwq->cpu; | 
 | 1499 | 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ | 
 | 1500 | 	if (cpu == WORK_CPU_UNBOUND) | 
 | 1501 | 		cpu = 0; | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 1502 | 	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask)) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1503 | 		wake_up_process(wq->rescuer->task); | 
 | 1504 | 	return true; | 
 | 1505 | } | 
 | 1506 |  | 
 | 1507 | static void gcwq_mayday_timeout(unsigned long __gcwq) | 
 | 1508 | { | 
 | 1509 | 	struct global_cwq *gcwq = (void *)__gcwq; | 
 | 1510 | 	struct work_struct *work; | 
 | 1511 |  | 
 | 1512 | 	spin_lock_irq(&gcwq->lock); | 
 | 1513 |  | 
 | 1514 | 	if (need_to_create_worker(gcwq)) { | 
 | 1515 | 		/* | 
 | 1516 | 		 * We've been trying to create a new worker but | 
 | 1517 | 		 * haven't been successful.  We might be hitting an | 
 | 1518 | 		 * allocation deadlock.  Send distress signals to | 
 | 1519 | 		 * rescuers. | 
 | 1520 | 		 */ | 
 | 1521 | 		list_for_each_entry(work, &gcwq->worklist, entry) | 
 | 1522 | 			send_mayday(work); | 
 | 1523 | 	} | 
 | 1524 |  | 
 | 1525 | 	spin_unlock_irq(&gcwq->lock); | 
 | 1526 |  | 
 | 1527 | 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL); | 
 | 1528 | } | 
 | 1529 |  | 
 | 1530 | /** | 
 | 1531 |  * maybe_create_worker - create a new worker if necessary | 
 | 1532 |  * @gcwq: gcwq to create a new worker for | 
 | 1533 |  * | 
 | 1534 |  * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to | 
 | 1535 |  * have at least one idle worker on return from this function.  If | 
 | 1536 |  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is | 
 | 1537 |  * sent to all rescuers with works scheduled on @gcwq to resolve | 
 | 1538 |  * possible allocation deadlock. | 
 | 1539 |  * | 
 | 1540 |  * On return, need_to_create_worker() is guaranteed to be false and | 
 | 1541 |  * may_start_working() true. | 
 | 1542 |  * | 
 | 1543 |  * LOCKING: | 
 | 1544 |  * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
 | 1545 |  * multiple times.  Does GFP_KERNEL allocations.  Called only from | 
 | 1546 |  * manager. | 
 | 1547 |  * | 
 | 1548 |  * RETURNS: | 
 | 1549 |  * false if no action was taken and gcwq->lock stayed locked, true | 
 | 1550 |  * otherwise. | 
 | 1551 |  */ | 
 | 1552 | static bool maybe_create_worker(struct global_cwq *gcwq) | 
| Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 1553 | __releases(&gcwq->lock) | 
 | 1554 | __acquires(&gcwq->lock) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1555 | { | 
 | 1556 | 	if (!need_to_create_worker(gcwq)) | 
 | 1557 | 		return false; | 
 | 1558 | restart: | 
| Tejun Heo | 9f9c236 | 2010-07-14 11:31:20 +0200 | [diff] [blame] | 1559 | 	spin_unlock_irq(&gcwq->lock); | 
 | 1560 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1561 | 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ | 
 | 1562 | 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); | 
 | 1563 |  | 
 | 1564 | 	while (true) { | 
 | 1565 | 		struct worker *worker; | 
 | 1566 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1567 | 		worker = create_worker(gcwq, true); | 
 | 1568 | 		if (worker) { | 
 | 1569 | 			del_timer_sync(&gcwq->mayday_timer); | 
 | 1570 | 			spin_lock_irq(&gcwq->lock); | 
 | 1571 | 			start_worker(worker); | 
 | 1572 | 			BUG_ON(need_to_create_worker(gcwq)); | 
 | 1573 | 			return true; | 
 | 1574 | 		} | 
 | 1575 |  | 
 | 1576 | 		if (!need_to_create_worker(gcwq)) | 
 | 1577 | 			break; | 
 | 1578 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1579 | 		__set_current_state(TASK_INTERRUPTIBLE); | 
 | 1580 | 		schedule_timeout(CREATE_COOLDOWN); | 
| Tejun Heo | 9f9c236 | 2010-07-14 11:31:20 +0200 | [diff] [blame] | 1581 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1582 | 		if (!need_to_create_worker(gcwq)) | 
 | 1583 | 			break; | 
 | 1584 | 	} | 
 | 1585 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1586 | 	del_timer_sync(&gcwq->mayday_timer); | 
 | 1587 | 	spin_lock_irq(&gcwq->lock); | 
 | 1588 | 	if (need_to_create_worker(gcwq)) | 
 | 1589 | 		goto restart; | 
 | 1590 | 	return true; | 
 | 1591 | } | 
 | 1592 |  | 
 | 1593 | /** | 
 | 1594 |  * maybe_destroy_worker - destroy workers which have been idle for a while | 
 | 1595 |  * @gcwq: gcwq to destroy workers for | 
 | 1596 |  * | 
 | 1597 |  * Destroy @gcwq workers which have been idle for longer than | 
 | 1598 |  * IDLE_WORKER_TIMEOUT. | 
 | 1599 |  * | 
 | 1600 |  * LOCKING: | 
 | 1601 |  * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
 | 1602 |  * multiple times.  Called only from manager. | 
 | 1603 |  * | 
 | 1604 |  * RETURNS: | 
 | 1605 |  * false if no action was taken and gcwq->lock stayed locked, true | 
 | 1606 |  * otherwise. | 
 | 1607 |  */ | 
 | 1608 | static bool maybe_destroy_workers(struct global_cwq *gcwq) | 
 | 1609 | { | 
 | 1610 | 	bool ret = false; | 
 | 1611 |  | 
 | 1612 | 	while (too_many_workers(gcwq)) { | 
 | 1613 | 		struct worker *worker; | 
 | 1614 | 		unsigned long expires; | 
 | 1615 |  | 
 | 1616 | 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry); | 
 | 1617 | 		expires = worker->last_active + IDLE_WORKER_TIMEOUT; | 
 | 1618 |  | 
 | 1619 | 		if (time_before(jiffies, expires)) { | 
 | 1620 | 			mod_timer(&gcwq->idle_timer, expires); | 
 | 1621 | 			break; | 
 | 1622 | 		} | 
 | 1623 |  | 
 | 1624 | 		destroy_worker(worker); | 
 | 1625 | 		ret = true; | 
 | 1626 | 	} | 
 | 1627 |  | 
 | 1628 | 	return ret; | 
 | 1629 | } | 
 | 1630 |  | 
 | 1631 | /** | 
 | 1632 |  * manage_workers - manage worker pool | 
 | 1633 |  * @worker: self | 
 | 1634 |  * | 
 | 1635 |  * Assume the manager role and manage gcwq worker pool @worker belongs | 
 | 1636 |  * to.  At any given time, there can be only zero or one manager per | 
 | 1637 |  * gcwq.  The exclusion is handled automatically by this function. | 
 | 1638 |  * | 
 | 1639 |  * The caller can safely start processing works on false return.  On | 
 | 1640 |  * true return, it's guaranteed that need_to_create_worker() is false | 
 | 1641 |  * and may_start_working() is true. | 
 | 1642 |  * | 
 | 1643 |  * CONTEXT: | 
 | 1644 |  * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
 | 1645 |  * multiple times.  Does GFP_KERNEL allocations. | 
 | 1646 |  * | 
 | 1647 |  * RETURNS: | 
 | 1648 |  * false if no action was taken and gcwq->lock stayed locked, true if | 
 | 1649 |  * some action was taken. | 
 | 1650 |  */ | 
 | 1651 | static bool manage_workers(struct worker *worker) | 
 | 1652 | { | 
 | 1653 | 	struct global_cwq *gcwq = worker->gcwq; | 
 | 1654 | 	bool ret = false; | 
 | 1655 |  | 
 | 1656 | 	if (gcwq->flags & GCWQ_MANAGING_WORKERS) | 
 | 1657 | 		return ret; | 
 | 1658 |  | 
 | 1659 | 	gcwq->flags &= ~GCWQ_MANAGE_WORKERS; | 
 | 1660 | 	gcwq->flags |= GCWQ_MANAGING_WORKERS; | 
 | 1661 |  | 
 | 1662 | 	/* | 
 | 1663 | 	 * Destroy and then create so that may_start_working() is true | 
 | 1664 | 	 * on return. | 
 | 1665 | 	 */ | 
 | 1666 | 	ret |= maybe_destroy_workers(gcwq); | 
 | 1667 | 	ret |= maybe_create_worker(gcwq); | 
 | 1668 |  | 
 | 1669 | 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS; | 
 | 1670 |  | 
 | 1671 | 	/* | 
 | 1672 | 	 * The trustee might be waiting to take over the manager | 
 | 1673 | 	 * position, tell it we're done. | 
 | 1674 | 	 */ | 
 | 1675 | 	if (unlikely(gcwq->trustee)) | 
 | 1676 | 		wake_up_all(&gcwq->trustee_wait); | 
 | 1677 |  | 
 | 1678 | 	return ret; | 
 | 1679 | } | 
 | 1680 |  | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1681 | /** | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1682 |  * move_linked_works - move linked works to a list | 
 | 1683 |  * @work: start of series of works to be scheduled | 
 | 1684 |  * @head: target list to append @work to | 
 | 1685 |  * @nextp: out parameter for nested worklist walking | 
 | 1686 |  * | 
 | 1687 |  * Schedule linked works starting from @work to @head.  Work series to | 
 | 1688 |  * be scheduled starts at @work and includes any consecutive work with | 
 | 1689 |  * WORK_STRUCT_LINKED set in its predecessor. | 
 | 1690 |  * | 
 | 1691 |  * If @nextp is not NULL, it's updated to point to the next work of | 
 | 1692 |  * the last scheduled work.  This allows move_linked_works() to be | 
 | 1693 |  * nested inside outer list_for_each_entry_safe(). | 
 | 1694 |  * | 
 | 1695 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1696 |  * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1697 |  */ | 
 | 1698 | static void move_linked_works(struct work_struct *work, struct list_head *head, | 
 | 1699 | 			      struct work_struct **nextp) | 
 | 1700 | { | 
 | 1701 | 	struct work_struct *n; | 
 | 1702 |  | 
 | 1703 | 	/* | 
 | 1704 | 	 * Linked worklist will always end before the end of the list, | 
 | 1705 | 	 * use NULL for list head. | 
 | 1706 | 	 */ | 
 | 1707 | 	list_for_each_entry_safe_from(work, n, NULL, entry) { | 
 | 1708 | 		list_move_tail(&work->entry, head); | 
 | 1709 | 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) | 
 | 1710 | 			break; | 
 | 1711 | 	} | 
 | 1712 |  | 
 | 1713 | 	/* | 
 | 1714 | 	 * If we're already inside safe list traversal and have moved | 
 | 1715 | 	 * multiple works to the scheduled queue, the next position | 
 | 1716 | 	 * needs to be updated. | 
 | 1717 | 	 */ | 
 | 1718 | 	if (nextp) | 
 | 1719 | 		*nextp = n; | 
 | 1720 | } | 
 | 1721 |  | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1722 | static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | 
 | 1723 | { | 
 | 1724 | 	struct work_struct *work = list_first_entry(&cwq->delayed_works, | 
 | 1725 | 						    struct work_struct, entry); | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1726 | 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1727 |  | 
| Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1728 | 	trace_workqueue_activate_work(work); | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1729 | 	move_linked_works(work, pos, NULL); | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1730 | 	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1731 | 	cwq->nr_active++; | 
 | 1732 | } | 
 | 1733 |  | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1734 | /** | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1735 |  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight | 
 | 1736 |  * @cwq: cwq of interest | 
 | 1737 |  * @color: color of work which left the queue | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1738 |  * @delayed: for a delayed work | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1739 |  * | 
 | 1740 |  * A work either has completed or is removed from pending queue, | 
 | 1741 |  * decrement nr_in_flight of its cwq and handle workqueue flushing. | 
 | 1742 |  * | 
 | 1743 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1744 |  * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1745 |  */ | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1746 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, | 
 | 1747 | 				 bool delayed) | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1748 | { | 
 | 1749 | 	/* ignore uncolored works */ | 
 | 1750 | 	if (color == WORK_NO_COLOR) | 
 | 1751 | 		return; | 
 | 1752 |  | 
 | 1753 | 	cwq->nr_in_flight[color]--; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1754 |  | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1755 | 	if (!delayed) { | 
 | 1756 | 		cwq->nr_active--; | 
 | 1757 | 		if (!list_empty(&cwq->delayed_works)) { | 
 | 1758 | 			/* one down, submit a delayed one */ | 
 | 1759 | 			if (cwq->nr_active < cwq->max_active) | 
 | 1760 | 				cwq_activate_first_delayed(cwq); | 
 | 1761 | 		} | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1762 | 	} | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1763 |  | 
 | 1764 | 	/* is flush in progress and are we at the flushing tip? */ | 
 | 1765 | 	if (likely(cwq->flush_color != color)) | 
 | 1766 | 		return; | 
 | 1767 |  | 
 | 1768 | 	/* are there still in-flight works? */ | 
 | 1769 | 	if (cwq->nr_in_flight[color]) | 
 | 1770 | 		return; | 
 | 1771 |  | 
 | 1772 | 	/* this cwq is done, clear flush_color */ | 
 | 1773 | 	cwq->flush_color = -1; | 
 | 1774 |  | 
 | 1775 | 	/* | 
 | 1776 | 	 * If this was the last cwq, wake up the first flusher.  It | 
 | 1777 | 	 * will handle the rest. | 
 | 1778 | 	 */ | 
 | 1779 | 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) | 
 | 1780 | 		complete(&cwq->wq->first_flusher->done); | 
 | 1781 | } | 
 | 1782 |  | 
 | 1783 | /** | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1784 |  * process_one_work - process single work | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1785 |  * @worker: self | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1786 |  * @work: work to process | 
 | 1787 |  * | 
 | 1788 |  * Process @work.  This function contains all the logic necessary to | 
 | 1789 |  * process a single work including synchronization against and | 
 | 1790 |  * interaction with other workers on the same cpu, queueing and | 
 | 1791 |  * flushing.  As long as context requirement is met, any worker can | 
 | 1792 |  * call this function to process a work. | 
 | 1793 |  * | 
 | 1794 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1795 |  * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1796 |  */ | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1797 | static void process_one_work(struct worker *worker, struct work_struct *work) | 
| Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 1798 | __releases(&gcwq->lock) | 
 | 1799 | __acquires(&gcwq->lock) | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1800 | { | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1801 | 	struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1802 | 	struct global_cwq *gcwq = cwq->gcwq; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1803 | 	struct hlist_head *bwh = busy_worker_head(gcwq, work); | 
| Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1804 | 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1805 | 	work_func_t f = work->func; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1806 | 	int work_color; | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1807 | 	struct worker *collision; | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1808 | #ifdef CONFIG_LOCKDEP | 
 | 1809 | 	/* | 
 | 1810 | 	 * It is permissible to free the struct work_struct from | 
 | 1811 | 	 * inside the function that is called from it, this we need to | 
 | 1812 | 	 * take into account for lockdep too.  To avoid bogus "held | 
 | 1813 | 	 * lock freed" warnings as well as problems when looking into | 
 | 1814 | 	 * work->lockdep_map, make a copy and use that here. | 
 | 1815 | 	 */ | 
 | 1816 | 	struct lockdep_map lockdep_map = work->lockdep_map; | 
 | 1817 | #endif | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1818 | 	/* | 
 | 1819 | 	 * A single work shouldn't be executed concurrently by | 
 | 1820 | 	 * multiple workers on a single cpu.  Check whether anyone is | 
 | 1821 | 	 * already processing the work.  If so, defer the work to the | 
 | 1822 | 	 * currently executing one. | 
 | 1823 | 	 */ | 
 | 1824 | 	collision = __find_worker_executing_work(gcwq, bwh, work); | 
 | 1825 | 	if (unlikely(collision)) { | 
 | 1826 | 		move_linked_works(work, &collision->scheduled, NULL); | 
 | 1827 | 		return; | 
 | 1828 | 	} | 
 | 1829 |  | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1830 | 	/* claim and process */ | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1831 | 	debug_work_deactivate(work); | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1832 | 	hlist_add_head(&worker->hentry, bwh); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1833 | 	worker->current_work = work; | 
| Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1834 | 	worker->current_cwq = cwq; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1835 | 	work_color = get_work_color(work); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1836 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1837 | 	/* record the current cpu number in the work data and dequeue */ | 
 | 1838 | 	set_work_cpu(work, gcwq->cpu); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1839 | 	list_del_init(&work->entry); | 
 | 1840 |  | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1841 | 	/* | 
 | 1842 | 	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI, | 
 | 1843 | 	 * wake up another worker; otherwise, clear HIGHPRI_PENDING. | 
 | 1844 | 	 */ | 
 | 1845 | 	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) { | 
 | 1846 | 		struct work_struct *nwork = list_first_entry(&gcwq->worklist, | 
 | 1847 | 						struct work_struct, entry); | 
 | 1848 |  | 
 | 1849 | 		if (!list_empty(&gcwq->worklist) && | 
 | 1850 | 		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI) | 
 | 1851 | 			wake_up_worker(gcwq); | 
 | 1852 | 		else | 
 | 1853 | 			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; | 
 | 1854 | 	} | 
 | 1855 |  | 
| Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1856 | 	/* | 
 | 1857 | 	 * CPU intensive works don't participate in concurrency | 
 | 1858 | 	 * management.  They're the scheduler's responsibility. | 
 | 1859 | 	 */ | 
 | 1860 | 	if (unlikely(cpu_intensive)) | 
 | 1861 | 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); | 
 | 1862 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1863 | 	spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1864 |  | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1865 | 	work_clear_pending(work); | 
| Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 1866 | 	lock_map_acquire_read(&cwq->wq->lockdep_map); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1867 | 	lock_map_acquire(&lockdep_map); | 
| Arjan van de Ven | e36c886 | 2010-08-21 13:07:26 -0700 | [diff] [blame] | 1868 | 	trace_workqueue_execute_start(work); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1869 | 	f(work); | 
| Arjan van de Ven | e36c886 | 2010-08-21 13:07:26 -0700 | [diff] [blame] | 1870 | 	/* | 
 | 1871 | 	 * While we must be careful to not use "work" after this, the trace | 
 | 1872 | 	 * point will only record its address. | 
 | 1873 | 	 */ | 
 | 1874 | 	trace_workqueue_execute_end(work); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1875 | 	lock_map_release(&lockdep_map); | 
 | 1876 | 	lock_map_release(&cwq->wq->lockdep_map); | 
 | 1877 |  | 
 | 1878 | 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { | 
 | 1879 | 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " | 
 | 1880 | 		       "%s/0x%08x/%d\n", | 
 | 1881 | 		       current->comm, preempt_count(), task_pid_nr(current)); | 
 | 1882 | 		printk(KERN_ERR "    last function: "); | 
 | 1883 | 		print_symbol("%s\n", (unsigned long)f); | 
 | 1884 | 		debug_show_held_locks(current); | 
 | 1885 | 		dump_stack(); | 
 | 1886 | 	} | 
 | 1887 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1888 | 	spin_lock_irq(&gcwq->lock); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1889 |  | 
| Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1890 | 	/* clear cpu intensive status */ | 
 | 1891 | 	if (unlikely(cpu_intensive)) | 
 | 1892 | 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE); | 
 | 1893 |  | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1894 | 	/* we're done with it, release */ | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1895 | 	hlist_del_init(&worker->hentry); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1896 | 	worker->current_work = NULL; | 
| Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1897 | 	worker->current_cwq = NULL; | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1898 | 	cwq_dec_nr_in_flight(cwq, work_color, false); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1899 | } | 
 | 1900 |  | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1901 | /** | 
 | 1902 |  * process_scheduled_works - process scheduled works | 
 | 1903 |  * @worker: self | 
 | 1904 |  * | 
 | 1905 |  * Process all scheduled works.  Please note that the scheduled list | 
 | 1906 |  * may change while processing a work, so this function repeatedly | 
 | 1907 |  * fetches a work from the top and executes it. | 
 | 1908 |  * | 
 | 1909 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1910 |  * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1911 |  * multiple times. | 
 | 1912 |  */ | 
 | 1913 | static void process_scheduled_works(struct worker *worker) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1914 | { | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1915 | 	while (!list_empty(&worker->scheduled)) { | 
 | 1916 | 		struct work_struct *work = list_first_entry(&worker->scheduled, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1917 | 						struct work_struct, entry); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1918 | 		process_one_work(worker, work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1919 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1920 | } | 
 | 1921 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1922 | /** | 
 | 1923 |  * worker_thread - the worker thread function | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1924 |  * @__worker: self | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1925 |  * | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1926 |  * The gcwq worker thread function.  There's a single dynamic pool of | 
 | 1927 |  * these per CPU.  These workers process all works regardless of | 
 | 1928 |  * their specific target workqueue.  The only exception is works which | 
 | 1929 |  * belong to workqueues with a rescuer which will be explained in | 
 | 1930 |  * rescuer_thread(). | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1931 |  */ | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1932 | static int worker_thread(void *__worker) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 | { | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1934 | 	struct worker *worker = __worker; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1935 | 	struct global_cwq *gcwq = worker->gcwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1936 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1937 | 	/* tell the scheduler that this is a workqueue worker */ | 
 | 1938 | 	worker->task->flags |= PF_WQ_WORKER; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1939 | woke_up: | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1940 | 	spin_lock_irq(&gcwq->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1941 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1942 | 	/* DIE can be set only while we're idle, checking here is enough */ | 
 | 1943 | 	if (worker->flags & WORKER_DIE) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1944 | 		spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1945 | 		worker->task->flags &= ~PF_WQ_WORKER; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1946 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1947 | 	} | 
 | 1948 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1949 | 	worker_leave_idle(worker); | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1950 | recheck: | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1951 | 	/* no more worker necessary? */ | 
 | 1952 | 	if (!need_more_worker(gcwq)) | 
 | 1953 | 		goto sleep; | 
 | 1954 |  | 
 | 1955 | 	/* do we need to manage? */ | 
 | 1956 | 	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker)) | 
 | 1957 | 		goto recheck; | 
 | 1958 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1959 | 	/* | 
 | 1960 | 	 * ->scheduled list can only be filled while a worker is | 
 | 1961 | 	 * preparing to process a work or actually processing it. | 
 | 1962 | 	 * Make sure nobody diddled with it while I was sleeping. | 
 | 1963 | 	 */ | 
 | 1964 | 	BUG_ON(!list_empty(&worker->scheduled)); | 
 | 1965 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1966 | 	/* | 
 | 1967 | 	 * When control reaches this point, we're guaranteed to have | 
 | 1968 | 	 * at least one idle worker or that someone else has already | 
 | 1969 | 	 * assumed the manager role. | 
 | 1970 | 	 */ | 
 | 1971 | 	worker_clr_flags(worker, WORKER_PREP); | 
 | 1972 |  | 
 | 1973 | 	do { | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1974 | 		struct work_struct *work = | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1975 | 			list_first_entry(&gcwq->worklist, | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1976 | 					 struct work_struct, entry); | 
 | 1977 |  | 
 | 1978 | 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { | 
 | 1979 | 			/* optimization path, not strictly necessary */ | 
 | 1980 | 			process_one_work(worker, work); | 
 | 1981 | 			if (unlikely(!list_empty(&worker->scheduled))) | 
 | 1982 | 				process_scheduled_works(worker); | 
 | 1983 | 		} else { | 
 | 1984 | 			move_linked_works(work, &worker->scheduled, NULL); | 
 | 1985 | 			process_scheduled_works(worker); | 
 | 1986 | 		} | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1987 | 	} while (keep_working(gcwq)); | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1988 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1989 | 	worker_set_flags(worker, WORKER_PREP, false); | 
| Tejun Heo | d313dd8 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1990 | sleep: | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1991 | 	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) | 
 | 1992 | 		goto recheck; | 
| Tejun Heo | d313dd8 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1993 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1994 | 	/* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1995 | 	 * gcwq->lock is held and there's no work to process and no | 
 | 1996 | 	 * need to manage, sleep.  Workers are woken up only while | 
 | 1997 | 	 * holding gcwq->lock or from local cpu, so setting the | 
 | 1998 | 	 * current state before releasing gcwq->lock is enough to | 
 | 1999 | 	 * prevent losing any event. | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2000 | 	 */ | 
 | 2001 | 	worker_enter_idle(worker); | 
 | 2002 | 	__set_current_state(TASK_INTERRUPTIBLE); | 
 | 2003 | 	spin_unlock_irq(&gcwq->lock); | 
 | 2004 | 	schedule(); | 
 | 2005 | 	goto woke_up; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2006 | } | 
 | 2007 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2008 | /** | 
 | 2009 |  * rescuer_thread - the rescuer thread function | 
 | 2010 |  * @__wq: the associated workqueue | 
 | 2011 |  * | 
 | 2012 |  * Workqueue rescuer thread function.  There's one rescuer for each | 
 | 2013 |  * workqueue which has WQ_RESCUER set. | 
 | 2014 |  * | 
 | 2015 |  * Regular work processing on a gcwq may block trying to create a new | 
 | 2016 |  * worker, which uses a GFP_KERNEL allocation and has a slight chance of | 
 | 2017 |  * developing into a deadlock if some works currently on the same queue | 
 | 2018 |  * need to be processed to satisfy that GFP_KERNEL allocation.  This is | 
 | 2019 |  * the problem the rescuer solves. | 
 | 2020 |  * | 
 | 2021 |  * When such a condition is possible, the gcwq summons the rescuers of all | 
 | 2022 |  * workqueues which have works queued on the gcwq and lets them process | 
 | 2023 |  * those works so that forward progress can be guaranteed. | 
 | 2024 |  * | 
 | 2025 |  * This should happen rarely. | 
 | 2026 |  */ | 
 | 2027 | static int rescuer_thread(void *__wq) | 
 | 2028 | { | 
 | 2029 | 	struct workqueue_struct *wq = __wq; | 
 | 2030 | 	struct worker *rescuer = wq->rescuer; | 
 | 2031 | 	struct list_head *scheduled = &rescuer->scheduled; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2032 | 	bool is_unbound = wq->flags & WQ_UNBOUND; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2033 | 	unsigned int cpu; | 
 | 2034 |  | 
 | 2035 | 	set_user_nice(current, RESCUER_NICE_LEVEL); | 
 | 2036 | repeat: | 
 | 2037 | 	set_current_state(TASK_INTERRUPTIBLE); | 
 | 2038 |  | 
 | 2039 | 	if (kthread_should_stop()) | 
 | 2040 | 		return 0; | 
 | 2041 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2042 | 	/* | 
 | 2043 | 	 * See whether any cpu is asking for help.  Unbound | 
 | 2044 | 	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. | 
 | 2045 | 	 */ | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 2046 | 	for_each_mayday_cpu(cpu, wq->mayday_mask) { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2047 | 		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; | 
 | 2048 | 		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2049 | 		struct global_cwq *gcwq = cwq->gcwq; | 
 | 2050 | 		struct work_struct *work, *n; | 
 | 2051 |  | 
 | 2052 | 		__set_current_state(TASK_RUNNING); | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 2053 | 		mayday_clear_cpu(cpu, wq->mayday_mask); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2054 |  | 
 | 2055 | 		/* migrate to the target cpu if possible */ | 
 | 2056 | 		rescuer->gcwq = gcwq; | 
 | 2057 | 		worker_maybe_bind_and_lock(rescuer); | 
 | 2058 |  | 
 | 2059 | 		/* | 
 | 2060 | 		 * Slurp in all works issued via this workqueue and | 
 | 2061 | 		 * process'em. | 
 | 2062 | 		 */ | 
 | 2063 | 		BUG_ON(!list_empty(&rescuer->scheduled)); | 
 | 2064 | 		list_for_each_entry_safe(work, n, &gcwq->worklist, entry) | 
 | 2065 | 			if (get_work_cwq(work) == cwq) | 
 | 2066 | 				move_linked_works(work, scheduled, &n); | 
 | 2067 |  | 
 | 2068 | 		process_scheduled_works(rescuer); | 
| Tejun Heo | 7576958 | 2011-02-14 14:04:46 +0100 | [diff] [blame] | 2069 |  | 
 | 2070 | 		/* | 
 | 2071 | 		 * Leave this gcwq.  If keep_working() is %true, notify a | 
 | 2072 | 		 * regular worker; otherwise, we end up with 0 concurrency | 
 | 2073 | 		 * and stall execution. | 
 | 2074 | 		 */ | 
 | 2075 | 		if (keep_working(gcwq)) | 
 | 2076 | 			wake_up_worker(gcwq); | 
 | 2077 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2078 | 		spin_unlock_irq(&gcwq->lock); | 
 | 2079 | 	} | 
 | 2080 |  | 
 | 2081 | 	schedule(); | 
 | 2082 | 	goto repeat; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2083 | } | 
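
/*
 * Illustrative sketch (added, not in the original): a rescuer is attached
 * to a workqueue that is allocated with a reclaim-related flag (typically
 * WQ_MEM_RECLAIM at this point in the code's history).  The identifier
 * example_reclaim_wq is hypothetical.
 *
 *	static struct workqueue_struct *example_reclaim_wq;
 *
 *	example_reclaim_wq = alloc_workqueue("example_reclaim",
 *					     WQ_MEM_RECLAIM, 1);
 *
 * Works queued on such a workqueue can make forward progress even when
 * new workers cannot be created, because the gcwq will summon
 * rescuer_thread() above to process them.
 */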
 | 2084 |  | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2085 | struct wq_barrier { | 
 | 2086 | 	struct work_struct	work; | 
 | 2087 | 	struct completion	done; | 
 | 2088 | }; | 
 | 2089 |  | 
 | 2090 | static void wq_barrier_func(struct work_struct *work) | 
 | 2091 | { | 
 | 2092 | 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work); | 
 | 2093 | 	complete(&barr->done); | 
 | 2094 | } | 
 | 2095 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2096 | /** | 
 | 2097 |  * insert_wq_barrier - insert a barrier work | 
 | 2098 |  * @cwq: cwq to insert barrier into | 
 | 2099 |  * @barr: wq_barrier to insert | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2100 |  * @target: target work to attach @barr to | 
 | 2101 |  * @worker: worker currently executing @target, NULL if @target is not executing | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2102 |  * | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2103 |  * @barr is linked to @target such that @barr is completed only after | 
 | 2104 |  * @target finishes execution.  Please note that the ordering | 
 | 2105 |  * guarantee is observed only with respect to @target and on the local | 
 | 2106 |  * cpu. | 
 | 2107 |  * | 
 | 2108 |  * Currently, a queued barrier can't be canceled.  This is because | 
 | 2109 |  * try_to_grab_pending() can't determine whether the work to be | 
 | 2110 |  * grabbed is at the head of the queue and thus can't clear the LINKED | 
 | 2111 |  * flag of the previous work, while there must be a valid next work | 
 | 2112 |  * after a work with the LINKED flag set. | 
 | 2113 |  * | 
 | 2114 |  * Note that when @worker is non-NULL, @target may be modified | 
 | 2115 |  * underneath us, so we can't reliably determine cwq from @target. | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2116 |  * | 
 | 2117 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2118 |  * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2119 |  */ | 
| Oleg Nesterov | 83c2252 | 2007-05-09 02:33:54 -0700 | [diff] [blame] | 2120 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2121 | 			      struct wq_barrier *barr, | 
 | 2122 | 			      struct work_struct *target, struct worker *worker) | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2123 | { | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2124 | 	struct list_head *head; | 
 | 2125 | 	unsigned int linked = 0; | 
 | 2126 |  | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2127 | 	/* | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2128 | 	 * debugobject calls are safe here even with gcwq->lock locked | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2129 | 	 * as we know for sure that this will not trigger any of the | 
 | 2130 | 	 * checks and call back into the fixup functions where we | 
 | 2131 | 	 * might deadlock. | 
 | 2132 | 	 */ | 
| Andrew Morton | ca1cab3 | 2010-10-26 14:22:34 -0700 | [diff] [blame] | 2133 | 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2134 | 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2135 | 	init_completion(&barr->done); | 
| Oleg Nesterov | 83c2252 | 2007-05-09 02:33:54 -0700 | [diff] [blame] | 2136 |  | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2137 | 	/* | 
 | 2138 | 	 * If @target is currently being executed, schedule the | 
 | 2139 | 	 * barrier to the worker; otherwise, put it after @target. | 
 | 2140 | 	 */ | 
 | 2141 | 	if (worker) | 
 | 2142 | 		head = worker->scheduled.next; | 
 | 2143 | 	else { | 
 | 2144 | 		unsigned long *bits = work_data_bits(target); | 
 | 2145 |  | 
 | 2146 | 		head = target->entry.next; | 
 | 2147 | 		/* there can already be other linked works, inherit and set */ | 
 | 2148 | 		linked = *bits & WORK_STRUCT_LINKED; | 
 | 2149 | 		__set_bit(WORK_STRUCT_LINKED_BIT, bits); | 
 | 2150 | 	} | 
 | 2151 |  | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2152 | 	debug_work_activate(&barr->work); | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2153 | 	insert_work(cwq, &barr->work, head, | 
 | 2154 | 		    work_color_to_flags(WORK_NO_COLOR) | linked); | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2155 | } | 
 | 2156 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2157 | /** | 
 | 2158 |  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing | 
 | 2159 |  * @wq: workqueue being flushed | 
 | 2160 |  * @flush_color: new flush color, < 0 for no-op | 
 | 2161 |  * @work_color: new work color, < 0 for no-op | 
 | 2162 |  * | 
 | 2163 |  * Prepare cwqs for workqueue flushing. | 
 | 2164 |  * | 
 | 2165 |  * If @flush_color is non-negative, flush_color on all cwqs should be | 
 | 2166 |  * -1.  If no cwq has in-flight work items at the specified color, all | 
 | 2167 |  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq | 
 | 2168 |  * has in-flight work items, its cwq->flush_color is set to | 
 | 2169 |  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq | 
 | 2170 |  * wakeup logic is armed and %true is returned. | 
 | 2171 |  * | 
 | 2172 |  * The caller should have initialized @wq->first_flusher prior to | 
 | 2173 |  * calling this function with non-negative @flush_color.  If | 
 | 2174 |  * @flush_color is negative, no flush color update is done and %false | 
 | 2175 |  * is returned. | 
 | 2176 |  * | 
 | 2177 |  * If @work_color is non-negative, all cwqs should have the same | 
 | 2178 |  * work_color which is previous to @work_color and all will be | 
 | 2179 |  * advanced to @work_color. | 
 | 2180 |  * | 
 | 2181 |  * CONTEXT: | 
 | 2182 |  * mutex_lock(wq->flush_mutex). | 
 | 2183 |  * | 
 | 2184 |  * RETURNS: | 
 | 2185 |  * %true if @flush_color >= 0 and there's something to flush.  %false | 
 | 2186 |  * otherwise. | 
 | 2187 |  */ | 
 | 2188 | static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, | 
 | 2189 | 				      int flush_color, int work_color) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2190 | { | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2191 | 	bool wait = false; | 
 | 2192 | 	unsigned int cpu; | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 2193 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2194 | 	if (flush_color >= 0) { | 
 | 2195 | 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); | 
 | 2196 | 		atomic_set(&wq->nr_cwqs_to_flush, 1); | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2197 | 	} | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 2198 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2199 | 	for_each_cwq_cpu(cpu, wq) { | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2200 | 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2201 | 		struct global_cwq *gcwq = cwq->gcwq; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2202 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2203 | 		spin_lock_irq(&gcwq->lock); | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2204 |  | 
 | 2205 | 		if (flush_color >= 0) { | 
 | 2206 | 			BUG_ON(cwq->flush_color != -1); | 
 | 2207 |  | 
 | 2208 | 			if (cwq->nr_in_flight[flush_color]) { | 
 | 2209 | 				cwq->flush_color = flush_color; | 
 | 2210 | 				atomic_inc(&wq->nr_cwqs_to_flush); | 
 | 2211 | 				wait = true; | 
 | 2212 | 			} | 
 | 2213 | 		} | 
 | 2214 |  | 
 | 2215 | 		if (work_color >= 0) { | 
 | 2216 | 			BUG_ON(work_color != work_next_color(cwq->work_color)); | 
 | 2217 | 			cwq->work_color = work_color; | 
 | 2218 | 		} | 
 | 2219 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2220 | 		spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2221 | 	} | 
 | 2222 |  | 
 | 2223 | 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) | 
 | 2224 | 		complete(&wq->first_flusher->done); | 
 | 2225 |  | 
 | 2226 | 	return wait; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2227 | } | 
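
/*
 * Worked example (added for clarity, not in the original): assume
 * wq->work_color == wq->flush_color == 2 when a flush starts.
 * flush_workqueue() advances work_color to 3 and calls
 * flush_workqueue_prep_cwqs(wq, 2, 3).  Every cwq that still has
 * nr_in_flight[2] items gets cwq->flush_color = 2 and bumps
 * wq->nr_cwqs_to_flush; all cwqs advance cwq->work_color to 3 so that
 * newly queued works carry the new color.  As the remaining color-2
 * items retire, cwq_dec_nr_in_flight() drains nr_cwqs_to_flush and the
 * first flusher's completion is signalled once it reaches zero.
 */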
 | 2228 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2229 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2230 |  * flush_workqueue - ensure that any scheduled work has run to completion. | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2231 |  * @wq: workqueue to flush | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2232 |  * | 
 | 2233 |  * Forces execution of the workqueue and blocks until its completion. | 
 | 2234 |  * This is typically used in driver shutdown handlers. | 
 | 2235 |  * | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2236 |  * We sleep until all works which were queued on entry have been handled, | 
 | 2237 |  * but we are not livelocked by new incoming ones. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 |  */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2239 | void flush_workqueue(struct workqueue_struct *wq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2240 | { | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2241 | 	struct wq_flusher this_flusher = { | 
 | 2242 | 		.list = LIST_HEAD_INIT(this_flusher.list), | 
 | 2243 | 		.flush_color = -1, | 
 | 2244 | 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), | 
 | 2245 | 	}; | 
 | 2246 | 	int next_color; | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 2247 |  | 
| Ingo Molnar | 3295f0e | 2008-08-11 10:30:30 +0200 | [diff] [blame] | 2248 | 	lock_map_acquire(&wq->lockdep_map); | 
 | 2249 | 	lock_map_release(&wq->lockdep_map); | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2250 |  | 
 | 2251 | 	mutex_lock(&wq->flush_mutex); | 
 | 2252 |  | 
 | 2253 | 	/* | 
 | 2254 | 	 * Start-to-wait phase | 
 | 2255 | 	 */ | 
 | 2256 | 	next_color = work_next_color(wq->work_color); | 
 | 2257 |  | 
 | 2258 | 	if (next_color != wq->flush_color) { | 
 | 2259 | 		/* | 
 | 2260 | 		 * Color space is not full.  The current work_color | 
 | 2261 | 		 * becomes our flush_color and work_color is advanced | 
 | 2262 | 		 * by one. | 
 | 2263 | 		 */ | 
 | 2264 | 		BUG_ON(!list_empty(&wq->flusher_overflow)); | 
 | 2265 | 		this_flusher.flush_color = wq->work_color; | 
 | 2266 | 		wq->work_color = next_color; | 
 | 2267 |  | 
 | 2268 | 		if (!wq->first_flusher) { | 
 | 2269 | 			/* no flush in progress, become the first flusher */ | 
 | 2270 | 			BUG_ON(wq->flush_color != this_flusher.flush_color); | 
 | 2271 |  | 
 | 2272 | 			wq->first_flusher = &this_flusher; | 
 | 2273 |  | 
 | 2274 | 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, | 
 | 2275 | 						       wq->work_color)) { | 
 | 2276 | 				/* nothing to flush, done */ | 
 | 2277 | 				wq->flush_color = next_color; | 
 | 2278 | 				wq->first_flusher = NULL; | 
 | 2279 | 				goto out_unlock; | 
 | 2280 | 			} | 
 | 2281 | 		} else { | 
 | 2282 | 			/* wait in queue */ | 
 | 2283 | 			BUG_ON(wq->flush_color == this_flusher.flush_color); | 
 | 2284 | 			list_add_tail(&this_flusher.list, &wq->flusher_queue); | 
 | 2285 | 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color); | 
 | 2286 | 		} | 
 | 2287 | 	} else { | 
 | 2288 | 		/* | 
 | 2289 | 		 * Oops, color space is full, wait on overflow queue. | 
 | 2290 | 		 * The next flush completion will assign us a | 
 | 2291 | 		 * flush_color and transfer us to flusher_queue. | 
 | 2292 | 		 */ | 
 | 2293 | 		list_add_tail(&this_flusher.list, &wq->flusher_overflow); | 
 | 2294 | 	} | 
 | 2295 |  | 
 | 2296 | 	mutex_unlock(&wq->flush_mutex); | 
 | 2297 |  | 
 | 2298 | 	wait_for_completion(&this_flusher.done); | 
 | 2299 |  | 
 | 2300 | 	/* | 
 | 2301 | 	 * Wake-up-and-cascade phase | 
 | 2302 | 	 * | 
 | 2303 | 	 * First flushers are responsible for cascading flushes and | 
 | 2304 | 	 * handling overflow.  Non-first flushers can simply return. | 
 | 2305 | 	 */ | 
 | 2306 | 	if (wq->first_flusher != &this_flusher) | 
 | 2307 | 		return; | 
 | 2308 |  | 
 | 2309 | 	mutex_lock(&wq->flush_mutex); | 
 | 2310 |  | 
| Tejun Heo | 4ce48b3 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2311 | 	/* we might have raced, check again with mutex held */ | 
 | 2312 | 	if (wq->first_flusher != &this_flusher) | 
 | 2313 | 		goto out_unlock; | 
 | 2314 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2315 | 	wq->first_flusher = NULL; | 
 | 2316 |  | 
 | 2317 | 	BUG_ON(!list_empty(&this_flusher.list)); | 
 | 2318 | 	BUG_ON(wq->flush_color != this_flusher.flush_color); | 
 | 2319 |  | 
 | 2320 | 	while (true) { | 
 | 2321 | 		struct wq_flusher *next, *tmp; | 
 | 2322 |  | 
 | 2323 | 		/* complete all the flushers sharing the current flush color */ | 
 | 2324 | 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { | 
 | 2325 | 			if (next->flush_color != wq->flush_color) | 
 | 2326 | 				break; | 
 | 2327 | 			list_del_init(&next->list); | 
 | 2328 | 			complete(&next->done); | 
 | 2329 | 		} | 
 | 2330 |  | 
 | 2331 | 		BUG_ON(!list_empty(&wq->flusher_overflow) && | 
 | 2332 | 		       wq->flush_color != work_next_color(wq->work_color)); | 
 | 2333 |  | 
 | 2334 | 		/* this flush_color is finished, advance by one */ | 
 | 2335 | 		wq->flush_color = work_next_color(wq->flush_color); | 
 | 2336 |  | 
 | 2337 | 		/* one color has been freed, handle overflow queue */ | 
 | 2338 | 		if (!list_empty(&wq->flusher_overflow)) { | 
 | 2339 | 			/* | 
 | 2340 | 			 * Assign the same color to all overflowed | 
 | 2341 | 			 * flushers, advance work_color and append to | 
 | 2342 | 			 * flusher_queue.  This is the start-to-wait | 
 | 2343 | 			 * phase for these overflowed flushers. | 
 | 2344 | 			 */ | 
 | 2345 | 			list_for_each_entry(tmp, &wq->flusher_overflow, list) | 
 | 2346 | 				tmp->flush_color = wq->work_color; | 
 | 2347 |  | 
 | 2348 | 			wq->work_color = work_next_color(wq->work_color); | 
 | 2349 |  | 
 | 2350 | 			list_splice_tail_init(&wq->flusher_overflow, | 
 | 2351 | 					      &wq->flusher_queue); | 
 | 2352 | 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color); | 
 | 2353 | 		} | 
 | 2354 |  | 
 | 2355 | 		if (list_empty(&wq->flusher_queue)) { | 
 | 2356 | 			BUG_ON(wq->flush_color != wq->work_color); | 
 | 2357 | 			break; | 
 | 2358 | 		} | 
 | 2359 |  | 
 | 2360 | 		/* | 
 | 2361 | 		 * Need to flush more colors.  Make the next flusher | 
 | 2362 | 		 * the new first flusher and arm cwqs. | 
 | 2363 | 		 */ | 
 | 2364 | 		BUG_ON(wq->flush_color == wq->work_color); | 
 | 2365 | 		BUG_ON(wq->flush_color != next->flush_color); | 
 | 2366 |  | 
 | 2367 | 		list_del_init(&next->list); | 
 | 2368 | 		wq->first_flusher = next; | 
 | 2369 |  | 
 | 2370 | 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) | 
 | 2371 | 			break; | 
 | 2372 |  | 
 | 2373 | 		/* | 
 | 2374 | 		 * Meh... this color is already done, clear first | 
 | 2375 | 		 * flusher and repeat cascading. | 
 | 2376 | 		 */ | 
 | 2377 | 		wq->first_flusher = NULL; | 
 | 2378 | 	} | 
 | 2379 |  | 
 | 2380 | out_unlock: | 
 | 2381 | 	mutex_unlock(&wq->flush_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2382 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2383 | EXPORT_SYMBOL_GPL(flush_workqueue); | 
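
/*
 * Illustrative usage sketch (added, not in the original): a driver
 * shutdown path typically flushes its private workqueue before freeing
 * the data its work items touch.  All identifiers below are hypothetical.
 *
 *	static struct workqueue_struct *example_wq;
 *
 *	static void example_shutdown(void)
 *	{
 *		flush_workqueue(example_wq);
 *		destroy_workqueue(example_wq);
 *	}
 */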
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2384 |  | 
| Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 2385 | /** | 
 | 2386 |  * drain_workqueue - drain a workqueue | 
 | 2387 |  * @wq: workqueue to drain | 
 | 2388 |  * | 
 | 2389 |  * Wait until the workqueue becomes empty.  While draining is in progress, | 
 | 2390 |  * only chain queueing is allowed.  IOW, only currently pending or running | 
 | 2391 |  * work items on @wq can queue further work items on it.  @wq is flushed | 
 | 2392 |  * repeatedly until it becomes empty.  The number of flushing is detemined | 
 | 2393 |  * by the depth of chaining and should be relatively short.  Whine if it | 
 | 2394 |  * takes too long. | 
 | 2395 |  */ | 
 | 2396 | void drain_workqueue(struct workqueue_struct *wq) | 
 | 2397 | { | 
 | 2398 | 	unsigned int flush_cnt = 0; | 
 | 2399 | 	unsigned int cpu; | 
 | 2400 |  | 
 | 2401 | 	/* | 
 | 2402 | 	 * __queue_work() needs to test whether there are drainers; it is much | 
 | 2403 | 	 * hotter than drain_workqueue() and already looks at @wq->flags. | 
 | 2404 | 	 * Use WQ_DRAINING so that the queueing path doesn't have to check nr_drainers. | 
 | 2405 | 	 */ | 
 | 2406 | 	spin_lock(&workqueue_lock); | 
 | 2407 | 	if (!wq->nr_drainers++) | 
 | 2408 | 		wq->flags |= WQ_DRAINING; | 
 | 2409 | 	spin_unlock(&workqueue_lock); | 
 | 2410 | reflush: | 
 | 2411 | 	flush_workqueue(wq); | 
 | 2412 |  | 
 | 2413 | 	for_each_cwq_cpu(cpu, wq) { | 
 | 2414 | 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
| Thomas Tuttle | fa2563e | 2011-09-14 16:22:28 -0700 | [diff] [blame] | 2415 | 		bool drained; | 
| Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 2416 |  | 
| Thomas Tuttle | fa2563e | 2011-09-14 16:22:28 -0700 | [diff] [blame] | 2417 | 		spin_lock_irq(&cwq->gcwq->lock); | 
 | 2418 | 		drained = !cwq->nr_active && list_empty(&cwq->delayed_works); | 
 | 2419 | 		spin_unlock_irq(&cwq->gcwq->lock); | 
 | 2420 |  | 
 | 2421 | 		if (drained) | 
| Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 2422 | 			continue; | 
 | 2423 |  | 
 | 2424 | 		if (++flush_cnt == 10 || | 
 | 2425 | 		    (flush_cnt % 100 == 0 && flush_cnt <= 1000)) | 
 | 2426 | 			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n", | 
 | 2427 | 				   wq->name, flush_cnt); | 
 | 2428 | 		goto reflush; | 
 | 2429 | 	} | 
 | 2430 |  | 
 | 2431 | 	spin_lock(&workqueue_lock); | 
 | 2432 | 	if (!--wq->nr_drainers) | 
 | 2433 | 		wq->flags &= ~WQ_DRAINING; | 
 | 2434 | 	spin_unlock(&workqueue_lock); | 
 | 2435 | } | 
 | 2436 | EXPORT_SYMBOL_GPL(drain_workqueue); | 
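
/*
 * Illustrative usage sketch (added, not in the original): draining is
 * useful when work items may requeue themselves, so a plain flush cannot
 * reach an empty queue.  Identifiers below are hypothetical.
 *
 *	static struct workqueue_struct *example_wq;
 *
 *	static void example_teardown(void)
 *	{
 *		drain_workqueue(example_wq);
 *		destroy_workqueue(example_wq);
 *	}
 */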
 | 2437 |  | 
| Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2438 | static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, | 
 | 2439 | 			     bool wait_executing) | 
 | 2440 | { | 
 | 2441 | 	struct worker *worker = NULL; | 
 | 2442 | 	struct global_cwq *gcwq; | 
 | 2443 | 	struct cpu_workqueue_struct *cwq; | 
 | 2444 |  | 
 | 2445 | 	might_sleep(); | 
 | 2446 | 	gcwq = get_work_gcwq(work); | 
 | 2447 | 	if (!gcwq) | 
 | 2448 | 		return false; | 
 | 2449 |  | 
 | 2450 | 	spin_lock_irq(&gcwq->lock); | 
 | 2451 | 	if (!list_empty(&work->entry)) { | 
 | 2452 | 		/* | 
 | 2453 | 		 * See the comment near try_to_grab_pending()->smp_rmb(). | 
 | 2454 | 		 * If it was re-queued to a different gcwq under us, we | 
 | 2455 | 		 * are not going to wait. | 
 | 2456 | 		 */ | 
 | 2457 | 		smp_rmb(); | 
 | 2458 | 		cwq = get_work_cwq(work); | 
 | 2459 | 		if (unlikely(!cwq || gcwq != cwq->gcwq)) | 
 | 2460 | 			goto already_gone; | 
 | 2461 | 	} else if (wait_executing) { | 
 | 2462 | 		worker = find_worker_executing_work(gcwq, work); | 
 | 2463 | 		if (!worker) | 
 | 2464 | 			goto already_gone; | 
 | 2465 | 		cwq = worker->current_cwq; | 
 | 2466 | 	} else | 
 | 2467 | 		goto already_gone; | 
 | 2468 |  | 
 | 2469 | 	insert_wq_barrier(cwq, barr, work, worker); | 
 | 2470 | 	spin_unlock_irq(&gcwq->lock); | 
 | 2471 |  | 
| Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 2472 | 	/* | 
 | 2473 | 	 * If @max_active is 1 or rescuer is in use, flushing another work | 
 | 2474 | 	 * item on the same workqueue may lead to deadlock.  Make sure the | 
 | 2475 | 	 * flusher is not running on the same workqueue by verifying write | 
 | 2476 | 	 * access. | 
 | 2477 | 	 */ | 
 | 2478 | 	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) | 
 | 2479 | 		lock_map_acquire(&cwq->wq->lockdep_map); | 
 | 2480 | 	else | 
 | 2481 | 		lock_map_acquire_read(&cwq->wq->lockdep_map); | 
| Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2482 | 	lock_map_release(&cwq->wq->lockdep_map); | 
| Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 2483 |  | 
| Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2484 | 	return true; | 
 | 2485 | already_gone: | 
 | 2486 | 	spin_unlock_irq(&gcwq->lock); | 
 | 2487 | 	return false; | 
 | 2488 | } | 
 | 2489 |  | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2490 | /** | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2491 |  * flush_work - wait for a work to finish executing the last queueing instance | 
 | 2492 |  * @work: the work to flush | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2493 |  * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2494 |  * Wait until @work has finished execution.  This function considers | 
 | 2495 |  * only the last queueing instance of @work.  If @work has been | 
 | 2496 |  * enqueued across different CPUs on a non-reentrant workqueue or on | 
 | 2497 |  * multiple workqueues, @work might still be executing on return on | 
 | 2498 |  * some of the CPUs from earlier queueing. | 
| Oleg Nesterov | a67da70 | 2008-07-25 01:47:52 -0700 | [diff] [blame] | 2499 |  * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2500 |  * If @work was queued only on a non-reentrant, ordered or unbound | 
 | 2501 |  * workqueue, @work is guaranteed to be idle on return if it hasn't | 
 | 2502 |  * been requeued since flush started. | 
 | 2503 |  * | 
 | 2504 |  * RETURNS: | 
 | 2505 |  * %true if flush_work() waited for the work to finish execution, | 
 | 2506 |  * %false if it was already idle. | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2507 |  */ | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2508 | bool flush_work(struct work_struct *work) | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2509 | { | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2510 | 	struct wq_barrier barr; | 
 | 2511 |  | 
| Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2512 | 	if (start_flush_work(work, &barr, true)) { | 
 | 2513 | 		wait_for_completion(&barr.done); | 
 | 2514 | 		destroy_work_on_stack(&barr.work); | 
 | 2515 | 		return true; | 
 | 2516 | 	} else | 
 | 2517 | 		return false; | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2518 | } | 
 | 2519 | EXPORT_SYMBOL_GPL(flush_work); | 
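
/*
 * Illustrative usage sketch (added, not in the original): waiting for one
 * specific work item before reusing the memory it operates on.  The
 * identifiers example_dev and free_example_dev() are hypothetical.
 *
 *	struct example_dev {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void example_release(struct example_dev *edev)
 *	{
 *		flush_work(&edev->reset_work);
 *		free_example_dev(edev);
 *	}
 *
 * Note that, as documented above, only the last queueing instance is
 * waited for; cancel_work_sync() is the stronger teardown primitive.
 */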
 | 2520 |  | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2521 | static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) | 
 | 2522 | { | 
 | 2523 | 	struct wq_barrier barr; | 
 | 2524 | 	struct worker *worker; | 
 | 2525 |  | 
 | 2526 | 	spin_lock_irq(&gcwq->lock); | 
 | 2527 |  | 
 | 2528 | 	worker = find_worker_executing_work(gcwq, work); | 
 | 2529 | 	if (unlikely(worker)) | 
 | 2530 | 		insert_wq_barrier(worker->current_cwq, &barr, work, worker); | 
 | 2531 |  | 
 | 2532 | 	spin_unlock_irq(&gcwq->lock); | 
 | 2533 |  | 
 | 2534 | 	if (unlikely(worker)) { | 
 | 2535 | 		wait_for_completion(&barr.done); | 
 | 2536 | 		destroy_work_on_stack(&barr.work); | 
 | 2537 | 		return true; | 
 | 2538 | 	} else | 
 | 2539 | 		return false; | 
 | 2540 | } | 
 | 2541 |  | 
 | 2542 | static bool wait_on_work(struct work_struct *work) | 
 | 2543 | { | 
 | 2544 | 	bool ret = false; | 
 | 2545 | 	int cpu; | 
 | 2546 |  | 
 | 2547 | 	might_sleep(); | 
 | 2548 |  | 
 | 2549 | 	lock_map_acquire(&work->lockdep_map); | 
 | 2550 | 	lock_map_release(&work->lockdep_map); | 
 | 2551 |  | 
 | 2552 | 	for_each_gcwq_cpu(cpu) | 
 | 2553 | 		ret |= wait_on_cpu_work(get_gcwq(cpu), work); | 
 | 2554 | 	return ret; | 
 | 2555 | } | 
 | 2556 |  | 
| Tejun Heo | 0938349 | 2010-09-16 10:48:29 +0200 | [diff] [blame] | 2557 | /** | 
 | 2558 |  * flush_work_sync - wait until a work has finished execution | 
 | 2559 |  * @work: the work to flush | 
 | 2560 |  * | 
 | 2561 |  * Wait until @work has finished execution.  On return, it's | 
 | 2562 |  * guaranteed that all queueing instances of @work which happened | 
 | 2563 |  * before this function is called are finished.  In other words, if | 
 | 2564 |  * @work hasn't been requeued since this function was called, @work is | 
 | 2565 |  * guaranteed to be idle on return. | 
 | 2566 |  * | 
 | 2567 |  * RETURNS: | 
 | 2568 |  * %true if flush_work_sync() waited for the work to finish execution, | 
 | 2569 |  * %false if it was already idle. | 
 | 2570 |  */ | 
 | 2571 | bool flush_work_sync(struct work_struct *work) | 
 | 2572 | { | 
 | 2573 | 	struct wq_barrier barr; | 
 | 2574 | 	bool pending, waited; | 
 | 2575 |  | 
 | 2576 | 	/* we'll wait for executions separately, queue barr only if pending */ | 
 | 2577 | 	pending = start_flush_work(work, &barr, false); | 
 | 2578 |  | 
 | 2579 | 	/* wait for executions to finish */ | 
 | 2580 | 	waited = wait_on_work(work); | 
 | 2581 |  | 
 | 2582 | 	/* wait for the pending one */ | 
 | 2583 | 	if (pending) { | 
 | 2584 | 		wait_for_completion(&barr.done); | 
 | 2585 | 		destroy_work_on_stack(&barr.work); | 
 | 2586 | 	} | 
 | 2587 |  | 
 | 2588 | 	return pending || waited; | 
 | 2589 | } | 
 | 2590 | EXPORT_SYMBOL_GPL(flush_work_sync); | 
 | 2591 |  | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2592 | /* | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2593 |  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2594 |  * so this work can't be re-armed in any way. | 
 | 2595 |  */ | 
 | 2596 | static int try_to_grab_pending(struct work_struct *work) | 
 | 2597 | { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2598 | 	struct global_cwq *gcwq; | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2599 | 	int ret = -1; | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2600 |  | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2601 | 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2602 | 		return 0; | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2603 |  | 
 | 2604 | 	/* | 
 | 2605 | 	 * The queueing is in progress, or it is already queued. Try to | 
 | 2606 | 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. | 
 | 2607 | 	 */ | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2608 | 	gcwq = get_work_gcwq(work); | 
 | 2609 | 	if (!gcwq) | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2610 | 		return ret; | 
 | 2611 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2612 | 	spin_lock_irq(&gcwq->lock); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2613 | 	if (!list_empty(&work->entry)) { | 
 | 2614 | 		/* | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2615 | 		 * This work is queued, but perhaps we locked the wrong gcwq. | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2616 | 		 * In that case we must see the new value after rmb(), see | 
 | 2617 | 		 * insert_work()->wmb(). | 
 | 2618 | 		 */ | 
 | 2619 | 		smp_rmb(); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2620 | 		if (gcwq == get_work_gcwq(work)) { | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2621 | 			debug_work_deactivate(work); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2622 | 			list_del_init(&work->entry); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2623 | 			cwq_dec_nr_in_flight(get_work_cwq(work), | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 2624 | 				get_work_color(work), | 
 | 2625 | 				*work_data_bits(work) & WORK_STRUCT_DELAYED); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2626 | 			ret = 1; | 
 | 2627 | 		} | 
 | 2628 | 	} | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2629 | 	spin_unlock_irq(&gcwq->lock); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2630 |  | 
 | 2631 | 	return ret; | 
 | 2632 | } | 
 | 2633 |  | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2634 | static bool __cancel_work_timer(struct work_struct *work, | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2635 | 				struct timer_list* timer) | 
 | 2636 | { | 
 | 2637 | 	int ret; | 
 | 2638 |  | 
 | 2639 | 	do { | 
 | 2640 | 		ret = (timer && likely(del_timer(timer))); | 
 | 2641 | 		if (!ret) | 
 | 2642 | 			ret = try_to_grab_pending(work); | 
 | 2643 | 		wait_on_work(work); | 
 | 2644 | 	} while (unlikely(ret < 0)); | 
 | 2645 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2646 | 	clear_work_data(work); | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2647 | 	return ret; | 
 | 2648 | } | 
 | 2649 |  | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2650 | /** | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2651 |  * cancel_work_sync - cancel a work and wait for it to finish | 
 | 2652 |  * @work: the work to cancel | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2653 |  * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2654 |  * Cancel @work and wait for its execution to finish.  This function | 
 | 2655 |  * can be used even if the work re-queues itself or migrates to | 
 | 2656 |  * another workqueue.  On return from this function, @work is | 
 | 2657 |  * guaranteed to be not pending or executing on any CPU. | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2658 |  * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2659 |  * cancel_work_sync(&delayed_work->work) must not be used for | 
 | 2660 |  * delayed_work's.  Use cancel_delayed_work_sync() instead. | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2661 |  * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2662 |  * The caller must ensure that the workqueue on which @work was last | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2663 |  * queued can't be destroyed before this function returns. | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2664 |  * | 
 | 2665 |  * RETURNS: | 
 | 2666 |  * %true if @work was pending, %false otherwise. | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2667 |  */ | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2668 | bool cancel_work_sync(struct work_struct *work) | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2669 | { | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2670 | 	return __cancel_work_timer(work, NULL); | 
| Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 2671 | } | 
| Oleg Nesterov | 28e53bd | 2007-05-09 02:34:22 -0700 | [diff] [blame] | 2672 | EXPORT_SYMBOL_GPL(cancel_work_sync); | 
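
/*
 * Illustrative usage sketch (added, not in the original): cancelling on
 * device removal so the work neither remains queued nor keeps running
 * afterwards.  Identifiers below are hypothetical.
 *
 *	struct example_dev {
 *		struct work_struct irq_work;
 *	};
 *
 *	static void example_remove(struct example_dev *edev)
 *	{
 *		cancel_work_sync(&edev->irq_work);
 *		kfree(edev);
 *	}
 */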
| Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 2673 |  | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2674 | /** | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2675 |  * flush_delayed_work - wait for a dwork to finish executing the last queueing | 
 | 2676 |  * @dwork: the delayed work to flush | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2677 |  * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2678 |  * Delayed timer is cancelled and the pending work is queued for | 
 | 2679 |  * immediate execution.  Like flush_work(), this function only | 
 | 2680 |  * considers the last queueing instance of @dwork. | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2681 |  * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2682 |  * RETURNS: | 
 | 2683 |  * %true if flush_work() waited for the work to finish execution, | 
 | 2684 |  * %false if it was already idle. | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2685 |  */ | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2686 | bool flush_delayed_work(struct delayed_work *dwork) | 
 | 2687 | { | 
 | 2688 | 	if (del_timer_sync(&dwork->timer)) | 
 | 2689 | 		__queue_work(raw_smp_processor_id(), | 
 | 2690 | 			     get_work_cwq(&dwork->work)->wq, &dwork->work); | 
 | 2691 | 	return flush_work(&dwork->work); | 
 | 2692 | } | 
 | 2693 | EXPORT_SYMBOL(flush_delayed_work); | 
 | 2694 |  | 
 | 2695 | /** | 
| Tejun Heo | 0938349 | 2010-09-16 10:48:29 +0200 | [diff] [blame] | 2696 |  * flush_delayed_work_sync - wait for a dwork to finish | 
 | 2697 |  * @dwork: the delayed work to flush | 
 | 2698 |  * | 
 | 2699 |  * Delayed timer is cancelled and the pending work is queued for | 
 | 2700 |  * execution immediately.  Other than timer handling, its behavior | 
 | 2701 |  * is identical to flush_work_sync(). | 
 | 2702 |  * | 
 | 2703 |  * RETURNS: | 
 | 2704 |  * %true if flush_work_sync() waited for the work to finish execution, | 
 | 2705 |  * %false if it was already idle. | 
 | 2706 |  */ | 
 | 2707 | bool flush_delayed_work_sync(struct delayed_work *dwork) | 
 | 2708 | { | 
 | 2709 | 	if (del_timer_sync(&dwork->timer)) | 
 | 2710 | 		__queue_work(raw_smp_processor_id(), | 
 | 2711 | 			     get_work_cwq(&dwork->work)->wq, &dwork->work); | 
 | 2712 | 	return flush_work_sync(&dwork->work); | 
 | 2713 | } | 
 | 2714 | EXPORT_SYMBOL(flush_delayed_work_sync); | 
 | 2715 |  | 
 | 2716 | /** | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2717 |  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish | 
 | 2718 |  * @dwork: the delayed work to cancel | 
 | 2719 |  * | 
 | 2720 |  * This is cancel_work_sync() for delayed works. | 
 | 2721 |  * | 
 | 2722 |  * RETURNS: | 
 | 2723 |  * %true if @dwork was pending, %false otherwise. | 
 | 2724 |  */ | 
 | 2725 | bool cancel_delayed_work_sync(struct delayed_work *dwork) | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2726 | { | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2727 | 	return __cancel_work_timer(&dwork->work, &dwork->timer); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2728 | } | 
| Oleg Nesterov | f5a421a | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2729 | EXPORT_SYMBOL(cancel_delayed_work_sync); | 
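
/*
 * Illustrative usage sketch (added, not in the original): a periodic poll
 * implemented with a self-rearming delayed work and torn down with
 * cancel_delayed_work_sync().  Identifiers below are hypothetical.
 *
 *	static struct delayed_work example_poll;
 *
 *	static void example_poll_fn(struct work_struct *work)
 *	{
 *		example_poll_hardware();
 *		schedule_delayed_work(&example_poll, msecs_to_jiffies(500));
 *	}
 *
 *	start:	INIT_DELAYED_WORK(&example_poll, example_poll_fn);
 *		schedule_delayed_work(&example_poll, msecs_to_jiffies(500));
 *	stop:	cancel_delayed_work_sync(&example_poll);
 */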
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2730 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2731 | /** | 
 | 2732 |  * schedule_work - put work task in global workqueue | 
 | 2733 |  * @work: job to be done | 
 | 2734 |  * | 
| Bart Van Assche | 5b0f437d | 2009-07-30 19:00:53 +0200 | [diff] [blame] | 2735 |  * Returns zero if @work was already on the kernel-global workqueue and | 
 | 2736 |  * non-zero otherwise. | 
 | 2737 |  * | 
 | 2738 |  * This puts a job in the kernel-global workqueue if it was not already | 
 | 2739 |  * queued and leaves it in the same position on the kernel-global | 
 | 2740 |  * workqueue otherwise. | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2741 |  */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2742 | int schedule_work(struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2743 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2744 | 	return queue_work(system_wq, work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2745 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2746 | EXPORT_SYMBOL(schedule_work); | 
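
/*
 * Illustrative usage sketch (added, not in the original): deferring work
 * from an interrupt handler to process context via the system workqueue.
 * Identifiers below are hypothetical.
 *
 *	static void example_deferred(struct work_struct *work)
 *	{
 *		example_do_sleeping_cleanup();
 *	}
 *	static DECLARE_WORK(example_work, example_deferred);
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		schedule_work(&example_work);
 *		return IRQ_HANDLED;
 *	}
 */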
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2747 |  | 
| Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 2748 | /** | 
 | 2749 |  * schedule_work_on - put work task on a specific cpu | 
 | 2750 |  * @cpu: cpu to put the work task on | 
 | 2751 |  * @work: job to be done | 
 | 2752 |  * | 
 | 2753 |  * This puts a job on a specific cpu. | 
 | 2754 |  */ | 
 | 2755 | int schedule_work_on(int cpu, struct work_struct *work) | 
 | 2756 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2757 | 	return queue_work_on(cpu, system_wq, work); | 
| Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 2758 | } | 
 | 2759 | EXPORT_SYMBOL(schedule_work_on); | 
 | 2760 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2761 | /** | 
 | 2762 |  * schedule_delayed_work - put work task in global workqueue after delay | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2763 |  * @dwork: job to be done | 
 | 2764 |  * @delay: number of jiffies to wait or 0 for immediate execution | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2765 |  * | 
 | 2766 |  * After waiting for a given time this puts a job in the kernel-global | 
 | 2767 |  * workqueue. | 
 | 2768 |  */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2769 | int schedule_delayed_work(struct delayed_work *dwork, | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 2770 | 					unsigned long delay) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2771 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2772 | 	return queue_delayed_work(system_wq, dwork, delay); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2773 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2774 | EXPORT_SYMBOL(schedule_delayed_work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2775 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2776 | /** | 
 | 2777 |  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 
 | 2778 |  * @cpu: cpu to use | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2779 |  * @dwork: job to be done | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2780 |  * @delay: number of jiffies to wait | 
 | 2781 |  * | 
 | 2782 |  * After waiting for a given time this puts a job in the kernel-global | 
 | 2783 |  * workqueue on the specified CPU. | 
 | 2784 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2785 | int schedule_delayed_work_on(int cpu, | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2786 | 			struct delayed_work *dwork, unsigned long delay) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2787 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2788 | 	return queue_delayed_work_on(cpu, system_wq, dwork, delay); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2789 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2790 | EXPORT_SYMBOL(schedule_delayed_work_on); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2791 |  | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2792 | /** | 
| Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2793 |  * schedule_on_each_cpu - execute a function synchronously on each online CPU | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2794 |  * @func: the function to call | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2795 |  * | 
| Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2796 |  * schedule_on_each_cpu() executes @func on each online CPU using the | 
 | 2797 |  * system workqueue and blocks until all CPUs have completed. | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2798 |  * schedule_on_each_cpu() is very slow. | 
| Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2799 |  * | 
 | 2800 |  * RETURNS: | 
 | 2801 |  * 0 on success, -errno on failure. | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2802 |  */ | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2803 | int schedule_on_each_cpu(work_func_t func) | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2804 | { | 
 | 2805 | 	int cpu; | 
| Namhyung Kim | 38f5156 | 2010-08-08 14:24:09 +0200 | [diff] [blame] | 2806 | 	struct work_struct __percpu *works; | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2807 |  | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2808 | 	works = alloc_percpu(struct work_struct); | 
 | 2809 | 	if (!works) | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2810 | 		return -ENOMEM; | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2811 |  | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 2812 | 	get_online_cpus(); | 
| Tejun Heo | 9398180 | 2009-11-17 14:06:20 -0800 | [diff] [blame] | 2813 |  | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2814 | 	for_each_online_cpu(cpu) { | 
| Ingo Molnar | 9bfb183 | 2006-12-18 20:05:09 +0100 | [diff] [blame] | 2815 | 		struct work_struct *work = per_cpu_ptr(works, cpu); | 
 | 2816 |  | 
 | 2817 | 		INIT_WORK(work, func); | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2818 | 		schedule_work_on(cpu, work); | 
| Andi Kleen | 65a6446 | 2009-10-14 06:22:47 +0200 | [diff] [blame] | 2819 | 	} | 
| Tejun Heo | 9398180 | 2009-11-17 14:06:20 -0800 | [diff] [blame] | 2820 |  | 
 | 2821 | 	for_each_online_cpu(cpu) | 
 | 2822 | 		flush_work(per_cpu_ptr(works, cpu)); | 
 | 2823 |  | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 2824 | 	put_online_cpus(); | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2825 | 	free_percpu(works); | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2826 | 	return 0; | 
 | 2827 | } | 
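
/*
 * Illustrative usage sketch (added, not in the original): running a
 * function on every online CPU and waiting for all of them, e.g. to sync
 * per-cpu state.  The identifier example_sync_cpu is hypothetical.
 *
 *	static void example_sync_cpu(struct work_struct *unused)
 *	{
 *		example_flush_this_cpus_cache();
 *	}
 *
 *	int ret = schedule_on_each_cpu(example_sync_cpu);
 *	if (ret)
 *		pr_err("example: per-cpu sync failed: %d\n", ret);
 */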
 | 2828 |  | 
| Alan Stern | eef6a7d | 2010-02-12 17:39:21 +0900 | [diff] [blame] | 2829 | /** | 
 | 2830 |  * flush_scheduled_work - ensure that any scheduled work has run to completion. | 
 | 2831 |  * | 
 | 2832 |  * Forces execution of the kernel-global workqueue and blocks until its | 
 | 2833 |  * completion. | 
 | 2834 |  * | 
 | 2835 |  * Think twice before calling this function!  It's very easy to get into | 
 | 2836 |  * trouble if you don't take great care.  Either of the following situations | 
 | 2837 |  * will lead to deadlock: | 
 | 2838 |  * | 
 | 2839 |  *	One of the work items currently on the workqueue needs to acquire | 
 | 2840 |  *	a lock held by your code or its caller. | 
 | 2841 |  * | 
 | 2842 |  *	Your code is running in the context of a work routine. | 
 | 2843 |  * | 
 | 2844 |  * They will be detected by lockdep when they occur, but the first might not | 
 | 2845 |  * occur very often.  It depends on what work items are on the workqueue and | 
 | 2846 |  * what locks they need, which you have no control over. | 
 | 2847 |  * | 
 | 2848 |  * In most situations flushing the entire workqueue is overkill; you merely | 
 | 2849 |  * need to know that a particular work item isn't queued and isn't running. | 
 | 2850 |  * In such cases you should use cancel_delayed_work_sync() or | 
 | 2851 |  * cancel_work_sync() instead. | 
 | 2852 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2853 | void flush_scheduled_work(void) | 
 | 2854 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2855 | 	flush_workqueue(system_wq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2856 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2857 | EXPORT_SYMBOL(flush_scheduled_work); | 
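/*
 * Illustrative sketch, not part of workqueue.c: as the kerneldoc above
 * suggests, a driver that only needs its own work items to be idle should
 * cancel them instead of calling flush_scheduled_work().  The "foo" names
 * below are hypothetical.
 */
struct foo_dev {
	struct work_struct refresh_work;
	struct delayed_work poll_work;
};

static void foo_remove(struct foo_dev *foo)
{
	/* removes a pending item and waits for a running one to finish */
	cancel_work_sync(&foo->refresh_work);
	cancel_delayed_work_sync(&foo->poll_work);
	/* nothing of ours is left, so no flush_scheduled_work() is needed */
}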
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2858 |  | 
 | 2859 | /** | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2860 |  * execute_in_process_context - reliably execute the routine with user context | 
 | 2861 |  * @fn:		the function to execute | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2862 |  * @ew:		guaranteed storage for the execute work structure (must | 
 | 2863 |  *		be available when the work executes) | 
 | 2864 |  * | 
 | 2865 |  * Executes the function immediately if process context is available, | 
 | 2866 |  * otherwise schedules the function for delayed execution. | 
 | 2867 |  * | 
 | 2868 |  * Returns:	0 - function was executed | 
 | 2869 |  *		1 - function was scheduled for execution | 
 | 2870 |  */ | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2871 | int execute_in_process_context(work_func_t fn, struct execute_work *ew) | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2872 | { | 
 | 2873 | 	if (!in_interrupt()) { | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2874 | 		fn(&ew->work); | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2875 | 		return 0; | 
 | 2876 | 	} | 
 | 2877 |  | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2878 | 	INIT_WORK(&ew->work, fn); | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2879 | 	schedule_work(&ew->work); | 
 | 2880 |  | 
 | 2881 | 	return 1; | 
 | 2882 | } | 
 | 2883 | EXPORT_SYMBOL_GPL(execute_in_process_context); | 
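/*
 * Illustrative sketch, not part of workqueue.c: execute_in_process_context()
 * either runs @fn immediately (when not in interrupt context) or queues it
 * on the system workqueue, so the execute_work storage must outlive the
 * call - embedding it in the object being released is the usual pattern.
 * The "bar" names and registry lock below are hypothetical.
 */
static DEFINE_MUTEX(bar_registry_lock);

struct bar {
	struct execute_work ew;
	struct list_head node;
};

static void bar_release_workfn(struct work_struct *work)
{
	struct bar *b = container_of(work, struct bar, ew.work);

	/* may sleep - this is why process context is required */
	mutex_lock(&bar_registry_lock);
	list_del(&b->node);
	mutex_unlock(&bar_registry_lock);
	kfree(b);
}

static void bar_release(struct bar *b)
{
	/* safe from hardirq context; the actual release then happens later */
	execute_in_process_context(bar_release_workfn, &b->ew);
}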
 | 2884 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2885 | int keventd_up(void) | 
 | 2886 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2887 | 	return system_wq != NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2888 | } | 
 | 2889 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2890 | static int alloc_cwqs(struct workqueue_struct *wq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2891 | { | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2892 | 	/* | 
| Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2893 | 	 * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS. | 
 | 2894 | 	 * Make sure that the alignment isn't lower than that of | 
 | 2895 | 	 * unsigned long long. | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2896 | 	 */ | 
| Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2897 | 	const size_t size = sizeof(struct cpu_workqueue_struct); | 
 | 2898 | 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, | 
 | 2899 | 				   __alignof__(unsigned long long)); | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2900 | #ifdef CONFIG_SMP | 
 | 2901 | 	bool percpu = !(wq->flags & WQ_UNBOUND); | 
 | 2902 | #else | 
 | 2903 | 	bool percpu = false; | 
 | 2904 | #endif | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2905 |  | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2906 | 	if (percpu) | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2907 | 		wq->cpu_wq.pcpu = __alloc_percpu(size, align); | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2908 | 	else { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2909 | 		void *ptr; | 
| Frederic Weisbecker | e1d8aa9 | 2009-01-12 23:15:46 +0100 | [diff] [blame] | 2910 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2911 | 		/* | 
 | 2912 | 		 * Allocate enough room to align cwq and put an extra | 
 | 2913 | 		 * pointer at the end pointing back to the originally | 
 | 2914 | 		 * allocated pointer which will be used when freeing. | 
 | 2915 | 		 */ | 
 | 2916 | 		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); | 
 | 2917 | 		if (ptr) { | 
 | 2918 | 			wq->cpu_wq.single = PTR_ALIGN(ptr, align); | 
 | 2919 | 			*(void **)(wq->cpu_wq.single + 1) = ptr; | 
 | 2920 | 		} | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2921 | 	} | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2922 |  | 
| Tejun Heo | 0415b00d1 | 2011-03-24 18:50:09 +0100 | [diff] [blame] | 2923 | 	/* just in case, make sure it's actually aligned */ | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2924 | 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); | 
 | 2925 | 	return wq->cpu_wq.v ? 0 : -ENOMEM; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2926 | } | 
 | 2927 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2928 | static void free_cwqs(struct workqueue_struct *wq) | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 2929 | { | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2930 | #ifdef CONFIG_SMP | 
 | 2931 | 	bool percpu = !(wq->flags & WQ_UNBOUND); | 
 | 2932 | #else | 
 | 2933 | 	bool percpu = false; | 
 | 2934 | #endif | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 2935 |  | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2936 | 	if (percpu) | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2937 | 		free_percpu(wq->cpu_wq.pcpu); | 
 | 2938 | 	else if (wq->cpu_wq.single) { | 
 | 2939 | 		/* the pointer to free is stored right after the cwq */ | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2940 | 		kfree(*(void **)(wq->cpu_wq.single + 1)); | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 2941 | 	} | 
 | 2942 | } | 
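/*
 * Illustrative sketch of the trick used by alloc_cwqs()/free_cwqs() above
 * for the non-percpu case: over-allocate, hand out an aligned object and
 * stash the original kzalloc() pointer right behind it so it can be given
 * back to kfree().  These helpers are hypothetical, not kernel API, and
 * assume @size keeps the trailing pointer slot naturally aligned (true for
 * struct cpu_workqueue_struct).
 */
static void *aligned_alloc_example(size_t size, size_t align)
{
	void *ptr, *obj;

	ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
	if (!ptr)
		return NULL;
	obj = PTR_ALIGN(ptr, align);
	*(void **)(obj + size) = ptr;	/* remember what to kfree() later */
	return obj;
}

static void aligned_free_example(void *obj, size_t size)
{
	if (obj)
		kfree(*(void **)(obj + size));
}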
 | 2943 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2944 | static int wq_clamp_max_active(int max_active, unsigned int flags, | 
 | 2945 | 			       const char *name) | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2946 | { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2947 | 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; | 
 | 2948 |  | 
 | 2949 | 	if (max_active < 1 || max_active > lim) | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2950 | 		printk(KERN_WARNING "workqueue: max_active %d requested for %s " | 
 | 2951 | 		       "is out of range, clamping between %d and %d\n", | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2952 | 		       max_active, name, 1, lim); | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2953 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2954 | 	return clamp_val(max_active, 1, lim); | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2955 | } | 
 | 2956 |  | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2957 | struct workqueue_struct *__alloc_workqueue_key(const char *name, | 
 | 2958 | 					       unsigned int flags, | 
 | 2959 | 					       int max_active, | 
 | 2960 | 					       struct lock_class_key *key, | 
 | 2961 | 					       const char *lock_name) | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2962 | { | 
 | 2963 | 	struct workqueue_struct *wq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2964 | 	unsigned int cpu; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2965 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2966 | 	/* | 
| Tejun Heo | 6370a6a | 2010-10-11 15:12:27 +0200 | [diff] [blame] | 2967 | 	 * Workqueues which may be used during memory reclaim should | 
 | 2968 | 	 * have a rescuer to guarantee forward progress. | 
 | 2969 | 	 */ | 
 | 2970 | 	if (flags & WQ_MEM_RECLAIM) | 
 | 2971 | 		flags |= WQ_RESCUER; | 
 | 2972 |  | 
 | 2973 | 	/* | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2974 | 	 * Unbound workqueues aren't concurrency managed and should be | 
 | 2975 | 	 * dispatched to workers immediately. | 
 | 2976 | 	 */ | 
 | 2977 | 	if (flags & WQ_UNBOUND) | 
 | 2978 | 		flags |= WQ_HIGHPRI; | 
 | 2979 |  | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2980 | 	max_active = max_active ?: WQ_DFL_ACTIVE; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2981 | 	max_active = wq_clamp_max_active(max_active, flags, name); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2982 |  | 
 | 2983 | 	wq = kzalloc(sizeof(*wq), GFP_KERNEL); | 
 | 2984 | 	if (!wq) | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2985 | 		goto err; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2986 |  | 
| Tejun Heo | 97e37d7 | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2987 | 	wq->flags = flags; | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2988 | 	wq->saved_max_active = max_active; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2989 | 	mutex_init(&wq->flush_mutex); | 
 | 2990 | 	atomic_set(&wq->nr_cwqs_to_flush, 0); | 
 | 2991 | 	INIT_LIST_HEAD(&wq->flusher_queue); | 
 | 2992 | 	INIT_LIST_HEAD(&wq->flusher_overflow); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2993 |  | 
 | 2994 | 	wq->name = name; | 
| Johannes Berg | eb13ba8 | 2008-01-16 09:51:58 +0100 | [diff] [blame] | 2995 | 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); | 
| Oleg Nesterov | cce1a16 | 2007-05-09 02:34:13 -0700 | [diff] [blame] | 2996 | 	INIT_LIST_HEAD(&wq->list); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2997 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2998 | 	if (alloc_cwqs(wq) < 0) | 
 | 2999 | 		goto err; | 
 | 3000 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3001 | 	for_each_cwq_cpu(cpu, wq) { | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3002 | 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3003 | 		struct global_cwq *gcwq = get_gcwq(cpu); | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3004 |  | 
| Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3005 | 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3006 | 		cwq->gcwq = gcwq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3007 | 		cwq->wq = wq; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3008 | 		cwq->flush_color = -1; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3009 | 		cwq->max_active = max_active; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3010 | 		INIT_LIST_HEAD(&cwq->delayed_works); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3011 | 	} | 
 | 3012 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3013 | 	if (flags & WQ_RESCUER) { | 
 | 3014 | 		struct worker *rescuer; | 
 | 3015 |  | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 3016 | 		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3017 | 			goto err; | 
 | 3018 |  | 
 | 3019 | 		wq->rescuer = rescuer = alloc_worker(); | 
 | 3020 | 		if (!rescuer) | 
 | 3021 | 			goto err; | 
 | 3022 |  | 
 | 3023 | 		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name); | 
 | 3024 | 		if (IS_ERR(rescuer->task)) | 
 | 3025 | 			goto err; | 
 | 3026 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3027 | 		rescuer->task->flags |= PF_THREAD_BOUND; | 
 | 3028 | 		wake_up_process(rescuer->task); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3029 | 	} | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3030 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3031 | 	/* | 
 | 3032 | 	 * workqueue_lock protects global freeze state and workqueues | 
 | 3033 | 	 * list.  Grab it, set max_active accordingly and add the new | 
 | 3034 | 	 * workqueue to workqueues list. | 
 | 3035 | 	 */ | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3036 | 	spin_lock(&workqueue_lock); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3037 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3038 | 	if (workqueue_freezing && wq->flags & WQ_FREEZABLE) | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3039 | 		for_each_cwq_cpu(cpu, wq) | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3040 | 			get_cwq(cpu, wq)->max_active = 0; | 
 | 3041 |  | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3042 | 	list_add(&wq->list, &workqueues); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3043 |  | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3044 | 	spin_unlock(&workqueue_lock); | 
 | 3045 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3046 | 	return wq; | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 3047 | err: | 
 | 3048 | 	if (wq) { | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3049 | 		free_cwqs(wq); | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 3050 | 		free_mayday_mask(wq->mayday_mask); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3051 | 		kfree(wq->rescuer); | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 3052 | 		kfree(wq); | 
 | 3053 | 	} | 
 | 3054 | 	return NULL; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3055 | } | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3056 | EXPORT_SYMBOL_GPL(__alloc_workqueue_key); | 
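/*
 * Illustrative sketch, not part of workqueue.c: callers reach
 * __alloc_workqueue_key() through the alloc_workqueue() macro.  A queue
 * whose work may be flushed during memory reclaim should pass
 * WQ_MEM_RECLAIM so it gets a rescuer; a max_active of 0 selects
 * WQ_DFL_ACTIVE.  The "baz" names are hypothetical.
 */
static struct workqueue_struct *baz_wq;

static int baz_init_wq(void)
{
	baz_wq = alloc_workqueue("baz_io", WQ_MEM_RECLAIM, 0);
	if (!baz_wq)
		return -ENOMEM;
	return 0;
}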
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3057 |  | 
 | 3058 | /** | 
 | 3059 |  * destroy_workqueue - safely terminate a workqueue | 
 | 3060 |  * @wq: target workqueue | 
 | 3061 |  * | 
 | 3062 |  * Safely destroy a workqueue. All work currently pending will be done first. | 
 | 3063 |  */ | 
 | 3064 | void destroy_workqueue(struct workqueue_struct *wq) | 
 | 3065 | { | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3066 | 	unsigned int cpu; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3067 |  | 
| Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 3068 | 	/* drain it before proceeding with destruction */ | 
 | 3069 | 	drain_workqueue(wq); | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 3070 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3071 | 	/* | 
 | 3072 |  * The wq list is used to freeze wqs; remove this one from the list | 
 | 3073 |  * only after flushing is complete in case a freeze races us. | 
 | 3074 | 	 */ | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 3075 | 	spin_lock(&workqueue_lock); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 3076 | 	list_del(&wq->list); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 3077 | 	spin_unlock(&workqueue_lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3078 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3079 | 	/* sanity check */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3080 | 	for_each_cwq_cpu(cpu, wq) { | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3081 | 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
 | 3082 | 		int i; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3083 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3084 | 		for (i = 0; i < WORK_NR_COLORS; i++) | 
 | 3085 | 			BUG_ON(cwq->nr_in_flight[i]); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3086 | 		BUG_ON(cwq->nr_active); | 
 | 3087 | 		BUG_ON(!list_empty(&cwq->delayed_works)); | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3088 | 	} | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3089 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3090 | 	if (wq->flags & WQ_RESCUER) { | 
 | 3091 | 		kthread_stop(wq->rescuer->task); | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 3092 | 		free_mayday_mask(wq->mayday_mask); | 
| Xiaotian Feng | 8d9df9f | 2010-08-16 09:54:28 +0200 | [diff] [blame] | 3093 | 		kfree(wq->rescuer); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3094 | 	} | 
 | 3095 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3096 | 	free_cwqs(wq); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3097 | 	kfree(wq); | 
 | 3098 | } | 
 | 3099 | EXPORT_SYMBOL_GPL(destroy_workqueue); | 
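/*
 * Illustrative sketch continuing the hypothetical "baz" example above:
 * destroy_workqueue() drains whatever is already queued, but the caller
 * must make sure nothing queues new work once teardown has started.
 */
static void baz_exit_wq(void)
{
	/* all producers of baz work must have been stopped before this */
	destroy_workqueue(baz_wq);
	baz_wq = NULL;
}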
 | 3100 |  | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3101 | /** | 
 | 3102 |  * workqueue_set_max_active - adjust max_active of a workqueue | 
 | 3103 |  * @wq: target workqueue | 
 | 3104 |  * @max_active: new max_active value. | 
 | 3105 |  * | 
 | 3106 |  * Set max_active of @wq to @max_active. | 
 | 3107 |  * | 
 | 3108 |  * CONTEXT: | 
 | 3109 |  * Don't call from IRQ context. | 
 | 3110 |  */ | 
 | 3111 | void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | 
 | 3112 | { | 
 | 3113 | 	unsigned int cpu; | 
 | 3114 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3115 | 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3116 |  | 
 | 3117 | 	spin_lock(&workqueue_lock); | 
 | 3118 |  | 
 | 3119 | 	wq->saved_max_active = max_active; | 
 | 3120 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3121 | 	for_each_cwq_cpu(cpu, wq) { | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3122 | 		struct global_cwq *gcwq = get_gcwq(cpu); | 
 | 3123 |  | 
 | 3124 | 		spin_lock_irq(&gcwq->lock); | 
 | 3125 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3126 | 		if (!(wq->flags & WQ_FREEZABLE) || | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3127 | 		    !(gcwq->flags & GCWQ_FREEZING)) | 
 | 3128 | 			get_cwq(gcwq->cpu, wq)->max_active = max_active; | 
 | 3129 |  | 
 | 3130 | 		spin_unlock_irq(&gcwq->lock); | 
 | 3131 | 	} | 
 | 3132 |  | 
 | 3133 | 	spin_unlock(&workqueue_lock); | 
 | 3134 | } | 
 | 3135 | EXPORT_SYMBOL_GPL(workqueue_set_max_active); | 
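/*
 * Illustrative sketch continuing the hypothetical "baz" example: max_active
 * can be adjusted at runtime, e.g. to serialize work items while a device
 * is degraded and restore the default afterwards.
 */
static void baz_enter_degraded_mode(void)
{
	workqueue_set_max_active(baz_wq, 1);		/* one item at a time */
}

static void baz_leave_degraded_mode(void)
{
	workqueue_set_max_active(baz_wq, WQ_DFL_ACTIVE);
}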
 | 3136 |  | 
 | 3137 | /** | 
 | 3138 |  * workqueue_congested - test whether a workqueue is congested | 
 | 3139 |  * @cpu: CPU in question | 
 | 3140 |  * @wq: target workqueue | 
 | 3141 |  * | 
 | 3142 |  * Test whether @wq's cpu workqueue for @cpu is congested.  There is | 
 | 3143 |  * no synchronization around this function and the test result is | 
 | 3144 |  * unreliable and only useful as advisory hints or for debugging. | 
 | 3145 |  * | 
 | 3146 |  * RETURNS: | 
 | 3147 |  * %true if congested, %false otherwise. | 
 | 3148 |  */ | 
 | 3149 | bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) | 
 | 3150 | { | 
 | 3151 | 	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
 | 3152 |  | 
 | 3153 | 	return !list_empty(&cwq->delayed_works); | 
 | 3154 | } | 
 | 3155 | EXPORT_SYMBOL_GPL(workqueue_congested); | 
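/*
 * Illustrative sketch continuing the hypothetical "baz" example: as noted
 * above the result is only an advisory hint, e.g. for deciding whether to
 * skip optional work while the local CPU's queue is backed up.
 */
static bool baz_local_queue_congested(void)
{
	int cpu = get_cpu();
	bool congested = workqueue_congested(cpu, baz_wq);

	put_cpu();
	return congested;
}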
 | 3156 |  | 
 | 3157 | /** | 
 | 3158 |  * work_cpu - return the last known associated cpu for @work | 
 | 3159 |  * @work: the work of interest | 
 | 3160 |  * | 
 | 3161 |  * RETURNS: | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3162 |  * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise. | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3163 |  */ | 
 | 3164 | unsigned int work_cpu(struct work_struct *work) | 
 | 3165 | { | 
 | 3166 | 	struct global_cwq *gcwq = get_work_gcwq(work); | 
 | 3167 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3168 | 	return gcwq ? gcwq->cpu : WORK_CPU_NONE; | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3169 | } | 
 | 3170 | EXPORT_SYMBOL_GPL(work_cpu); | 
 | 3171 |  | 
 | 3172 | /** | 
 | 3173 |  * work_busy - test whether a work is currently pending or running | 
 | 3174 |  * @work: the work to be tested | 
 | 3175 |  * | 
 | 3176 |  * Test whether @work is currently pending or running.  There is no | 
 | 3177 |  * synchronization around this function and the test result is | 
 | 3178 |  * unreliable and only useful as advisory hints or for debugging. | 
 | 3179 |  * Especially for reentrant wqs, the pending state might hide the | 
 | 3180 |  * running state. | 
 | 3181 |  * | 
 | 3182 |  * RETURNS: | 
 | 3183 |  * OR'd bitmask of WORK_BUSY_* bits. | 
 | 3184 |  */ | 
 | 3185 | unsigned int work_busy(struct work_struct *work) | 
 | 3186 | { | 
 | 3187 | 	struct global_cwq *gcwq = get_work_gcwq(work); | 
 | 3188 | 	unsigned long flags; | 
 | 3189 | 	unsigned int ret = 0; | 
 | 3190 |  | 
 | 3191 | 	if (!gcwq) | 
 | 3192 | 		return 0; | 
 | 3193 |  | 
 | 3194 | 	spin_lock_irqsave(&gcwq->lock, flags); | 
 | 3195 |  | 
 | 3196 | 	if (work_pending(work)) | 
 | 3197 | 		ret |= WORK_BUSY_PENDING; | 
 | 3198 | 	if (find_worker_executing_work(gcwq, work)) | 
 | 3199 | 		ret |= WORK_BUSY_RUNNING; | 
 | 3200 |  | 
 | 3201 | 	spin_unlock_irqrestore(&gcwq->lock, flags); | 
 | 3202 |  | 
 | 3203 | 	return ret; | 
 | 3204 | } | 
 | 3205 | EXPORT_SYMBOL_GPL(work_busy); | 
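/*
 * Illustrative sketch, not part of workqueue.c: work_busy() is only a
 * debugging aid, e.g. for a debugfs status dump.  The function name is
 * hypothetical.
 */
static void example_dump_work_state(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_info("work %p:%s%s\n", work,
		busy & WORK_BUSY_PENDING ? " pending" : "",
		busy & WORK_BUSY_RUNNING ? " running" : "");
}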
 | 3206 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3207 | /* | 
 | 3208 |  * CPU hotplug. | 
 | 3209 |  * | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3210 |  * There are two challenges in supporting CPU hotplug.  Firstly, there | 
 | 3211 |  * are a lot of assumptions on strong associations among work, cwq and | 
 | 3212 |  * gcwq which make migrating pending and scheduled works very | 
 | 3213 |  * difficult to implement without impacting hot paths.  Secondly, | 
 | 3214 |  * gcwqs serve a mix of short, long and very long running works, making | 
 | 3215 |  * blocked draining impractical. | 
 | 3216 |  * | 
 | 3217 |  * This is solved by allowing a gcwq to be detached from CPU, running | 
 | 3218 |  * it with unbound (rogue) workers and allowing it to be reattached | 
 | 3219 |  * later if the cpu comes back online.  A separate thread is created | 
 | 3220 |  * to govern a gcwq in such state and is called the trustee of the | 
 | 3221 |  * gcwq. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3222 |  * | 
 | 3223 |  * Trustee states and their descriptions. | 
 | 3224 |  * | 
 | 3225 |  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a | 
 | 3226 |  *		new trustee is started with this state. | 
 | 3227 |  * | 
 | 3228 |  * IN_CHARGE	Once started, trustee will enter this state after | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3229 |  *		assuming the manager role and making all existing | 
 | 3230 |  *		workers rogue.  DOWN_PREPARE waits for trustee to | 
 | 3231 |  *		enter this state.  After reaching IN_CHARGE, trustee | 
 | 3232 |  *		tries to execute the pending worklist until it's empty | 
 | 3233 |  *		and the state is set to BUTCHER, or the state is set | 
 | 3234 |  *		to RELEASE. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3235 |  * | 
 | 3236 |  * BUTCHER	Command state which is set by the cpu callback after | 
 | 3237 |  *		the cpu has gone down.  Once this state is set, the trustee | 
 | 3238 |  *		knows that there will be no new works on the worklist | 
 | 3239 |  *		and once the worklist is empty it can proceed to | 
 | 3240 |  *		killing idle workers. | 
 | 3241 |  * | 
 | 3242 |  * RELEASE	Command state which is set by the cpu callback if the | 
 | 3243 |  *		cpu down has been canceled or it has come online | 
 | 3244 |  *		again.  After recognizing this state, trustee stops | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3245 |  *		trying to drain or butcher and clears ROGUE, rebinds | 
 | 3246 |  *		all remaining workers back to the cpu and releases | 
 | 3247 |  *		manager role. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3248 |  * | 
 | 3249 |  * DONE		Trustee will enter this state after BUTCHER or RELEASE | 
 | 3250 |  *		is complete. | 
 | 3251 |  * | 
 | 3252 |  *          trustee                 CPU                draining | 
 | 3253 |  *         took over                down               complete | 
 | 3254 |  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE | 
 | 3255 |  *                        |                     |                  ^ | 
 | 3256 |  *                        | CPU is back online  v   return workers | | 
 | 3257 |  *                         ----------------> RELEASE -------------- | 
 | 3258 |  */ | 
 | 3259 |  | 
 | 3260 | /** | 
 | 3261 |  * trustee_wait_event_timeout - timed event wait for trustee | 
 | 3262 |  * @cond: condition to wait for | 
 | 3263 |  * @timeout: timeout in jiffies | 
 | 3264 |  * | 
 | 3265 |  * wait_event_timeout() for trustee to use.  Handles locking and | 
 | 3266 |  * checks for RELEASE request. | 
 | 3267 |  * | 
 | 3268 |  * CONTEXT: | 
 | 3269 |  * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
 | 3270 |  * multiple times.  To be used by trustee. | 
 | 3271 |  * | 
 | 3272 |  * RETURNS: | 
 | 3273 |  * Positive indicating left time if @cond is satisfied, 0 if timed | 
 | 3274 |  * out, -1 if canceled. | 
 | 3275 |  */ | 
 | 3276 | #define trustee_wait_event_timeout(cond, timeout) ({			\ | 
 | 3277 | 	long __ret = (timeout);						\ | 
 | 3278 | 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\ | 
 | 3279 | 	       __ret) {							\ | 
 | 3280 | 		spin_unlock_irq(&gcwq->lock);				\ | 
 | 3281 | 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\ | 
 | 3282 | 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\ | 
 | 3283 | 			__ret);						\ | 
 | 3284 | 		spin_lock_irq(&gcwq->lock);				\ | 
 | 3285 | 	}								\ | 
 | 3286 | 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\ | 
 | 3287 | }) | 
 | 3288 |  | 
 | 3289 | /** | 
 | 3290 |  * trustee_wait_event - event wait for trustee | 
 | 3291 |  * @cond: condition to wait for | 
 | 3292 |  * | 
 | 3293 |  * wait_event() for trustee to use.  Automatically handles locking and | 
 | 3294 |  * checks for RELEASE request. | 
 | 3295 |  * | 
 | 3296 |  * CONTEXT: | 
 | 3297 |  * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
 | 3298 |  * multiple times.  To be used by trustee. | 
 | 3299 |  * | 
 | 3300 |  * RETURNS: | 
 | 3301 |  * 0 if @cond is satisfied, -1 if canceled. | 
 | 3302 |  */ | 
 | 3303 | #define trustee_wait_event(cond) ({					\ | 
 | 3304 | 	long __ret1;							\ | 
 | 3305 | 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\ | 
 | 3306 | 	__ret1 < 0 ? -1 : 0;						\ | 
 | 3307 | }) | 
 | 3308 |  | 
 | 3309 | static int __cpuinit trustee_thread(void *__gcwq) | 
 | 3310 | { | 
 | 3311 | 	struct global_cwq *gcwq = __gcwq; | 
 | 3312 | 	struct worker *worker; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3313 | 	struct work_struct *work; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3314 | 	struct hlist_node *pos; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3315 | 	long rc; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3316 | 	int i; | 
 | 3317 |  | 
 | 3318 | 	BUG_ON(gcwq->cpu != smp_processor_id()); | 
 | 3319 |  | 
 | 3320 | 	spin_lock_irq(&gcwq->lock); | 
 | 3321 | 	/* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3322 | 	 * Claim the manager position and make all workers rogue. | 
 | 3323 | 	 * Trustee must be bound to the target cpu and can't be | 
 | 3324 | 	 * cancelled. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3325 | 	 */ | 
 | 3326 | 	BUG_ON(gcwq->cpu != smp_processor_id()); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3327 | 	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS)); | 
 | 3328 | 	BUG_ON(rc < 0); | 
 | 3329 |  | 
 | 3330 | 	gcwq->flags |= GCWQ_MANAGING_WORKERS; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3331 |  | 
 | 3332 | 	list_for_each_entry(worker, &gcwq->idle_list, entry) | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3333 | 		worker->flags |= WORKER_ROGUE; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3334 |  | 
 | 3335 | 	for_each_busy_worker(worker, i, pos, gcwq) | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3336 | 		worker->flags |= WORKER_ROGUE; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3337 |  | 
 | 3338 | 	/* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3339 | 	 * Call schedule() so that we cross rq->lock and thus can | 
 | 3340 | 	 * guarantee sched callbacks see the rogue flag.  This is | 
 | 3341 | 	 * necessary as scheduler callbacks may be invoked from other | 
 | 3342 | 	 * cpus. | 
 | 3343 | 	 */ | 
 | 3344 | 	spin_unlock_irq(&gcwq->lock); | 
 | 3345 | 	schedule(); | 
 | 3346 | 	spin_lock_irq(&gcwq->lock); | 
 | 3347 |  | 
 | 3348 | 	/* | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3349 | 	 * Sched callbacks are disabled now.  Zap nr_running.  After | 
 | 3350 | 	 * this, nr_running stays zero and need_more_worker() and | 
 | 3351 | 	 * keep_working() are always true as long as the worklist is | 
 | 3352 | 	 * not empty. | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3353 | 	 */ | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3354 | 	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3355 |  | 
 | 3356 | 	spin_unlock_irq(&gcwq->lock); | 
 | 3357 | 	del_timer_sync(&gcwq->idle_timer); | 
 | 3358 | 	spin_lock_irq(&gcwq->lock); | 
 | 3359 |  | 
 | 3360 | 	/* | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3361 | 	 * We're now in charge.  Notify and proceed to drain.  We need | 
 | 3362 | 	 * to keep the gcwq running during the whole CPU down | 
 | 3363 | 	 * procedure as other cpu hotunplug callbacks may need to | 
 | 3364 | 	 * flush currently running tasks. | 
 | 3365 | 	 */ | 
 | 3366 | 	gcwq->trustee_state = TRUSTEE_IN_CHARGE; | 
 | 3367 | 	wake_up_all(&gcwq->trustee_wait); | 
 | 3368 |  | 
 | 3369 | 	/* | 
 | 3370 | 	 * The original cpu is in the process of dying and may go away | 
 | 3371 | 	 * anytime now.  When that happens, we and all workers would | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3372 | 	 * be migrated to other cpus.  Try draining any remaining work.  We | 
 | 3373 | 	 * want to get it over with ASAP - spam rescuers, wake up as | 
 | 3374 | 	 * many idlers as necessary and create new ones till the | 
 | 3375 | 	 * worklist is empty.  Note that if the gcwq is frozen, there | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3376 | 	 * may be frozen works in freezable cwqs.  Don't declare | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3377 | 	 * completion while frozen. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3378 | 	 */ | 
 | 3379 | 	while (gcwq->nr_workers != gcwq->nr_idle || | 
 | 3380 | 	       gcwq->flags & GCWQ_FREEZING || | 
 | 3381 | 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) { | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3382 | 		int nr_works = 0; | 
 | 3383 |  | 
 | 3384 | 		list_for_each_entry(work, &gcwq->worklist, entry) { | 
 | 3385 | 			send_mayday(work); | 
 | 3386 | 			nr_works++; | 
 | 3387 | 		} | 
 | 3388 |  | 
 | 3389 | 		list_for_each_entry(worker, &gcwq->idle_list, entry) { | 
 | 3390 | 			if (!nr_works--) | 
 | 3391 | 				break; | 
 | 3392 | 			wake_up_process(worker->task); | 
 | 3393 | 		} | 
 | 3394 |  | 
 | 3395 | 		if (need_to_create_worker(gcwq)) { | 
 | 3396 | 			spin_unlock_irq(&gcwq->lock); | 
 | 3397 | 			worker = create_worker(gcwq, false); | 
 | 3398 | 			spin_lock_irq(&gcwq->lock); | 
 | 3399 | 			if (worker) { | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3400 | 				worker->flags |= WORKER_ROGUE; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3401 | 				start_worker(worker); | 
 | 3402 | 			} | 
 | 3403 | 		} | 
 | 3404 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3405 | 		/* give a breather */ | 
 | 3406 | 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) | 
 | 3407 | 			break; | 
 | 3408 | 	} | 
 | 3409 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3410 | 	/* | 
 | 3411 | 	 * Either all works have been scheduled and cpu is down, or | 
 | 3412 | 	 * cpu down has already been canceled.  Wait for and butcher | 
 | 3413 | 	 * all workers till we're canceled. | 
 | 3414 | 	 */ | 
 | 3415 | 	do { | 
 | 3416 | 		rc = trustee_wait_event(!list_empty(&gcwq->idle_list)); | 
 | 3417 | 		while (!list_empty(&gcwq->idle_list)) | 
 | 3418 | 			destroy_worker(list_first_entry(&gcwq->idle_list, | 
 | 3419 | 							struct worker, entry)); | 
 | 3420 | 	} while (gcwq->nr_workers && rc >= 0); | 
 | 3421 |  | 
 | 3422 | 	/* | 
 | 3423 | 	 * At this point, either draining has completed and no worker | 
 | 3424 | 	 * is left, or cpu down has been canceled or the cpu is being | 
 | 3425 | 	 * brought back up.  There shouldn't be any idle one left. | 
 | 3426 |  * Tell the remaining busy ones to rebind once each finishes its | 
 | 3427 |  * currently scheduled works by scheduling the rebind_work. | 
 | 3428 | 	 */ | 
 | 3429 | 	WARN_ON(!list_empty(&gcwq->idle_list)); | 
 | 3430 |  | 
 | 3431 | 	for_each_busy_worker(worker, i, pos, gcwq) { | 
 | 3432 | 		struct work_struct *rebind_work = &worker->rebind_work; | 
 | 3433 |  | 
 | 3434 | 		/* | 
 | 3435 | 		 * Rebind_work may race with future cpu hotplug | 
 | 3436 | 		 * operations.  Use a separate flag to mark that | 
 | 3437 | 		 * rebinding is scheduled. | 
 | 3438 | 		 */ | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3439 | 		worker->flags |= WORKER_REBIND; | 
 | 3440 | 		worker->flags &= ~WORKER_ROGUE; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3441 |  | 
 | 3442 | 		/* queue rebind_work, wq doesn't matter, use the default one */ | 
 | 3443 | 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, | 
 | 3444 | 				     work_data_bits(rebind_work))) | 
 | 3445 | 			continue; | 
 | 3446 |  | 
 | 3447 | 		debug_work_activate(rebind_work); | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3448 | 		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3449 | 			    worker->scheduled.next, | 
 | 3450 | 			    work_color_to_flags(WORK_NO_COLOR)); | 
 | 3451 | 	} | 
 | 3452 |  | 
 | 3453 | 	/* relinquish manager role */ | 
 | 3454 | 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS; | 
 | 3455 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3456 | 	/* notify completion */ | 
 | 3457 | 	gcwq->trustee = NULL; | 
 | 3458 | 	gcwq->trustee_state = TRUSTEE_DONE; | 
 | 3459 | 	wake_up_all(&gcwq->trustee_wait); | 
 | 3460 | 	spin_unlock_irq(&gcwq->lock); | 
 | 3461 | 	return 0; | 
 | 3462 | } | 
 | 3463 |  | 
 | 3464 | /** | 
 | 3465 |  * wait_trustee_state - wait for trustee to enter the specified state | 
 | 3466 |  * @gcwq: gcwq the trustee of interest belongs to | 
 | 3467 |  * @state: target state to wait for | 
 | 3468 |  * | 
 | 3469 |  * Wait for the trustee to reach @state.  DONE is already matched. | 
 | 3470 |  * | 
 | 3471 |  * CONTEXT: | 
 | 3472 |  * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
 | 3473 |  * multiple times.  To be used by cpu_callback. | 
 | 3474 |  */ | 
 | 3475 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) | 
| Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 3476 | __releases(&gcwq->lock) | 
 | 3477 | __acquires(&gcwq->lock) | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3478 | { | 
 | 3479 | 	if (!(gcwq->trustee_state == state || | 
 | 3480 | 	      gcwq->trustee_state == TRUSTEE_DONE)) { | 
 | 3481 | 		spin_unlock_irq(&gcwq->lock); | 
 | 3482 | 		__wait_event(gcwq->trustee_wait, | 
 | 3483 | 			     gcwq->trustee_state == state || | 
 | 3484 | 			     gcwq->trustee_state == TRUSTEE_DONE); | 
 | 3485 | 		spin_lock_irq(&gcwq->lock); | 
 | 3486 | 	} | 
 | 3487 | } | 
 | 3488 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3489 | static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | 
 | 3490 | 						unsigned long action, | 
 | 3491 | 						void *hcpu) | 
 | 3492 | { | 
 | 3493 | 	unsigned int cpu = (unsigned long)hcpu; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3494 | 	struct global_cwq *gcwq = get_gcwq(cpu); | 
 | 3495 | 	struct task_struct *new_trustee = NULL; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3496 | 	struct worker *uninitialized_var(new_worker); | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3497 | 	unsigned long flags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3498 |  | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 3499 | 	action &= ~CPU_TASKS_FROZEN; | 
 | 3500 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3501 | 	switch (action) { | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3502 | 	case CPU_DOWN_PREPARE: | 
 | 3503 | 		new_trustee = kthread_create(trustee_thread, gcwq, | 
 | 3504 | 					     "workqueue_trustee/%d", cpu); | 
 | 3505 | 		if (IS_ERR(new_trustee)) | 
 | 3506 | 			return notifier_from_errno(PTR_ERR(new_trustee)); | 
 | 3507 | 		kthread_bind(new_trustee, cpu); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3508 | 		/* fall through */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3509 | 	case CPU_UP_PREPARE: | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3510 | 		BUG_ON(gcwq->first_idle); | 
 | 3511 | 		new_worker = create_worker(gcwq, false); | 
 | 3512 | 		if (!new_worker) { | 
 | 3513 | 			if (new_trustee) | 
 | 3514 | 				kthread_stop(new_trustee); | 
 | 3515 | 			return NOTIFY_BAD; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3516 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3517 | 	} | 
 | 3518 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3519 | 	/* some are called w/ irq disabled, don't disturb irq status */ | 
 | 3520 | 	spin_lock_irqsave(&gcwq->lock, flags); | 
 | 3521 |  | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 3522 | 	switch (action) { | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3523 | 	case CPU_DOWN_PREPARE: | 
 | 3524 | 		/* initialize trustee and tell it to acquire the gcwq */ | 
 | 3525 | 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE); | 
 | 3526 | 		gcwq->trustee = new_trustee; | 
 | 3527 | 		gcwq->trustee_state = TRUSTEE_START; | 
 | 3528 | 		wake_up_process(gcwq->trustee); | 
 | 3529 | 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3530 | 		/* fall through */ | 
 | 3531 | 	case CPU_UP_PREPARE: | 
 | 3532 | 		BUG_ON(gcwq->first_idle); | 
 | 3533 | 		gcwq->first_idle = new_worker; | 
 | 3534 | 		break; | 
 | 3535 |  | 
 | 3536 | 	case CPU_DYING: | 
 | 3537 | 		/* | 
 | 3538 | 		 * Before this, the trustee and all workers except for | 
 | 3539 | 		 * the ones which are still executing works from | 
 | 3540 | 		 * before the last CPU down must be on the cpu.  After | 
 | 3541 | 		 * this, they'll all be diasporas. | 
 | 3542 | 		 */ | 
 | 3543 | 		gcwq->flags |= GCWQ_DISASSOCIATED; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3544 | 		break; | 
 | 3545 |  | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 3546 | 	case CPU_POST_DEAD: | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3547 | 		gcwq->trustee_state = TRUSTEE_BUTCHER; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3548 | 		/* fall through */ | 
 | 3549 | 	case CPU_UP_CANCELED: | 
 | 3550 | 		destroy_worker(gcwq->first_idle); | 
 | 3551 | 		gcwq->first_idle = NULL; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3552 | 		break; | 
 | 3553 |  | 
 | 3554 | 	case CPU_DOWN_FAILED: | 
 | 3555 | 	case CPU_ONLINE: | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3556 | 		gcwq->flags &= ~GCWQ_DISASSOCIATED; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3557 | 		if (gcwq->trustee_state != TRUSTEE_DONE) { | 
 | 3558 | 			gcwq->trustee_state = TRUSTEE_RELEASE; | 
 | 3559 | 			wake_up_process(gcwq->trustee); | 
 | 3560 | 			wait_trustee_state(gcwq, TRUSTEE_DONE); | 
 | 3561 | 		} | 
 | 3562 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3563 | 		/* | 
 | 3564 | 		 * Trustee is done and there might be no worker left. | 
 | 3565 | 		 * Put the first_idle in and request a real manager to | 
 | 3566 | 		 * take a look. | 
 | 3567 | 		 */ | 
 | 3568 | 		spin_unlock_irq(&gcwq->lock); | 
 | 3569 | 		kthread_bind(gcwq->first_idle->task, cpu); | 
 | 3570 | 		spin_lock_irq(&gcwq->lock); | 
 | 3571 | 		gcwq->flags |= GCWQ_MANAGE_WORKERS; | 
 | 3572 | 		start_worker(gcwq->first_idle); | 
 | 3573 | 		gcwq->first_idle = NULL; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3574 | 		break; | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 3575 | 	} | 
 | 3576 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3577 | 	spin_unlock_irqrestore(&gcwq->lock, flags); | 
 | 3578 |  | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3579 | 	return notifier_from_errno(0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3580 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3581 |  | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3582 | #ifdef CONFIG_SMP | 
| Rusty Russell | 8ccad40 | 2009-01-16 15:31:15 -0800 | [diff] [blame] | 3583 |  | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3584 | struct work_for_cpu { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3585 | 	struct completion completion; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3586 | 	long (*fn)(void *); | 
 | 3587 | 	void *arg; | 
 | 3588 | 	long ret; | 
 | 3589 | }; | 
 | 3590 |  | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3591 | static int do_work_for_cpu(void *_wfc) | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3592 | { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3593 | 	struct work_for_cpu *wfc = _wfc; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3594 | 	wfc->ret = wfc->fn(wfc->arg); | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3595 | 	complete(&wfc->completion); | 
 | 3596 | 	return 0; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3597 | } | 
 | 3598 |  | 
 | 3599 | /** | 
 | 3600 |  * work_on_cpu - run a function in user context on a particular cpu | 
 | 3601 |  * @cpu: the cpu to run on | 
 | 3602 |  * @fn: the function to run | 
 | 3603 |  * @arg: the function arg | 
 | 3604 |  * | 
| Rusty Russell | 31ad908 | 2009-01-16 15:31:15 -0800 | [diff] [blame] | 3605 |  * This will return the value @fn returns. | 
 | 3606 |  * It is up to the caller to ensure that the cpu doesn't go offline. | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3607 |  * The caller must not hold any locks which would prevent @fn from completing. | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3608 |  */ | 
 | 3609 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | 
 | 3610 | { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3611 | 	struct task_struct *sub_thread; | 
 | 3612 | 	struct work_for_cpu wfc = { | 
 | 3613 | 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), | 
 | 3614 | 		.fn = fn, | 
 | 3615 | 		.arg = arg, | 
 | 3616 | 	}; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3617 |  | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3618 | 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); | 
 | 3619 | 	if (IS_ERR(sub_thread)) | 
 | 3620 | 		return PTR_ERR(sub_thread); | 
 | 3621 | 	kthread_bind(sub_thread, cpu); | 
 | 3622 | 	wake_up_process(sub_thread); | 
 | 3623 | 	wait_for_completion(&wfc.completion); | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3624 | 	return wfc.ret; | 
 | 3625 | } | 
 | 3626 | EXPORT_SYMBOL_GPL(work_on_cpu); | 
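/*
 * Illustrative sketch, not part of workqueue.c: work_on_cpu() runs @fn in a
 * kthread bound to @cpu and waits for its return value, which suits slow
 * per-cpu accesses (MSRs, firmware calls).  The caller keeps the cpu online
 * across the call; the names below are hypothetical.
 */
static long example_percpu_query(void *arg)
{
	/* runs with current bound to the requested cpu */
	return 42;	/* placeholder for a per-cpu read */
}

static long example_query_cpu(unsigned int cpu)
{
	long ret = -ENODEV;

	get_online_cpus();
	if (cpu_online(cpu))
		ret = work_on_cpu(cpu, example_percpu_query, NULL);
	put_online_cpus();
	return ret;
}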
 | 3627 | #endif /* CONFIG_SMP */ | 
 | 3628 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3629 | #ifdef CONFIG_FREEZER | 
| Rusty Russell | e7577c5 | 2009-01-01 10:12:25 +1030 | [diff] [blame] | 3630 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3631 | /** | 
 | 3632 |  * freeze_workqueues_begin - begin freezing workqueues | 
 | 3633 |  * | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3634 |  * Start freezing workqueues.  After this function returns, all freezable | 
 | 3635 |  * workqueues will queue new works to their delayed_works list instead of | 
 | 3636 |  * gcwq->worklist. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3637 |  * | 
 | 3638 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3639 |  * Grabs and releases workqueue_lock and gcwq->lock's. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3640 |  */ | 
 | 3641 | void freeze_workqueues_begin(void) | 
 | 3642 | { | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3643 | 	unsigned int cpu; | 
 | 3644 |  | 
 | 3645 | 	spin_lock(&workqueue_lock); | 
 | 3646 |  | 
 | 3647 | 	BUG_ON(workqueue_freezing); | 
 | 3648 | 	workqueue_freezing = true; | 
 | 3649 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3650 | 	for_each_gcwq_cpu(cpu) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3651 | 		struct global_cwq *gcwq = get_gcwq(cpu); | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3652 | 		struct workqueue_struct *wq; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3653 |  | 
 | 3654 | 		spin_lock_irq(&gcwq->lock); | 
 | 3655 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3656 | 		BUG_ON(gcwq->flags & GCWQ_FREEZING); | 
 | 3657 | 		gcwq->flags |= GCWQ_FREEZING; | 
 | 3658 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3659 | 		list_for_each_entry(wq, &workqueues, list) { | 
 | 3660 | 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
 | 3661 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3662 | 			if (cwq && wq->flags & WQ_FREEZABLE) | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3663 | 				cwq->max_active = 0; | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3664 | 		} | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3665 |  | 
 | 3666 | 		spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3667 | 	} | 
 | 3668 |  | 
 | 3669 | 	spin_unlock(&workqueue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3670 | } | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3671 |  | 
 | 3672 | /** | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3673 |  * freeze_workqueues_busy - are freezable workqueues still busy? | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3674 |  * | 
 | 3675 |  * Check whether freezing is complete.  This function must be called | 
 | 3676 |  * between freeze_workqueues_begin() and thaw_workqueues(). | 
 | 3677 |  * | 
 | 3678 |  * CONTEXT: | 
 | 3679 |  * Grabs and releases workqueue_lock. | 
 | 3680 |  * | 
 | 3681 |  * RETURNS: | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3682 |  * %true if some freezable workqueues are still busy.  %false if freezing | 
 | 3683 |  * is complete. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3684 |  */ | 
 | 3685 | bool freeze_workqueues_busy(void) | 
 | 3686 | { | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3687 | 	unsigned int cpu; | 
 | 3688 | 	bool busy = false; | 
 | 3689 |  | 
 | 3690 | 	spin_lock(&workqueue_lock); | 
 | 3691 |  | 
 | 3692 | 	BUG_ON(!workqueue_freezing); | 
 | 3693 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3694 | 	for_each_gcwq_cpu(cpu) { | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3695 | 		struct workqueue_struct *wq; | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3696 | 		/* | 
 | 3697 | 		 * nr_active is monotonically decreasing.  It's safe | 
 | 3698 | 		 * to peek without lock. | 
 | 3699 | 		 */ | 
 | 3700 | 		list_for_each_entry(wq, &workqueues, list) { | 
 | 3701 | 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
 | 3702 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3703 | 			if (!cwq || !(wq->flags & WQ_FREEZABLE)) | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3704 | 				continue; | 
 | 3705 |  | 
 | 3706 | 			BUG_ON(cwq->nr_active < 0); | 
 | 3707 | 			if (cwq->nr_active) { | 
 | 3708 | 				busy = true; | 
 | 3709 | 				goto out_unlock; | 
 | 3710 | 			} | 
 | 3711 | 		} | 
 | 3712 | 	} | 
 | 3713 | out_unlock: | 
 | 3714 | 	spin_unlock(&workqueue_lock); | 
 | 3715 | 	return busy; | 
 | 3716 | } | 
 | 3717 |  | 
 | 3718 | /** | 
 | 3719 |  * thaw_workqueues - thaw workqueues | 
 | 3720 |  * | 
 | 3721 |  * Thaw workqueues.  Normal queueing is restored and all collected | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 3722 |  * frozen works are transferred to their respective gcwq worklists. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3723 |  * | 
 | 3724 |  * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3725 |  * Grabs and releases workqueue_lock and gcwq->lock's. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3726 |  */ | 
 | 3727 | void thaw_workqueues(void) | 
 | 3728 | { | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3729 | 	unsigned int cpu; | 
 | 3730 |  | 
 | 3731 | 	spin_lock(&workqueue_lock); | 
 | 3732 |  | 
 | 3733 | 	if (!workqueue_freezing) | 
 | 3734 | 		goto out_unlock; | 
 | 3735 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3736 | 	for_each_gcwq_cpu(cpu) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3737 | 		struct global_cwq *gcwq = get_gcwq(cpu); | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3738 | 		struct workqueue_struct *wq; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3739 |  | 
 | 3740 | 		spin_lock_irq(&gcwq->lock); | 
 | 3741 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3742 | 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING)); | 
 | 3743 | 		gcwq->flags &= ~GCWQ_FREEZING; | 
 | 3744 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3745 | 		list_for_each_entry(wq, &workqueues, list) { | 
 | 3746 | 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
 | 3747 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3748 | 			if (!cwq || !(wq->flags & WQ_FREEZABLE)) | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3749 | 				continue; | 
 | 3750 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3751 | 			/* restore max_active and repopulate worklist */ | 
 | 3752 | 			cwq->max_active = wq->saved_max_active; | 
 | 3753 |  | 
 | 3754 | 			while (!list_empty(&cwq->delayed_works) && | 
 | 3755 | 			       cwq->nr_active < cwq->max_active) | 
 | 3756 | 				cwq_activate_first_delayed(cwq); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3757 | 		} | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3758 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3759 | 		wake_up_worker(gcwq); | 
 | 3760 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3761 | 		spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3762 | 	} | 
 | 3763 |  | 
 | 3764 | 	workqueue_freezing = false; | 
 | 3765 | out_unlock: | 
 | 3766 | 	spin_unlock(&workqueue_lock); | 
 | 3767 | } | 
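/*
 * Illustrative sketch, not part of workqueue.c: the suspend/hibernation
 * freezer (kernel/power/process.c in this kernel) is the intended user of
 * the three functions above - begin freezing, poll until no freezable
 * workqueue has active work, and thaw on resume or on failure.  Timeout
 * handling is omitted and msleep() needs <linux/delay.h>.
 */
static int example_freeze_wqs(void)
{
	freeze_workqueues_begin();

	while (freeze_workqueues_busy())
		msleep(10);	/* the real freezer also enforces a timeout */

	return 0;
}

static void example_thaw_wqs(void)
{
	thaw_workqueues();
}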
 | 3768 | #endif /* CONFIG_FREEZER */ | 
 | 3769 |  | 
| Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 3770 | static int __init init_workqueues(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3771 | { | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3772 | 	unsigned int cpu; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3773 | 	int i; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3774 |  | 
| Tejun Heo | f650094 | 2010-08-09 11:50:34 +0200 | [diff] [blame] | 3775 | 	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3776 |  | 
 | 3777 | 	/* initialize gcwqs */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3778 | 	for_each_gcwq_cpu(cpu) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3779 | 		struct global_cwq *gcwq = get_gcwq(cpu); | 
 | 3780 |  | 
 | 3781 | 		spin_lock_init(&gcwq->lock); | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 3782 | 		INIT_LIST_HEAD(&gcwq->worklist); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3783 | 		gcwq->cpu = cpu; | 
| Tejun Heo | 477a3c3 | 2010-08-31 10:54:35 +0200 | [diff] [blame] | 3784 | 		gcwq->flags |= GCWQ_DISASSOCIATED; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3785 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3786 | 		INIT_LIST_HEAD(&gcwq->idle_list); | 
 | 3787 | 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) | 
 | 3788 | 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]); | 
 | 3789 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3790 | 		init_timer_deferrable(&gcwq->idle_timer); | 
 | 3791 | 		gcwq->idle_timer.function = idle_worker_timeout; | 
 | 3792 | 		gcwq->idle_timer.data = (unsigned long)gcwq; | 
 | 3793 |  | 
 | 3794 | 		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout, | 
 | 3795 | 			    (unsigned long)gcwq); | 
 | 3796 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3797 | 		ida_init(&gcwq->worker_ida); | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3798 |  | 
 | 3799 | 		gcwq->trustee_state = TRUSTEE_DONE; | 
 | 3800 | 		init_waitqueue_head(&gcwq->trustee_wait); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3801 | 	} | 
 | 3802 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3803 | 	/* create the initial worker */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3804 | 	for_each_online_gcwq_cpu(cpu) { | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3805 | 		struct global_cwq *gcwq = get_gcwq(cpu); | 
 | 3806 | 		struct worker *worker; | 
 | 3807 |  | 
| Tejun Heo | 477a3c3 | 2010-08-31 10:54:35 +0200 | [diff] [blame] | 3808 | 		if (cpu != WORK_CPU_UNBOUND) | 
 | 3809 | 			gcwq->flags &= ~GCWQ_DISASSOCIATED; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3810 | 		worker = create_worker(gcwq, true); | 
 | 3811 | 		BUG_ON(!worker); | 
 | 3812 | 		spin_lock_irq(&gcwq->lock); | 
 | 3813 | 		start_worker(worker); | 
 | 3814 | 		spin_unlock_irq(&gcwq->lock); | 
 | 3815 | 	} | 
 | 3816 |  | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3817 | 	system_wq = alloc_workqueue("events", 0, 0); | 
 | 3818 | 	system_long_wq = alloc_workqueue("events_long", 0, 0); | 
 | 3819 | 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3820 | 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, | 
 | 3821 | 					    WQ_UNBOUND_MAX_ACTIVE); | 
| Tejun Heo | 24d51ad | 2011-02-21 09:52:50 +0100 | [diff] [blame] | 3822 | 	system_freezable_wq = alloc_workqueue("events_freezable", | 
 | 3823 | 					      WQ_FREEZABLE, 0); | 
| Hitoshi Mitake | e5cba24 | 2010-11-26 12:06:44 +0100 | [diff] [blame] | 3824 | 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || | 
| Tejun Heo | 24d51ad | 2011-02-21 09:52:50 +0100 | [diff] [blame] | 3825 | 	       !system_unbound_wq || !system_freezable_wq); | 
| Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 3826 | 	return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3827 | } | 
| Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 3828 | early_initcall(init_workqueues); |