/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */
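
/*
 * Editor's note: a minimal usage sketch of the API this file implements,
 * added for illustration only and kept out of the build with #if 0.  It
 * assumes nothing beyond the standard helpers from <linux/workqueue.h>;
 * the function and variable names below are invented for the example.
 */
#if 0
static void example_fn(struct work_struct *work)
{
	/* runs in process context on one of the shared pool's workers */
}

static DECLARE_WORK(example_work, example_fn);

static void example_usage(void)
{
	schedule_work(&example_work);	/* queue on the default system_wq */
	flush_work(&example_work);	/* wait for example_fn() to finish */
}
#endif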

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"

enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};

/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for dworkers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)			free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif
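
/*
 * Editor's note on the UP variants above: the whole mask degenerates to a
 * single bit, so for_each_mayday_cpu() becomes "cpu = 0" guarded by that
 * bit being set - the body that follows it executes at most once.
 */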

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			saved_max_active; /* W: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};
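
/*
 * Editor's note: an illustrative, build-excluded sketch of how a
 * workqueue_struct is normally obtained and released through the public
 * interface of this era, alloc_workqueue()/destroy_workqueue().  The
 * name and flags below are arbitrary example values.
 */
#if 0
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}
#endif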

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)

static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}

/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to the
 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
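
/*
 * Editor's note: illustrative, build-excluded use of the iterators above.
 * is_chained_work() further down walks the gcwqs the same way; this sketch
 * only shows the shape of the loop, the printout is invented.
 */
#if 0
static void example_dump_gcwqs(void)
{
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		printk(KERN_DEBUG "gcwq for cpu %u has %d workers\n",
		       gcwq->cpu, gcwq->nr_workers);
	}
}
#endif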

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
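
/*
 * Editor's note: illustrative, build-excluded sketch of the on-stack work
 * item convention that __init_work() and destroy_work_on_stack() above
 * support.  INIT_WORK_ONSTACK() comes from <linux/workqueue.h>; the rest
 * of the names are invented for the example.
 */
#if 0
static void example_onstack_fn(struct work_struct *work)
{
}

static void example_onstack_usage(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, example_onstack_fn);
	schedule_work(&work);
	flush_work(&work);		/* must finish before leaving the frame */
	destroy_work_on_stack(&work);	/* pairs with INIT_WORK_ONSTACK() */
}
#endif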

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_gcwq_nr_running(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(gcwq_nr_running, cpu);
	else
		return &unbound_gcwq_nr_running;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids)) {
#ifdef CONFIG_SMP
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
#else
			return wq->cpu_wq.single;
#endif
		}
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 * cleared and the work data contains the cpu number it was last on.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
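/*
 * Editor's sketch of the resulting encoding, consistent with
 * set_work_cwq() and set_work_cpu() below:
 *
 *   on queue:  data = (unsigned long)cwq | WORK_STRUCT_CWQ | flag bits
 *   off queue: data = cpu << WORK_STRUCT_FLAG_BITS | flag bits (CWQ clear)
 */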
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}

/*
 * Policy functions.  These define the policies on how the global
 * worker pool is managed.  Unless noted otherwise, these functions
 * assume that they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
		gcwq->flags & GCWQ_HIGHPRI_PENDING;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 */
static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct global_cwq *gcwq)
{
	return gcwq->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct global_cwq *gcwq)
{
	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

	return !list_empty(&gcwq->worklist) &&
		(atomic_read(nr_running) <= 1 ||
		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct global_cwq *gcwq)
{
	return need_more_worker(gcwq) && !may_start_working(gcwq);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct global_cwq *gcwq)
{
	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
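
/*
 * Editor's note, a worked example of the check above: with 8 busy workers
 * and MAX_IDLE_WORKERS_RATIO == 4, too_many_workers() starts returning
 * true once there are 4 idle workers ((4 - 2) * 4 >= 8), so up to 3 idle
 * workers are tolerated on top of the busy ones.
 */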

/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING))
		atomic_inc(get_gcwq_nr_running(cpu));
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct global_cwq *gcwq = get_gcwq(cpu);
	atomic_t *nr_running = get_gcwq_nr_running(cpu);

	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that the trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that no one else
	 * could be manipulating idle_list, so dereferencing idle_list
	 * without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
		to_wakeup = first_worker(gcwq);
	return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct global_cwq *gcwq = worker->gcwq;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}

/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}

/**
 * gcwq_determine_ins_pos - find insertion position
 * @gcwq: gcwq of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @gcwq, determine insertion
 * position for the work.  If @cwq is for a HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets the GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
					       struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}

/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either worker_sched_deactivated() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}

| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 941 | /* | 
|  | 942 | * Test whether @work is being queued from another work executing on the | 
|  | 943 | * same workqueue.  This is rather expensive and should only be used from | 
|  | 944 | * cold paths. | 
|  | 945 | */ | 
|  | 946 | static bool is_chained_work(struct workqueue_struct *wq) | 
|  | 947 | { | 
|  | 948 | unsigned long flags; | 
|  | 949 | unsigned int cpu; | 
|  | 950 |  | 
|  | 951 | for_each_gcwq_cpu(cpu) { | 
|  | 952 | struct global_cwq *gcwq = get_gcwq(cpu); | 
|  | 953 | struct worker *worker; | 
|  | 954 | struct hlist_node *pos; | 
|  | 955 | int i; | 
|  | 956 |  | 
|  | 957 | spin_lock_irqsave(&gcwq->lock, flags); | 
|  | 958 | for_each_busy_worker(worker, i, pos, gcwq) { | 
|  | 959 | if (worker->task != current) | 
|  | 960 | continue; | 
|  | 961 | spin_unlock_irqrestore(&gcwq->lock, flags); | 
|  | 962 | /* | 
|  | 963 | * I'm @worker, no locking necessary.  See if @work | 
|  | 964 | * is headed to the same workqueue. | 
|  | 965 | */ | 
|  | 966 | return worker->current_cwq->wq == wq; | 
|  | 967 | } | 
|  | 968 | spin_unlock_irqrestore(&gcwq->lock, flags); | 
|  | 969 | } | 
|  | 970 | return false; | 
|  | 971 | } | 
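|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch, not part of this file: a "chained" work item is | 
|  |  | * one queued by a work already running on the same workqueue, which is | 
|  |  | * what is_chained_work() detects.  Such queueing stays allowed even | 
|  |  | * after the workqueue has been marked WQ_DYING.  The names | 
|  |  | * example_chain_wq, example_chain_fn and example_chain_work below are | 
|  |  | * hypothetical. | 
|  |  | */ | 
|  |  | static struct workqueue_struct *example_chain_wq; | 
|  |  |  | 
|  |  | static void example_chain_fn(struct work_struct *work) | 
|  |  | { | 
|  |  | /* requeueing ourselves on example_chain_wq is chained queueing */ | 
|  |  | queue_work(example_chain_wq, work); | 
|  |  | } | 
|  |  | static DECLARE_WORK(example_chain_work, example_chain_fn); | 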
|  | 972 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 973 | static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 974 | struct work_struct *work) | 
|  | 975 | { | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 976 | struct global_cwq *gcwq; | 
|  | 977 | struct cpu_workqueue_struct *cwq; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 978 | struct list_head *worklist; | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 979 | unsigned int work_flags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 | unsigned long flags; | 
|  | 981 |  | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 982 | debug_work_activate(work); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 983 |  | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 984 | /* if dying, only works from the same workqueue are allowed */ | 
|  | 985 | if (unlikely(wq->flags & WQ_DYING) && | 
|  | 986 | WARN_ON_ONCE(!is_chained_work(wq))) | 
| Tejun Heo | e41e704 | 2010-08-24 14:22:47 +0200 | [diff] [blame] | 987 | return; | 
|  | 988 |  | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 989 | /* determine gcwq to use */ | 
|  | 990 | if (!(wq->flags & WQ_UNBOUND)) { | 
| Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 991 | struct global_cwq *last_gcwq; | 
|  | 992 |  | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 993 | if (unlikely(cpu == WORK_CPU_UNBOUND)) | 
|  | 994 | cpu = raw_smp_processor_id(); | 
|  | 995 |  | 
| Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 996 | /* | 
|  | 997 | * It's multi cpu.  If @wq is non-reentrant and @work | 
|  | 998 | * was previously on a different cpu, it might still | 
|  | 999 | * be running there, in which case the work needs to | 
|  | 1000 | * be queued on that cpu to guarantee non-reentrance. | 
|  | 1001 | */ | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1002 | gcwq = get_gcwq(cpu); | 
| Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1003 | if (wq->flags & WQ_NON_REENTRANT && | 
|  | 1004 | (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) { | 
|  | 1005 | struct worker *worker; | 
|  | 1006 |  | 
|  | 1007 | spin_lock_irqsave(&last_gcwq->lock, flags); | 
|  | 1008 |  | 
|  | 1009 | worker = find_worker_executing_work(last_gcwq, work); | 
|  | 1010 |  | 
|  | 1011 | if (worker && worker->current_cwq->wq == wq) | 
|  | 1012 | gcwq = last_gcwq; | 
|  | 1013 | else { | 
|  | 1014 | /* meh... not running there, queue here */ | 
|  | 1015 | spin_unlock_irqrestore(&last_gcwq->lock, flags); | 
|  | 1016 | spin_lock_irqsave(&gcwq->lock, flags); | 
|  | 1017 | } | 
|  | 1018 | } else | 
|  | 1019 | spin_lock_irqsave(&gcwq->lock, flags); | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1020 | } else { | 
|  | 1021 | gcwq = get_gcwq(WORK_CPU_UNBOUND); | 
|  | 1022 | spin_lock_irqsave(&gcwq->lock, flags); | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1023 | } | 
|  | 1024 |  | 
|  | 1025 | /* gcwq determined, get cwq and queue */ | 
|  | 1026 | cwq = get_cwq(gcwq->cpu, wq); | 
| Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1027 | trace_workqueue_queue_work(cpu, cwq, work); | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1028 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1029 | BUG_ON(!list_empty(&work->entry)); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1030 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1031 | cwq->nr_in_flight[cwq->work_color]++; | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1032 | work_flags = work_color_to_flags(cwq->work_color); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1033 |  | 
|  | 1034 | if (likely(cwq->nr_active < cwq->max_active)) { | 
| Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1035 | trace_workqueue_activate_work(work); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1036 | cwq->nr_active++; | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1037 | worklist = gcwq_determine_ins_pos(gcwq, cwq); | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1038 | } else { | 
|  | 1039 | work_flags |= WORK_STRUCT_DELAYED; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1040 | worklist = &cwq->delayed_works; | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1041 | } | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1042 |  | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1043 | insert_work(cwq, work, worklist, work_flags); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1044 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1045 | spin_unlock_irqrestore(&gcwq->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | } | 
|  | 1047 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1048 | /** | 
|  | 1049 | * queue_work - queue work on a workqueue | 
|  | 1050 | * @wq: workqueue to use | 
|  | 1051 | * @work: work to queue | 
|  | 1052 | * | 
| Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1053 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | * | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 1055 | * We queue the work to the CPU on which it was submitted, but if the CPU dies | 
|  | 1056 | * it can be processed by another CPU. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1057 | */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1058 | int queue_work(struct workqueue_struct *wq, struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | { | 
| Oleg Nesterov | ef1ca23 | 2008-07-25 01:47:53 -0700 | [diff] [blame] | 1060 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 |  | 
| Oleg Nesterov | ef1ca23 | 2008-07-25 01:47:53 -0700 | [diff] [blame] | 1062 | ret = queue_work_on(get_cpu(), wq, work); | 
|  | 1063 | put_cpu(); | 
|  | 1064 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1065 | return ret; | 
|  | 1066 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1067 | EXPORT_SYMBOL_GPL(queue_work); | 
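|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch, not part of this file: typical caller-side use | 
|  |  | * of queue_work().  All names below (example_wq, example_handler, | 
|  |  | * example_item, example_kick) are hypothetical; example_wq would be | 
|  |  | * created elsewhere, e.g. with alloc_workqueue("example", 0, 0). | 
|  |  | */ | 
|  |  | static struct workqueue_struct *example_wq; | 
|  |  |  | 
|  |  | static void example_handler(struct work_struct *work) | 
|  |  | { | 
|  |  | pr_info("example work ran on cpu %d\n", raw_smp_processor_id()); | 
|  |  | } | 
|  |  | static DECLARE_WORK(example_item, example_handler); | 
|  |  |  | 
|  |  | static void example_kick(void) | 
|  |  | { | 
|  |  | /* returns 0 if example_item was already pending */ | 
|  |  | queue_work(example_wq, &example_item); | 
|  |  | } | 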
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 |  | 
| Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 1069 | /** | 
|  | 1070 | * queue_work_on - queue work on specific cpu | 
|  | 1071 | * @cpu: CPU number to execute work on | 
|  | 1072 | * @wq: workqueue to use | 
|  | 1073 | * @work: work to queue | 
|  | 1074 | * | 
|  | 1075 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 
|  | 1076 | * | 
|  | 1077 | * We queue the work to a specific CPU; the caller must ensure that | 
|  | 1078 | * the CPU can't go away. | 
|  | 1079 | */ | 
|  | 1080 | int | 
|  | 1081 | queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) | 
|  | 1082 | { | 
|  | 1083 | int ret = 0; | 
|  | 1084 |  | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1085 | if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1086 | __queue_work(cpu, wq, work); | 
| Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 1087 | ret = 1; | 
|  | 1088 | } | 
|  | 1089 | return ret; | 
|  | 1090 | } | 
|  | 1091 | EXPORT_SYMBOL_GPL(queue_work_on); | 
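|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch, not part of this file: one way for a caller of | 
|  |  | * queue_work_on() to keep the target CPU from going away, as required | 
|  |  | * above, is to hold the hotplug read lock around the call.  The helper | 
|  |  | * name example_queue_on is hypothetical. | 
|  |  | */ | 
|  |  | static void example_queue_on(int cpu, struct workqueue_struct *wq, | 
|  |  | struct work_struct *work) | 
|  |  | { | 
|  |  | get_online_cpus(); | 
|  |  | if (cpu_online(cpu)) | 
|  |  | queue_work_on(cpu, wq, work); | 
|  |  | put_online_cpus(); | 
|  |  | } | 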
|  | 1092 |  | 
| Li Zefan | 6d141c3 | 2008-02-08 04:21:09 -0800 | [diff] [blame] | 1093 | static void delayed_work_timer_fn(unsigned long __data) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | { | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1095 | struct delayed_work *dwork = (struct delayed_work *)__data; | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1096 | struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1098 | __queue_work(smp_processor_id(), cwq->wq, &dwork->work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | } | 
|  | 1100 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1101 | /** | 
|  | 1102 | * queue_delayed_work - queue work on a workqueue after delay | 
|  | 1103 | * @wq: workqueue to use | 
| Randy Dunlap | af9997e | 2006-12-22 01:06:52 -0800 | [diff] [blame] | 1104 | * @dwork: delayable work to queue | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1105 | * @delay: number of jiffies to wait before queueing | 
|  | 1106 | * | 
| Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1107 | * Returns 0 if @dwork was already on a queue, non-zero otherwise. | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1108 | */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1109 | int queue_delayed_work(struct workqueue_struct *wq, | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1110 | struct delayed_work *dwork, unsigned long delay) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | { | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1112 | if (delay == 0) | 
| Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1113 | return queue_work(wq, &dwork->work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1114 |  | 
| Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1115 | return queue_delayed_work_on(-1, wq, dwork, delay); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1117 | EXPORT_SYMBOL_GPL(queue_delayed_work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1119 | /** | 
|  | 1120 | * queue_delayed_work_on - queue work on specific CPU after delay | 
|  | 1121 | * @cpu: CPU number to execute work on | 
|  | 1122 | * @wq: workqueue to use | 
| Randy Dunlap | af9997e | 2006-12-22 01:06:52 -0800 | [diff] [blame] | 1123 | * @dwork: work to queue | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1124 | * @delay: number of jiffies to wait before queueing | 
|  | 1125 | * | 
| Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1126 | * Returns 0 if @dwork was already on a queue, non-zero otherwise. | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1127 | */ | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1128 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1129 | struct delayed_work *dwork, unsigned long delay) | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1130 | { | 
|  | 1131 | int ret = 0; | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1132 | struct timer_list *timer = &dwork->timer; | 
|  | 1133 | struct work_struct *work = &dwork->work; | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1134 |  | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1135 | if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1136 | unsigned int lcpu; | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1137 |  | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1138 | BUG_ON(timer_pending(timer)); | 
|  | 1139 | BUG_ON(!list_empty(&work->entry)); | 
|  | 1140 |  | 
| Andrew Liu | 8a3e77c | 2008-05-01 04:35:14 -0700 | [diff] [blame] | 1141 | timer_stats_timer_set_start_info(&dwork->timer); | 
|  | 1142 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1143 | /* | 
|  | 1144 | * This stores cwq for the moment, for the timer_fn. | 
|  | 1145 | * Note that the work's gcwq is preserved to allow | 
|  | 1146 | * reentrance detection for delayed works. | 
|  | 1147 | */ | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1148 | if (!(wq->flags & WQ_UNBOUND)) { | 
|  | 1149 | struct global_cwq *gcwq = get_work_gcwq(work); | 
|  | 1150 |  | 
|  | 1151 | if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND) | 
|  | 1152 | lcpu = gcwq->cpu; | 
|  | 1153 | else | 
|  | 1154 | lcpu = raw_smp_processor_id(); | 
|  | 1155 | } else | 
|  | 1156 | lcpu = WORK_CPU_UNBOUND; | 
|  | 1157 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1158 | set_work_cwq(work, get_cwq(lcpu, wq), 0); | 
| Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1159 |  | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1160 | timer->expires = jiffies + delay; | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1161 | timer->data = (unsigned long)dwork; | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1162 | timer->function = delayed_work_timer_fn; | 
| Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1163 |  | 
|  | 1164 | if (unlikely(cpu >= 0)) | 
|  | 1165 | add_timer_on(timer, cpu); | 
|  | 1166 | else | 
|  | 1167 | add_timer(timer); | 
| Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1168 | ret = 1; | 
|  | 1169 | } | 
|  | 1170 | return ret; | 
|  | 1171 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1172 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | 
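|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch, not part of this file: a self-rearming delayed | 
|  |  | * work.  INIT_DELAYED_WORK() sets up both the work and its timer and | 
|  |  | * the delay is given in jiffies.  The names example_dwork, example_poll | 
|  |  | * and example_start are hypothetical; system_wq is the default | 
|  |  | * workqueue. | 
|  |  | */ | 
|  |  | static struct delayed_work example_dwork; | 
|  |  |  | 
|  |  | static void example_poll(struct work_struct *work) | 
|  |  | { | 
|  |  | struct delayed_work *dwork = to_delayed_work(work); | 
|  |  |  | 
|  |  | /* do the periodic processing, then rearm roughly one second later */ | 
|  |  | queue_delayed_work(system_wq, dwork, HZ); | 
|  |  | } | 
|  |  |  | 
|  |  | static void example_start(void) | 
|  |  | { | 
|  |  | INIT_DELAYED_WORK(&example_dwork, example_poll); | 
|  |  | queue_delayed_work(system_wq, &example_dwork, HZ); | 
|  |  | } | 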
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1173 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1174 | /** | 
|  | 1175 | * worker_enter_idle - enter idle state | 
|  | 1176 | * @worker: worker which is entering idle state | 
|  | 1177 | * | 
|  | 1178 | * @worker is entering idle state.  Update stats and idle timer if | 
|  | 1179 | * necessary. | 
|  | 1180 | * | 
|  | 1181 | * LOCKING: | 
|  | 1182 | * spin_lock_irq(gcwq->lock). | 
|  | 1183 | */ | 
|  | 1184 | static void worker_enter_idle(struct worker *worker) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1185 | { | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1186 | struct global_cwq *gcwq = worker->gcwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1188 | BUG_ON(worker->flags & WORKER_IDLE); | 
|  | 1189 | BUG_ON(!list_empty(&worker->entry) && | 
|  | 1190 | (worker->hentry.next || worker->hentry.pprev)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 |  | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1192 | /* can't use worker_set_flags(), also called from start_worker() */ | 
|  | 1193 | worker->flags |= WORKER_IDLE; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1194 | gcwq->nr_idle++; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1195 | worker->last_active = jiffies; | 
| Peter Zijlstra | d5abe66 | 2006-12-06 20:37:26 -0800 | [diff] [blame] | 1196 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1197 | /* idle_list is LIFO */ | 
|  | 1198 | list_add(&worker->entry, &gcwq->idle_list); | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1199 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1200 | if (likely(!(worker->flags & WORKER_ROGUE))) { | 
|  | 1201 | if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) | 
|  | 1202 | mod_timer(&gcwq->idle_timer, | 
|  | 1203 | jiffies + IDLE_WORKER_TIMEOUT); | 
|  | 1204 | } else | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1205 | wake_up_all(&gcwq->trustee_wait); | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1206 |  | 
|  | 1207 | /* sanity check nr_running */ | 
|  | 1208 | WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle && | 
|  | 1209 | atomic_read(get_gcwq_nr_running(gcwq->cpu))); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 | } | 
|  | 1211 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1212 | /** | 
|  | 1213 | * worker_leave_idle - leave idle state | 
|  | 1214 | * @worker: worker which is leaving idle state | 
|  | 1215 | * | 
|  | 1216 | * @worker is leaving idle state.  Update stats. | 
|  | 1217 | * | 
|  | 1218 | * LOCKING: | 
|  | 1219 | * spin_lock_irq(gcwq->lock). | 
|  | 1220 | */ | 
|  | 1221 | static void worker_leave_idle(struct worker *worker) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 | { | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1223 | struct global_cwq *gcwq = worker->gcwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1224 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1225 | BUG_ON(!(worker->flags & WORKER_IDLE)); | 
| Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1226 | worker_clr_flags(worker, WORKER_IDLE); | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1227 | gcwq->nr_idle--; | 
|  | 1228 | list_del_init(&worker->entry); | 
|  | 1229 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1231 | /** | 
|  | 1232 | * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq | 
|  | 1233 | * @worker: self | 
|  | 1234 | * | 
|  | 1235 | * Works which are scheduled while the cpu is online must at least be | 
|  | 1236 | * scheduled to a worker which is bound to the cpu so that if they are | 
|  | 1237 | * flushed from cpu callbacks while cpu is going down, they are | 
|  | 1238 | * guaranteed to execute on the cpu. | 
|  | 1239 | * | 
|  | 1240 | * This function is to be used by rogue workers and rescuers to bind | 
|  | 1241 | * themselves to the target cpu and may race with cpu going down or | 
|  | 1242 | * coming online.  kthread_bind() can't be used because it may put the | 
|  | 1243 | * worker onto an already dead cpu and set_cpus_allowed_ptr() can't be | 
|  | 1244 | * used verbatim as it's best effort and blocking, and the gcwq may be | 
|  | 1245 | * [dis]associated in the meantime. | 
|  | 1246 | * | 
|  | 1247 | * This function tries set_cpus_allowed_ptr(), locks gcwq and verifies | 
|  | 1248 | * the binding against GCWQ_DISASSOCIATED which is set during | 
|  | 1249 | * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters | 
|  | 1250 | * idle state or fetches works without dropping lock, it can guarantee | 
|  | 1251 | * the scheduling requirement described in the first paragraph. | 
|  | 1252 | * | 
|  | 1253 | * CONTEXT: | 
|  | 1254 | * Might sleep.  Called without any lock but returns with gcwq->lock | 
|  | 1255 | * held. | 
|  | 1256 | * | 
|  | 1257 | * RETURNS: | 
|  | 1258 | * %true if the associated gcwq is online (@worker is successfully | 
|  | 1259 | * bound), %false if offline. | 
|  | 1260 | */ | 
|  | 1261 | static bool worker_maybe_bind_and_lock(struct worker *worker) | 
| Namhyung Kim | 972fa1c | 2010-08-22 23:19:43 +0900 | [diff] [blame] | 1262 | __acquires(&gcwq->lock) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1263 | { | 
|  | 1264 | struct global_cwq *gcwq = worker->gcwq; | 
|  | 1265 | struct task_struct *task = worker->task; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1267 | while (true) { | 
|  | 1268 | /* | 
|  | 1269 | * The following call may fail, succeed or succeed | 
|  | 1270 | * without actually migrating the task to the cpu if | 
|  | 1271 | * it races with cpu hotunplug operation.  Verify | 
|  | 1272 | * against GCWQ_DISASSOCIATED. | 
|  | 1273 | */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1274 | if (!(gcwq->flags & GCWQ_DISASSOCIATED)) | 
|  | 1275 | set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); | 
| Oleg Nesterov | 85f4186 | 2007-05-09 02:34:20 -0700 | [diff] [blame] | 1276 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1277 | spin_lock_irq(&gcwq->lock); | 
|  | 1278 | if (gcwq->flags & GCWQ_DISASSOCIATED) | 
|  | 1279 | return false; | 
|  | 1280 | if (task_cpu(task) == gcwq->cpu && | 
|  | 1281 | cpumask_equal(¤t->cpus_allowed, | 
|  | 1282 | get_cpu_mask(gcwq->cpu))) | 
|  | 1283 | return true; | 
|  | 1284 | spin_unlock_irq(&gcwq->lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1285 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1286 | /* CPU has come up in between, retry migration */ | 
|  | 1287 | cpu_relax(); | 
|  | 1288 | } | 
|  | 1289 | } | 
|  | 1290 |  | 
|  | 1291 | /* | 
|  | 1292 | * Function for worker->rebind_work used to rebind rogue busy workers | 
|  | 1293 | * to the associated cpu which is coming back online.  This is | 
|  | 1294 | * scheduled by cpu up but can race with other cpu hotplug operations | 
|  | 1295 | * and may be executed twice without intervening cpu down. | 
|  | 1296 | */ | 
|  | 1297 | static void worker_rebind_fn(struct work_struct *work) | 
|  | 1298 | { | 
|  | 1299 | struct worker *worker = container_of(work, struct worker, rebind_work); | 
|  | 1300 | struct global_cwq *gcwq = worker->gcwq; | 
|  | 1301 |  | 
|  | 1302 | if (worker_maybe_bind_and_lock(worker)) | 
|  | 1303 | worker_clr_flags(worker, WORKER_REBIND); | 
|  | 1304 |  | 
|  | 1305 | spin_unlock_irq(&gcwq->lock); | 
|  | 1306 | } | 
|  | 1307 |  | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1308 | static struct worker *alloc_worker(void) | 
|  | 1309 | { | 
|  | 1310 | struct worker *worker; | 
|  | 1311 |  | 
|  | 1312 | worker = kzalloc(sizeof(*worker), GFP_KERNEL); | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1313 | if (worker) { | 
|  | 1314 | INIT_LIST_HEAD(&worker->entry); | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1315 | INIT_LIST_HEAD(&worker->scheduled); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1316 | INIT_WORK(&worker->rebind_work, worker_rebind_fn); | 
|  | 1317 | /* on creation a worker is in !idle && prep state */ | 
|  | 1318 | worker->flags = WORKER_PREP; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1319 | } | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1320 | return worker; | 
|  | 1321 | } | 
|  | 1322 |  | 
|  | 1323 | /** | 
|  | 1324 | * create_worker - create a new workqueue worker | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1325 | * @gcwq: gcwq the new worker will belong to | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1326 | * @bind: whether to bind the worker to @gcwq's cpu or not | 
|  | 1327 | * | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1328 | * Create a new worker which is bound to @gcwq.  The returned worker | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1329 | * can be started by calling start_worker() or destroyed using | 
|  | 1330 | * destroy_worker(). | 
|  | 1331 | * | 
|  | 1332 | * CONTEXT: | 
|  | 1333 | * Might sleep.  Does GFP_KERNEL allocations. | 
|  | 1334 | * | 
|  | 1335 | * RETURNS: | 
|  | 1336 | * Pointer to the newly created worker. | 
|  | 1337 | */ | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1338 | static struct worker *create_worker(struct global_cwq *gcwq, bool bind) | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1339 | { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1340 | bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1341 | struct worker *worker = NULL; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1342 | int id = -1; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1343 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1344 | spin_lock_irq(&gcwq->lock); | 
|  | 1345 | while (ida_get_new(&gcwq->worker_ida, &id)) { | 
|  | 1346 | spin_unlock_irq(&gcwq->lock); | 
|  | 1347 | if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL)) | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1348 | goto fail; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1349 | spin_lock_irq(&gcwq->lock); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1350 | } | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1351 | spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1352 |  | 
|  | 1353 | worker = alloc_worker(); | 
|  | 1354 | if (!worker) | 
|  | 1355 | goto fail; | 
|  | 1356 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1357 | worker->gcwq = gcwq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1358 | worker->id = id; | 
|  | 1359 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1360 | if (!on_unbound_cpu) | 
|  | 1361 | worker->task = kthread_create(worker_thread, worker, | 
|  | 1362 | "kworker/%u:%d", gcwq->cpu, id); | 
|  | 1363 | else | 
|  | 1364 | worker->task = kthread_create(worker_thread, worker, | 
|  | 1365 | "kworker/u:%d", id); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1366 | if (IS_ERR(worker->task)) | 
|  | 1367 | goto fail; | 
|  | 1368 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1369 | /* | 
|  | 1370 | * A rogue worker will become a regular one if CPU comes | 
|  | 1371 | * online later on.  Make sure every worker has | 
|  | 1372 | * PF_THREAD_BOUND set. | 
|  | 1373 | */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1374 | if (bind && !on_unbound_cpu) | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1375 | kthread_bind(worker->task, gcwq->cpu); | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1376 | else { | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1377 | worker->task->flags |= PF_THREAD_BOUND; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1378 | if (on_unbound_cpu) | 
|  | 1379 | worker->flags |= WORKER_UNBOUND; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1380 | } | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1381 |  | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1382 | return worker; | 
|  | 1383 | fail: | 
|  | 1384 | if (id >= 0) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1385 | spin_lock_irq(&gcwq->lock); | 
|  | 1386 | ida_remove(&gcwq->worker_ida, id); | 
|  | 1387 | spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1388 | } | 
|  | 1389 | kfree(worker); | 
|  | 1390 | return NULL; | 
|  | 1391 | } | 
|  | 1392 |  | 
|  | 1393 | /** | 
|  | 1394 | * start_worker - start a newly created worker | 
|  | 1395 | * @worker: worker to start | 
|  | 1396 | * | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1397 | * Make the gcwq aware of @worker and start it. | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1398 | * | 
|  | 1399 | * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1400 | * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1401 | */ | 
|  | 1402 | static void start_worker(struct worker *worker) | 
|  | 1403 | { | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1404 | worker->flags |= WORKER_STARTED; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1405 | worker->gcwq->nr_workers++; | 
|  | 1406 | worker_enter_idle(worker); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1407 | wake_up_process(worker->task); | 
|  | 1408 | } | 
|  | 1409 |  | 
|  | 1410 | /** | 
|  | 1411 | * destroy_worker - destroy a workqueue worker | 
|  | 1412 | * @worker: worker to be destroyed | 
|  | 1413 | * | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1414 | * Destroy @worker and adjust @gcwq stats accordingly. | 
|  | 1415 | * | 
|  | 1416 | * CONTEXT: | 
|  | 1417 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1418 | */ | 
|  | 1419 | static void destroy_worker(struct worker *worker) | 
|  | 1420 | { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1421 | struct global_cwq *gcwq = worker->gcwq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1422 | int id = worker->id; | 
|  | 1423 |  | 
|  | 1424 | /* sanity check frenzy */ | 
|  | 1425 | BUG_ON(worker->current_work); | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1426 | BUG_ON(!list_empty(&worker->scheduled)); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1427 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1428 | if (worker->flags & WORKER_STARTED) | 
|  | 1429 | gcwq->nr_workers--; | 
|  | 1430 | if (worker->flags & WORKER_IDLE) | 
|  | 1431 | gcwq->nr_idle--; | 
|  | 1432 |  | 
|  | 1433 | list_del_init(&worker->entry); | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1434 | worker->flags |= WORKER_DIE; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1435 |  | 
|  | 1436 | spin_unlock_irq(&gcwq->lock); | 
|  | 1437 |  | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1438 | kthread_stop(worker->task); | 
|  | 1439 | kfree(worker); | 
|  | 1440 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1441 | spin_lock_irq(&gcwq->lock); | 
|  | 1442 | ida_remove(&gcwq->worker_ida, id); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1443 | } | 
|  | 1444 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1445 | static void idle_worker_timeout(unsigned long __gcwq) | 
|  | 1446 | { | 
|  | 1447 | struct global_cwq *gcwq = (void *)__gcwq; | 
|  | 1448 |  | 
|  | 1449 | spin_lock_irq(&gcwq->lock); | 
|  | 1450 |  | 
|  | 1451 | if (too_many_workers(gcwq)) { | 
|  | 1452 | struct worker *worker; | 
|  | 1453 | unsigned long expires; | 
|  | 1454 |  | 
|  | 1455 | /* idle_list is kept in LIFO order, check the last one */ | 
|  | 1456 | worker = list_entry(gcwq->idle_list.prev, struct worker, entry); | 
|  | 1457 | expires = worker->last_active + IDLE_WORKER_TIMEOUT; | 
|  | 1458 |  | 
|  | 1459 | if (time_before(jiffies, expires)) | 
|  | 1460 | mod_timer(&gcwq->idle_timer, expires); | 
|  | 1461 | else { | 
|  | 1462 | /* it's been idle for too long, wake up manager */ | 
|  | 1463 | gcwq->flags |= GCWQ_MANAGE_WORKERS; | 
|  | 1464 | wake_up_worker(gcwq); | 
|  | 1465 | } | 
|  | 1466 | } | 
|  | 1467 |  | 
|  | 1468 | spin_unlock_irq(&gcwq->lock); | 
|  | 1469 | } | 
|  | 1470 |  | 
|  | 1471 | static bool send_mayday(struct work_struct *work) | 
|  | 1472 | { | 
|  | 1473 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 
|  | 1474 | struct workqueue_struct *wq = cwq->wq; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1475 | unsigned int cpu; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1476 |  | 
|  | 1477 | if (!(wq->flags & WQ_RESCUER)) | 
|  | 1478 | return false; | 
|  | 1479 |  | 
|  | 1480 | /* mayday mayday mayday */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1481 | cpu = cwq->gcwq->cpu; | 
|  | 1482 | /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ | 
|  | 1483 | if (cpu == WORK_CPU_UNBOUND) | 
|  | 1484 | cpu = 0; | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 1485 | if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask)) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1486 | wake_up_process(wq->rescuer->task); | 
|  | 1487 | return true; | 
|  | 1488 | } | 
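|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch, not part of this file: maydays are only sent for | 
|  |  | * workqueues which carry a rescuer.  A workqueue that may sit on the | 
|  |  | * memory reclaim path is typically created with WQ_MEM_RECLAIM (which | 
|  |  | * maps to WQ_RESCUER internally); the names below are hypothetical. | 
|  |  | */ | 
|  |  | static struct workqueue_struct *example_reclaim_wq; | 
|  |  |  | 
|  |  | static int __init example_reclaim_init(void) | 
|  |  | { | 
|  |  | example_reclaim_wq = alloc_workqueue("example_reclaim", WQ_MEM_RECLAIM, 1); | 
|  |  | return example_reclaim_wq ? 0 : -ENOMEM; | 
|  |  | } | 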
|  | 1489 |  | 
|  | 1490 | static void gcwq_mayday_timeout(unsigned long __gcwq) | 
|  | 1491 | { | 
|  | 1492 | struct global_cwq *gcwq = (void *)__gcwq; | 
|  | 1493 | struct work_struct *work; | 
|  | 1494 |  | 
|  | 1495 | spin_lock_irq(&gcwq->lock); | 
|  | 1496 |  | 
|  | 1497 | if (need_to_create_worker(gcwq)) { | 
|  | 1498 | /* | 
|  | 1499 | * We've been trying to create a new worker but | 
|  | 1500 | * haven't been successful.  We might be hitting an | 
|  | 1501 | * allocation deadlock.  Send distress signals to | 
|  | 1502 | * rescuers. | 
|  | 1503 | */ | 
|  | 1504 | list_for_each_entry(work, &gcwq->worklist, entry) | 
|  | 1505 | send_mayday(work); | 
|  | 1506 | } | 
|  | 1507 |  | 
|  | 1508 | spin_unlock_irq(&gcwq->lock); | 
|  | 1509 |  | 
|  | 1510 | mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL); | 
|  | 1511 | } | 
|  | 1512 |  | 
|  | 1513 | /** | 
|  | 1514 | * maybe_create_worker - create a new worker if necessary | 
|  | 1515 | * @gcwq: gcwq to create a new worker for | 
|  | 1516 | * | 
|  | 1517 | * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to | 
|  | 1518 | * have at least one idle worker on return from this function.  If | 
|  | 1519 | * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is | 
|  | 1520 | * sent to all rescuers with works scheduled on @gcwq to resolve | 
|  | 1521 | * possible allocation deadlock. | 
|  | 1522 | * | 
|  | 1523 | * On return, need_to_create_worker() is guaranteed to be false and | 
|  | 1524 | * may_start_working() true. | 
|  | 1525 | * | 
|  | 1526 | * LOCKING: | 
|  | 1527 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
|  | 1528 | * multiple times.  Does GFP_KERNEL allocations.  Called only from | 
|  | 1529 | * manager. | 
|  | 1530 | * | 
|  | 1531 | * RETURNS: | 
|  | 1532 | * false if no action was taken and gcwq->lock stayed locked, true | 
|  | 1533 | * otherwise. | 
|  | 1534 | */ | 
|  | 1535 | static bool maybe_create_worker(struct global_cwq *gcwq) | 
| Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 1536 | __releases(&gcwq->lock) | 
|  | 1537 | __acquires(&gcwq->lock) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1538 | { | 
|  | 1539 | if (!need_to_create_worker(gcwq)) | 
|  | 1540 | return false; | 
|  | 1541 | restart: | 
| Tejun Heo | 9f9c236 | 2010-07-14 11:31:20 +0200 | [diff] [blame] | 1542 | spin_unlock_irq(&gcwq->lock); | 
|  | 1543 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1544 | /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ | 
|  | 1545 | mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); | 
|  | 1546 |  | 
|  | 1547 | while (true) { | 
|  | 1548 | struct worker *worker; | 
|  | 1549 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1550 | worker = create_worker(gcwq, true); | 
|  | 1551 | if (worker) { | 
|  | 1552 | del_timer_sync(&gcwq->mayday_timer); | 
|  | 1553 | spin_lock_irq(&gcwq->lock); | 
|  | 1554 | start_worker(worker); | 
|  | 1555 | BUG_ON(need_to_create_worker(gcwq)); | 
|  | 1556 | return true; | 
|  | 1557 | } | 
|  | 1558 |  | 
|  | 1559 | if (!need_to_create_worker(gcwq)) | 
|  | 1560 | break; | 
|  | 1561 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1562 | __set_current_state(TASK_INTERRUPTIBLE); | 
|  | 1563 | schedule_timeout(CREATE_COOLDOWN); | 
| Tejun Heo | 9f9c236 | 2010-07-14 11:31:20 +0200 | [diff] [blame] | 1564 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1565 | if (!need_to_create_worker(gcwq)) | 
|  | 1566 | break; | 
|  | 1567 | } | 
|  | 1568 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1569 | del_timer_sync(&gcwq->mayday_timer); | 
|  | 1570 | spin_lock_irq(&gcwq->lock); | 
|  | 1571 | if (need_to_create_worker(gcwq)) | 
|  | 1572 | goto restart; | 
|  | 1573 | return true; | 
|  | 1574 | } | 
|  | 1575 |  | 
|  | 1576 | /** | 
|  | 1577 | * maybe_destroy_worker - destroy workers which have been idle for a while | 
|  | 1578 | * @gcwq: gcwq to destroy workers for | 
|  | 1579 | * | 
|  | 1580 | * Destroy @gcwq workers which have been idle for longer than | 
|  | 1581 | * IDLE_WORKER_TIMEOUT. | 
|  | 1582 | * | 
|  | 1583 | * LOCKING: | 
|  | 1584 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
|  | 1585 | * multiple times.  Called only from manager. | 
|  | 1586 | * | 
|  | 1587 | * RETURNS: | 
|  | 1588 | * false if no action was taken and gcwq->lock stayed locked, true | 
|  | 1589 | * otherwise. | 
|  | 1590 | */ | 
|  | 1591 | static bool maybe_destroy_workers(struct global_cwq *gcwq) | 
|  | 1592 | { | 
|  | 1593 | bool ret = false; | 
|  | 1594 |  | 
|  | 1595 | while (too_many_workers(gcwq)) { | 
|  | 1596 | struct worker *worker; | 
|  | 1597 | unsigned long expires; | 
|  | 1598 |  | 
|  | 1599 | worker = list_entry(gcwq->idle_list.prev, struct worker, entry); | 
|  | 1600 | expires = worker->last_active + IDLE_WORKER_TIMEOUT; | 
|  | 1601 |  | 
|  | 1602 | if (time_before(jiffies, expires)) { | 
|  | 1603 | mod_timer(&gcwq->idle_timer, expires); | 
|  | 1604 | break; | 
|  | 1605 | } | 
|  | 1606 |  | 
|  | 1607 | destroy_worker(worker); | 
|  | 1608 | ret = true; | 
|  | 1609 | } | 
|  | 1610 |  | 
|  | 1611 | return ret; | 
|  | 1612 | } | 
|  | 1613 |  | 
|  | 1614 | /** | 
|  | 1615 | * manage_workers - manage worker pool | 
|  | 1616 | * @worker: self | 
|  | 1617 | * | 
|  | 1618 | * Assume the manager role and manage gcwq worker pool @worker belongs | 
|  | 1619 | * to.  At any given time, there can be only zero or one manager per | 
|  | 1620 | * gcwq.  The exclusion is handled automatically by this function. | 
|  | 1621 | * | 
|  | 1622 | * The caller can safely start processing works on false return.  On | 
|  | 1623 | * true return, it's guaranteed that need_to_create_worker() is false | 
|  | 1624 | * and may_start_working() is true. | 
|  | 1625 | * | 
|  | 1626 | * CONTEXT: | 
|  | 1627 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
|  | 1628 | * multiple times.  Does GFP_KERNEL allocations. | 
|  | 1629 | * | 
|  | 1630 | * RETURNS: | 
|  | 1631 | * false if no action was taken and gcwq->lock stayed locked, true if | 
|  | 1632 | * some action was taken. | 
|  | 1633 | */ | 
|  | 1634 | static bool manage_workers(struct worker *worker) | 
|  | 1635 | { | 
|  | 1636 | struct global_cwq *gcwq = worker->gcwq; | 
|  | 1637 | bool ret = false; | 
|  | 1638 |  | 
|  | 1639 | if (gcwq->flags & GCWQ_MANAGING_WORKERS) | 
|  | 1640 | return ret; | 
|  | 1641 |  | 
|  | 1642 | gcwq->flags &= ~GCWQ_MANAGE_WORKERS; | 
|  | 1643 | gcwq->flags |= GCWQ_MANAGING_WORKERS; | 
|  | 1644 |  | 
|  | 1645 | /* | 
|  | 1646 | * Destroy and then create so that may_start_working() is true | 
|  | 1647 | * on return. | 
|  | 1648 | */ | 
|  | 1649 | ret |= maybe_destroy_workers(gcwq); | 
|  | 1650 | ret |= maybe_create_worker(gcwq); | 
|  | 1651 |  | 
|  | 1652 | gcwq->flags &= ~GCWQ_MANAGING_WORKERS; | 
|  | 1653 |  | 
|  | 1654 | /* | 
|  | 1655 | * The trustee might be waiting to take over the manager | 
|  | 1656 | * position; tell it we're done. | 
|  | 1657 | */ | 
|  | 1658 | if (unlikely(gcwq->trustee)) | 
|  | 1659 | wake_up_all(&gcwq->trustee_wait); | 
|  | 1660 |  | 
|  | 1661 | return ret; | 
|  | 1662 | } | 
|  | 1663 |  | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1664 | /** | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1665 | * move_linked_works - move linked works to a list | 
|  | 1666 | * @work: start of series of works to be scheduled | 
|  | 1667 | * @head: target list to append @work to | 
|  | 1668 | * @nextp: out parameter for nested worklist walking | 
|  | 1669 | * | 
|  | 1670 | * Schedule linked works starting from @work to @head.  Work series to | 
|  | 1671 | * be scheduled starts at @work and includes any consecutive work with | 
|  | 1672 | * WORK_STRUCT_LINKED set in its predecessor. | 
|  | 1673 | * | 
|  | 1674 | * If @nextp is not NULL, it's updated to point to the next work of | 
|  | 1675 | * the last scheduled work.  This allows move_linked_works() to be | 
|  | 1676 | * nested inside outer list_for_each_entry_safe(). | 
|  | 1677 | * | 
|  | 1678 | * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1679 | * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1680 | */ | 
|  | 1681 | static void move_linked_works(struct work_struct *work, struct list_head *head, | 
|  | 1682 | struct work_struct **nextp) | 
|  | 1683 | { | 
|  | 1684 | struct work_struct *n; | 
|  | 1685 |  | 
|  | 1686 | /* | 
|  | 1687 | * A linked worklist will always end before the end of the list; | 
|  | 1688 | * use NULL for the list head. | 
|  | 1689 | */ | 
|  | 1690 | list_for_each_entry_safe_from(work, n, NULL, entry) { | 
|  | 1691 | list_move_tail(&work->entry, head); | 
|  | 1692 | if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) | 
|  | 1693 | break; | 
|  | 1694 | } | 
|  | 1695 |  | 
|  | 1696 | /* | 
|  | 1697 | * If we're already inside safe list traversal and have moved | 
|  | 1698 | * multiple works to the scheduled queue, the next position | 
|  | 1699 | * needs to be updated. | 
|  | 1700 | */ | 
|  | 1701 | if (nextp) | 
|  | 1702 | *nextp = n; | 
|  | 1703 | } | 
|  | 1704 |  | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1705 | static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | 
|  | 1706 | { | 
|  | 1707 | struct work_struct *work = list_first_entry(&cwq->delayed_works, | 
|  | 1708 | struct work_struct, entry); | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1709 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1710 |  | 
| Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1711 | trace_workqueue_activate_work(work); | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1712 | move_linked_works(work, pos, NULL); | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1713 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1714 | cwq->nr_active++; | 
|  | 1715 | } | 
|  | 1716 |  | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1717 | /** | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1718 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight | 
|  | 1719 | * @cwq: cwq of interest | 
|  | 1720 | * @color: color of work which left the queue | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1721 | * @delayed: for a delayed work | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1722 | * | 
|  | 1723 | * When a work has either completed or been removed from the pending queue, | 
|  | 1724 | * decrement nr_in_flight of its cwq and handle workqueue flushing. | 
|  | 1725 | * | 
|  | 1726 | * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1727 | * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1728 | */ | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1729 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, | 
|  | 1730 | bool delayed) | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1731 | { | 
|  | 1732 | /* ignore uncolored works */ | 
|  | 1733 | if (color == WORK_NO_COLOR) | 
|  | 1734 | return; | 
|  | 1735 |  | 
|  | 1736 | cwq->nr_in_flight[color]--; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1737 |  | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1738 | if (!delayed) { | 
|  | 1739 | cwq->nr_active--; | 
|  | 1740 | if (!list_empty(&cwq->delayed_works)) { | 
|  | 1741 | /* one down, submit a delayed one */ | 
|  | 1742 | if (cwq->nr_active < cwq->max_active) | 
|  | 1743 | cwq_activate_first_delayed(cwq); | 
|  | 1744 | } | 
| Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1745 | } | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1746 |  | 
|  | 1747 | /* is flush in progress and are we at the flushing tip? */ | 
|  | 1748 | if (likely(cwq->flush_color != color)) | 
|  | 1749 | return; | 
|  | 1750 |  | 
|  | 1751 | /* are there still in-flight works? */ | 
|  | 1752 | if (cwq->nr_in_flight[color]) | 
|  | 1753 | return; | 
|  | 1754 |  | 
|  | 1755 | /* this cwq is done, clear flush_color */ | 
|  | 1756 | cwq->flush_color = -1; | 
|  | 1757 |  | 
|  | 1758 | /* | 
|  | 1759 | * If this was the last cwq, wake up the first flusher.  It | 
|  | 1760 | * will handle the rest. | 
|  | 1761 | */ | 
|  | 1762 | if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) | 
|  | 1763 | complete(&cwq->wq->first_flusher->done); | 
|  | 1764 | } | 
|  | 1765 |  | 
|  | 1766 | /** | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1767 | * process_one_work - process single work | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1768 | * @worker: self | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1769 | * @work: work to process | 
|  | 1770 | * | 
|  | 1771 | * Process @work.  This function contains all the logic necessary to | 
|  | 1772 | * process a single work including synchronization against and | 
|  | 1773 | * interaction with other workers on the same cpu, queueing and | 
|  | 1774 | * flushing.  As long as the context requirement is met, any worker can | 
|  | 1775 | * call this function to process a work. | 
|  | 1776 | * | 
|  | 1777 | * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1778 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1779 | */ | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1780 | static void process_one_work(struct worker *worker, struct work_struct *work) | 
| Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 1781 | __releases(&gcwq->lock) | 
|  | 1782 | __acquires(&gcwq->lock) | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1783 | { | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1784 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1785 | struct global_cwq *gcwq = cwq->gcwq; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1786 | struct hlist_head *bwh = busy_worker_head(gcwq, work); | 
| Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1787 | bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1788 | work_func_t f = work->func; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1789 | int work_color; | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1790 | struct worker *collision; | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1791 | #ifdef CONFIG_LOCKDEP | 
|  | 1792 | /* | 
|  | 1793 | * It is permissible to free the struct work_struct from | 
|  | 1794 | * inside the function that is called from it; we need to take | 
|  | 1795 | * this into account for lockdep too.  To avoid bogus "held | 
|  | 1796 | * lock freed" warnings as well as problems when looking into | 
|  | 1797 | * work->lockdep_map, make a copy and use that here. | 
|  | 1798 | */ | 
|  | 1799 | struct lockdep_map lockdep_map = work->lockdep_map; | 
|  | 1800 | #endif | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1801 | /* | 
|  | 1802 | * A single work shouldn't be executed concurrently by | 
|  | 1803 | * multiple workers on a single cpu.  Check whether anyone is | 
|  | 1804 | * already processing the work.  If so, defer the work to the | 
|  | 1805 | * currently executing one. | 
|  | 1806 | */ | 
|  | 1807 | collision = __find_worker_executing_work(gcwq, bwh, work); | 
|  | 1808 | if (unlikely(collision)) { | 
|  | 1809 | move_linked_works(work, &collision->scheduled, NULL); | 
|  | 1810 | return; | 
|  | 1811 | } | 
|  | 1812 |  | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1813 | /* claim and process */ | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1814 | debug_work_deactivate(work); | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1815 | hlist_add_head(&worker->hentry, bwh); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1816 | worker->current_work = work; | 
| Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1817 | worker->current_cwq = cwq; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1818 | work_color = get_work_color(work); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1819 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1820 | /* record the current cpu number in the work data and dequeue */ | 
|  | 1821 | set_work_cpu(work, gcwq->cpu); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1822 | list_del_init(&work->entry); | 
|  | 1823 |  | 
| Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1824 | /* | 
|  | 1825 | * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI, | 
|  | 1826 | * wake up another worker; otherwise, clear HIGHPRI_PENDING. | 
|  | 1827 | */ | 
|  | 1828 | if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) { | 
|  | 1829 | struct work_struct *nwork = list_first_entry(&gcwq->worklist, | 
|  | 1830 | struct work_struct, entry); | 
|  | 1831 |  | 
|  | 1832 | if (!list_empty(&gcwq->worklist) && | 
|  | 1833 | get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI) | 
|  | 1834 | wake_up_worker(gcwq); | 
|  | 1835 | else | 
|  | 1836 | gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; | 
|  | 1837 | } | 
|  | 1838 |  | 
| Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1839 | /* | 
|  | 1840 | * CPU intensive works don't participate in concurrency | 
|  | 1841 | * management.  They're the scheduler's responsibility. | 
|  | 1842 | */ | 
|  | 1843 | if (unlikely(cpu_intensive)) | 
|  | 1844 | worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); | 
|  | 1845 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1846 | spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1847 |  | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1848 | work_clear_pending(work); | 
| Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 1849 | lock_map_acquire_read(&cwq->wq->lockdep_map); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1850 | lock_map_acquire(&lockdep_map); | 
| Arjan van de Ven | e36c886 | 2010-08-21 13:07:26 -0700 | [diff] [blame] | 1851 | trace_workqueue_execute_start(work); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1852 | f(work); | 
| Arjan van de Ven | e36c886 | 2010-08-21 13:07:26 -0700 | [diff] [blame] | 1853 | /* | 
|  | 1854 | * While we must be careful to not use "work" after this, the trace | 
|  | 1855 | * point will only record its address. | 
|  | 1856 | */ | 
|  | 1857 | trace_workqueue_execute_end(work); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1858 | lock_map_release(&lockdep_map); | 
|  | 1859 | lock_map_release(&cwq->wq->lockdep_map); | 
|  | 1860 |  | 
|  | 1861 | if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { | 
|  | 1862 | printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " | 
|  | 1863 | "%s/0x%08x/%d\n", | 
|  | 1864 | current->comm, preempt_count(), task_pid_nr(current)); | 
|  | 1865 | printk(KERN_ERR "    last function: "); | 
|  | 1866 | print_symbol("%s\n", (unsigned long)f); | 
|  | 1867 | debug_show_held_locks(current); | 
|  | 1868 | dump_stack(); | 
|  | 1869 | } | 
|  | 1870 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1871 | spin_lock_irq(&gcwq->lock); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1872 |  | 
| Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1873 | /* clear cpu intensive status */ | 
|  | 1874 | if (unlikely(cpu_intensive)) | 
|  | 1875 | worker_clr_flags(worker, WORKER_CPU_INTENSIVE); | 
|  | 1876 |  | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1877 | /* we're done with it, release */ | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1878 | hlist_del_init(&worker->hentry); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1879 | worker->current_work = NULL; | 
| Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1880 | worker->current_cwq = NULL; | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1881 | cwq_dec_nr_in_flight(cwq, work_color, false); | 
| Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1882 | } | 
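|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch, not part of this file: the cpu_intensive path | 
|  |  | * above applies to workqueues created with WQ_CPU_INTENSIVE.  Workers | 
|  |  | * running such works are excluded from concurrency management so that | 
|  |  | * a long CPU burn does not stall other work items on the same gcwq. | 
|  |  | * The names below are hypothetical. | 
|  |  | */ | 
|  |  | static struct workqueue_struct *example_crunch_wq; | 
|  |  |  | 
|  |  | static int __init example_crunch_init(void) | 
|  |  | { | 
|  |  | example_crunch_wq = alloc_workqueue("example_crunch", WQ_CPU_INTENSIVE, 1); | 
|  |  | return example_crunch_wq ? 0 : -ENOMEM; | 
|  |  | } | 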
|  | 1883 |  | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1884 | /** | 
|  | 1885 | * process_scheduled_works - process scheduled works | 
|  | 1886 | * @worker: self | 
|  | 1887 | * | 
|  | 1888 | * Process all scheduled works.  Please note that the scheduled list | 
|  | 1889 | * may change while processing a work, so this function repeatedly | 
|  | 1890 | * fetches a work from the top and executes it. | 
|  | 1891 | * | 
|  | 1892 | * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1893 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1894 | * multiple times. | 
|  | 1895 | */ | 
|  | 1896 | static void process_scheduled_works(struct worker *worker) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 | { | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1898 | while (!list_empty(&worker->scheduled)) { | 
|  | 1899 | struct work_struct *work = list_first_entry(&worker->scheduled, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1900 | struct work_struct, entry); | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1901 | process_one_work(worker, work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1902 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1903 | } | 
|  | 1904 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1905 | /** | 
|  | 1906 | * worker_thread - the worker thread function | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1907 | * @__worker: self | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1908 | * | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1909 | * The gcwq worker thread function.  There's a single dynamic pool of | 
|  | 1910 | * these workers per cpu.  They process all works regardless of | 
|  | 1911 | * their specific target workqueue.  The only exception is works which | 
|  | 1912 | * belong to workqueues with a rescuer, which is explained in | 
|  | 1913 | * rescuer_thread(). | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1914 | */ | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1915 | static int worker_thread(void *__worker) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1916 | { | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1917 | struct worker *worker = __worker; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1918 | struct global_cwq *gcwq = worker->gcwq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1919 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1920 | /* tell the scheduler that this is a workqueue worker */ | 
|  | 1921 | worker->task->flags |= PF_WQ_WORKER; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1922 | woke_up: | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1923 | spin_lock_irq(&gcwq->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1924 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1925 | /* DIE can be set only while we're idle, so checking here is enough */ | 
|  | 1926 | if (worker->flags & WORKER_DIE) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1927 | spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1928 | worker->task->flags &= ~PF_WQ_WORKER; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1929 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1930 | } | 
|  | 1931 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1932 | worker_leave_idle(worker); | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1933 | recheck: | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1934 | /* no more workers necessary? */ | 
|  | 1935 | if (!need_more_worker(gcwq)) | 
|  | 1936 | goto sleep; | 
|  | 1937 |  | 
|  | 1938 | /* do we need to manage? */ | 
|  | 1939 | if (unlikely(!may_start_working(gcwq)) && manage_workers(worker)) | 
|  | 1940 | goto recheck; | 
|  | 1941 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1942 | /* | 
|  | 1943 | * ->scheduled list can only be filled while a worker is | 
|  | 1944 | * preparing to process a work or actually processing it. | 
|  | 1945 | * Make sure nobody diddled with it while I was sleeping. | 
|  | 1946 | */ | 
|  | 1947 | BUG_ON(!list_empty(&worker->scheduled)); | 
|  | 1948 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1949 | /* | 
|  | 1950 | * When control reaches this point, we're guaranteed that there | 
|  | 1951 | * is at least one idle worker or that someone else has already | 
|  | 1952 | * assumed the manager role. | 
|  | 1953 | */ | 
|  | 1954 | worker_clr_flags(worker, WORKER_PREP); | 
|  | 1955 |  | 
|  | 1956 | do { | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1957 | struct work_struct *work = | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1958 | list_first_entry(&gcwq->worklist, | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1959 | struct work_struct, entry); | 
|  | 1960 |  | 
|  | 1961 | if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { | 
|  | 1962 | /* optimization path, not strictly necessary */ | 
|  | 1963 | process_one_work(worker, work); | 
|  | 1964 | if (unlikely(!list_empty(&worker->scheduled))) | 
|  | 1965 | process_scheduled_works(worker); | 
|  | 1966 | } else { | 
|  | 1967 | move_linked_works(work, &worker->scheduled, NULL); | 
|  | 1968 | process_scheduled_works(worker); | 
|  | 1969 | } | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1970 | } while (keep_working(gcwq)); | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1971 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1972 | worker_set_flags(worker, WORKER_PREP, false); | 
| Tejun Heo | d313dd8 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1973 | sleep: | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1974 | if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) | 
|  | 1975 | goto recheck; | 
| Tejun Heo | d313dd8 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1976 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1977 | /* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1978 | * gcwq->lock is held, there's no work to process and no | 
|  | 1979 | * need to manage; sleep.  Workers are woken up only while | 
|  | 1980 | * holding gcwq->lock or from local cpu, so setting the | 
|  | 1981 | * current state before releasing gcwq->lock is enough to | 
|  | 1982 | * prevent losing any event. | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1983 | */ | 
|  | 1984 | worker_enter_idle(worker); | 
|  | 1985 | __set_current_state(TASK_INTERRUPTIBLE); | 
|  | 1986 | spin_unlock_irq(&gcwq->lock); | 
|  | 1987 | schedule(); | 
|  | 1988 | goto woke_up; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1989 | } | 
|  | 1990 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1991 | /** | 
|  | 1992 | * rescuer_thread - the rescuer thread function | 
|  | 1993 | * @__wq: the associated workqueue | 
|  | 1994 | * | 
|  | 1995 | * Workqueue rescuer thread function.  There's one rescuer for each | 
|  | 1996 | * workqueue which has WQ_RESCUER set. | 
|  | 1997 | * | 
|  | 1998 | * Regular work processing on a gcwq may block trying to create a new | 
|  | 1999 | * worker, which uses a GFP_KERNEL allocation and thus has a slight | 
|  | 2000 | * chance of deadlocking if some works currently on the same queue | 
|  | 2001 | * need to be processed to satisfy that allocation.  This is the | 
|  | 2002 | * problem the rescuer solves. | 
|  | 2003 | * | 
|  | 2004 | * When such a condition is possible, the gcwq summons the rescuers of | 
|  | 2005 | * all workqueues which have works queued on it and lets them process | 
|  | 2006 | * those works so that forward progress can be guaranteed. | 
|  | 2007 | * | 
|  | 2008 | * This should happen rarely. | 
|  | 2009 | */ | 
|  | 2010 | static int rescuer_thread(void *__wq) | 
|  | 2011 | { | 
|  | 2012 | struct workqueue_struct *wq = __wq; | 
|  | 2013 | struct worker *rescuer = wq->rescuer; | 
|  | 2014 | struct list_head *scheduled = &rescuer->scheduled; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2015 | bool is_unbound = wq->flags & WQ_UNBOUND; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2016 | unsigned int cpu; | 
|  | 2017 |  | 
|  | 2018 | set_user_nice(current, RESCUER_NICE_LEVEL); | 
|  | 2019 | repeat: | 
|  | 2020 | set_current_state(TASK_INTERRUPTIBLE); | 
|  | 2021 |  | 
|  | 2022 | if (kthread_should_stop()) | 
|  | 2023 | return 0; | 
|  | 2024 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2025 | /* | 
|  | 2026 | * See whether any cpu is asking for help.  Unbound | 
|  | 2027 | * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. | 
|  | 2028 | */ | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 2029 | for_each_mayday_cpu(cpu, wq->mayday_mask) { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2030 | unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; | 
|  | 2031 | struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2032 | struct global_cwq *gcwq = cwq->gcwq; | 
|  | 2033 | struct work_struct *work, *n; | 
|  | 2034 |  | 
|  | 2035 | __set_current_state(TASK_RUNNING); | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 2036 | mayday_clear_cpu(cpu, wq->mayday_mask); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2037 |  | 
|  | 2038 | /* migrate to the target cpu if possible */ | 
|  | 2039 | rescuer->gcwq = gcwq; | 
|  | 2040 | worker_maybe_bind_and_lock(rescuer); | 
|  | 2041 |  | 
|  | 2042 | /* | 
|  | 2043 | * Slurp in all works issued via this workqueue and | 
|  | 2044 | * process'em. | 
|  | 2045 | */ | 
|  | 2046 | BUG_ON(!list_empty(&rescuer->scheduled)); | 
|  | 2047 | list_for_each_entry_safe(work, n, &gcwq->worklist, entry) | 
|  | 2048 | if (get_work_cwq(work) == cwq) | 
|  | 2049 | move_linked_works(work, scheduled, &n); | 
|  | 2050 |  | 
|  | 2051 | process_scheduled_works(rescuer); | 
| Tejun Heo | 7576958 | 2011-02-14 14:04:46 +0100 | [diff] [blame] | 2052 |  | 
|  | 2053 | /* | 
|  | 2054 | * Leave this gcwq.  If keep_working() is %true, notify a | 
|  | 2055 | * regular worker; otherwise, we end up with 0 concurrency | 
|  | 2056 | * and stall the execution. | 
|  | 2057 | */ | 
|  | 2058 | if (keep_working(gcwq)) | 
|  | 2059 | wake_up_worker(gcwq); | 
|  | 2060 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2061 | spin_unlock_irq(&gcwq->lock); | 
|  | 2062 | } | 
|  | 2063 |  | 
|  | 2064 | schedule(); | 
|  | 2065 | goto repeat; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2066 | } | 
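|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal sketch of how a driver on a memory-reclaim path might | 
|  |  |  * create a workqueue backed by a rescuer.  The workqueue name and the | 
|  |  |  * work item are illustrative assumptions, and WQ_MEM_RECLAIM is | 
|  |  |  * assumed to be the caller-visible flag that sets up the rescuer in | 
|  |  |  * this era of the API (WQ_RESCUER in slightly older trees). | 
|  |  |  * | 
|  |  |  *	struct workqueue_struct *reclaim_wq; | 
|  |  |  * | 
|  |  |  *	reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 1); | 
|  |  |  *	if (!reclaim_wq) | 
|  |  |  *		return -ENOMEM; | 
|  |  |  *	queue_work(reclaim_wq, &dev->reclaim_work); | 
|  |  |  */ | 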
|  | 2067 |  | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2068 | struct wq_barrier { | 
|  | 2069 | struct work_struct	work; | 
|  | 2070 | struct completion	done; | 
|  | 2071 | }; | 
|  | 2072 |  | 
|  | 2073 | static void wq_barrier_func(struct work_struct *work) | 
|  | 2074 | { | 
|  | 2075 | struct wq_barrier *barr = container_of(work, struct wq_barrier, work); | 
|  | 2076 | complete(&barr->done); | 
|  | 2077 | } | 
|  | 2078 |  | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2079 | /** | 
|  | 2080 | * insert_wq_barrier - insert a barrier work | 
|  | 2081 | * @cwq: cwq to insert barrier into | 
|  | 2082 | * @barr: wq_barrier to insert | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2083 | * @target: target work to attach @barr to | 
|  | 2084 | * @worker: worker currently executing @target, NULL if @target is not executing | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2085 | * | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2086 | * @barr is linked to @target such that @barr is completed only after | 
|  | 2087 | * @target finishes execution.  Please note that the ordering | 
|  | 2088 | * guarantee is observed only with respect to @target and on the local | 
|  | 2089 | * cpu. | 
|  | 2090 | * | 
|  | 2091 | * Currently, a queued barrier can't be canceled.  This is because | 
|  | 2092 | * try_to_grab_pending() can't determine whether the work to be | 
|  | 2093 | * grabbed is at the head of the queue and thus can't clear the | 
|  | 2094 | * LINKED flag of the previous work, while a work with the LINKED | 
|  | 2095 | * flag set must always be followed by a valid next work. | 
|  | 2096 | * | 
|  | 2097 | * Note that when @worker is non-NULL, @target may be modified | 
|  | 2098 | * underneath us, so we can't reliably determine cwq from @target. | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2099 | * | 
|  | 2100 | * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2101 | * spin_lock_irq(gcwq->lock). | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2102 | */ | 
| Oleg Nesterov | 83c2252 | 2007-05-09 02:33:54 -0700 | [diff] [blame] | 2103 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2104 | struct wq_barrier *barr, | 
|  | 2105 | struct work_struct *target, struct worker *worker) | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2106 | { | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2107 | struct list_head *head; | 
|  | 2108 | unsigned int linked = 0; | 
|  | 2109 |  | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2110 | /* | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2111 | * debugobject calls are safe here even with gcwq->lock locked | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2112 | * as we know for sure that this will not trigger any of the | 
|  | 2113 | * checks and call back into the fixup functions where we | 
|  | 2114 | * might deadlock. | 
|  | 2115 | */ | 
| Andrew Morton | ca1cab3 | 2010-10-26 14:22:34 -0700 | [diff] [blame] | 2116 | INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2117 | __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2118 | init_completion(&barr->done); | 
| Oleg Nesterov | 83c2252 | 2007-05-09 02:33:54 -0700 | [diff] [blame] | 2119 |  | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2120 | /* | 
|  | 2121 | * If @target is currently being executed, schedule the | 
|  | 2122 | * barrier to the worker; otherwise, put it after @target. | 
|  | 2123 | */ | 
|  | 2124 | if (worker) | 
|  | 2125 | head = worker->scheduled.next; | 
|  | 2126 | else { | 
|  | 2127 | unsigned long *bits = work_data_bits(target); | 
|  | 2128 |  | 
|  | 2129 | head = target->entry.next; | 
|  | 2130 | /* there can already be other linked works, inherit and set */ | 
|  | 2131 | linked = *bits & WORK_STRUCT_LINKED; | 
|  | 2132 | __set_bit(WORK_STRUCT_LINKED_BIT, bits); | 
|  | 2133 | } | 
|  | 2134 |  | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2135 | debug_work_activate(&barr->work); | 
| Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2136 | insert_work(cwq, &barr->work, head, | 
|  | 2137 | work_color_to_flags(WORK_NO_COLOR) | linked); | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2138 | } | 
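|  |  |  | 
|  |  | /* | 
|  |  |  * A sketch of the on-stack usage pattern that callers of | 
|  |  |  * insert_wq_barrier() follow (see start_flush_work() below). | 
|  |  |  * insert_wq_barrier() initializes the on-stack work and completion | 
|  |  |  * itself; the lookup of @target and @worker is elided here. | 
|  |  |  * | 
|  |  |  *	struct wq_barrier barr; | 
|  |  |  * | 
|  |  |  *	spin_lock_irq(&gcwq->lock); | 
|  |  |  *	insert_wq_barrier(cwq, &barr, target, worker); | 
|  |  |  *	spin_unlock_irq(&gcwq->lock); | 
|  |  |  *	wait_for_completion(&barr.done); | 
|  |  |  *	destroy_work_on_stack(&barr.work); | 
|  |  |  */ | 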
|  | 2139 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2140 | /** | 
|  | 2141 | * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing | 
|  | 2142 | * @wq: workqueue being flushed | 
|  | 2143 | * @flush_color: new flush color, < 0 for no-op | 
|  | 2144 | * @work_color: new work color, < 0 for no-op | 
|  | 2145 | * | 
|  | 2146 | * Prepare cwqs for workqueue flushing. | 
|  | 2147 | * | 
|  | 2148 | * If @flush_color is non-negative, flush_color on all cwqs should be | 
|  | 2149 | * -1.  If no cwq has in-flight commands at the specified color, all | 
|  | 2150 | * cwq->flush_color's stay at -1 and %false is returned.  If any cwq | 
|  | 2151 | * has in-flight commands, its cwq->flush_color is set to | 
|  | 2152 | * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq | 
|  | 2153 | * wakeup logic is armed and %true is returned. | 
|  | 2154 | * | 
|  | 2155 | * The caller should have initialized @wq->first_flusher prior to | 
|  | 2156 | * calling this function with non-negative @flush_color.  If | 
|  | 2157 | * @flush_color is negative, no flush color update is done and %false | 
|  | 2158 | * is returned. | 
|  | 2159 | * | 
|  | 2160 | * If @work_color is non-negative, all cwqs should have the same | 
|  | 2161 | * work_color, the one previous to @work_color, and all will be | 
|  | 2162 | * advanced to @work_color. | 
|  | 2163 | * | 
|  | 2164 | * CONTEXT: | 
|  | 2165 | * mutex_lock(wq->flush_mutex). | 
|  | 2166 | * | 
|  | 2167 | * RETURNS: | 
|  | 2168 | * %true if @flush_color >= 0 and there's something to flush.  %false | 
|  | 2169 | * otherwise. | 
|  | 2170 | */ | 
|  | 2171 | static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, | 
|  | 2172 | int flush_color, int work_color) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2173 | { | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2174 | bool wait = false; | 
|  | 2175 | unsigned int cpu; | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 2176 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2177 | if (flush_color >= 0) { | 
|  | 2178 | BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); | 
|  | 2179 | atomic_set(&wq->nr_cwqs_to_flush, 1); | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2180 | } | 
| Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 2181 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2182 | for_each_cwq_cpu(cpu, wq) { | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2183 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2184 | struct global_cwq *gcwq = cwq->gcwq; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2185 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2186 | spin_lock_irq(&gcwq->lock); | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2187 |  | 
|  | 2188 | if (flush_color >= 0) { | 
|  | 2189 | BUG_ON(cwq->flush_color != -1); | 
|  | 2190 |  | 
|  | 2191 | if (cwq->nr_in_flight[flush_color]) { | 
|  | 2192 | cwq->flush_color = flush_color; | 
|  | 2193 | atomic_inc(&wq->nr_cwqs_to_flush); | 
|  | 2194 | wait = true; | 
|  | 2195 | } | 
|  | 2196 | } | 
|  | 2197 |  | 
|  | 2198 | if (work_color >= 0) { | 
|  | 2199 | BUG_ON(work_color != work_next_color(cwq->work_color)); | 
|  | 2200 | cwq->work_color = work_color; | 
|  | 2201 | } | 
|  | 2202 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2203 | spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2204 | } | 
|  | 2205 |  | 
|  | 2206 | if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) | 
|  | 2207 | complete(&wq->first_flusher->done); | 
|  | 2208 |  | 
|  | 2209 | return wait; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2210 | } | 
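|  |  |  | 
|  |  | /* | 
|  |  |  * A worked walkthrough of the above, with assumed numbers: suppose | 
|  |  |  * wq->work_color is 3 when a flush starts.  The flusher claims | 
|  |  |  * flush_color 3, work_color advances to 4, and this function arms | 
|  |  |  * every cwq which still has nr_in_flight[3] works.  As each such cwq | 
|  |  |  * drains color 3 it decrements wq->nr_cwqs_to_flush; the final | 
|  |  |  * decrement completes wq->first_flusher->done. | 
|  |  |  */ | 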
|  | 2211 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2212 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2213 | * flush_workqueue - ensure that any scheduled work has run to completion. | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2214 | * @wq: workqueue to flush | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2215 | * | 
|  | 2216 | * Forces execution of the workqueue and blocks until its completion. | 
|  | 2217 | * This is typically used in driver shutdown handlers. | 
|  | 2218 | * | 
| Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2219 | * We sleep until all works which were queued on entry have been handled, | 
|  | 2220 | * but we are not livelocked by new incoming ones. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2221 | */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2222 | void flush_workqueue(struct workqueue_struct *wq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2223 | { | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2224 | struct wq_flusher this_flusher = { | 
|  | 2225 | .list = LIST_HEAD_INIT(this_flusher.list), | 
|  | 2226 | .flush_color = -1, | 
|  | 2227 | .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), | 
|  | 2228 | }; | 
|  | 2229 | int next_color; | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 2230 |  | 
| Ingo Molnar | 3295f0e | 2008-08-11 10:30:30 +0200 | [diff] [blame] | 2231 | lock_map_acquire(&wq->lockdep_map); | 
|  | 2232 | lock_map_release(&wq->lockdep_map); | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2233 |  | 
|  | 2234 | mutex_lock(&wq->flush_mutex); | 
|  | 2235 |  | 
|  | 2236 | /* | 
|  | 2237 | * Start-to-wait phase | 
|  | 2238 | */ | 
|  | 2239 | next_color = work_next_color(wq->work_color); | 
|  | 2240 |  | 
|  | 2241 | if (next_color != wq->flush_color) { | 
|  | 2242 | /* | 
|  | 2243 | * Color space is not full.  The current work_color | 
|  | 2244 | * becomes our flush_color and work_color is advanced | 
|  | 2245 | * by one. | 
|  | 2246 | */ | 
|  | 2247 | BUG_ON(!list_empty(&wq->flusher_overflow)); | 
|  | 2248 | this_flusher.flush_color = wq->work_color; | 
|  | 2249 | wq->work_color = next_color; | 
|  | 2250 |  | 
|  | 2251 | if (!wq->first_flusher) { | 
|  | 2252 | /* no flush in progress, become the first flusher */ | 
|  | 2253 | BUG_ON(wq->flush_color != this_flusher.flush_color); | 
|  | 2254 |  | 
|  | 2255 | wq->first_flusher = &this_flusher; | 
|  | 2256 |  | 
|  | 2257 | if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, | 
|  | 2258 | wq->work_color)) { | 
|  | 2259 | /* nothing to flush, done */ | 
|  | 2260 | wq->flush_color = next_color; | 
|  | 2261 | wq->first_flusher = NULL; | 
|  | 2262 | goto out_unlock; | 
|  | 2263 | } | 
|  | 2264 | } else { | 
|  | 2265 | /* wait in queue */ | 
|  | 2266 | BUG_ON(wq->flush_color == this_flusher.flush_color); | 
|  | 2267 | list_add_tail(&this_flusher.list, &wq->flusher_queue); | 
|  | 2268 | flush_workqueue_prep_cwqs(wq, -1, wq->work_color); | 
|  | 2269 | } | 
|  | 2270 | } else { | 
|  | 2271 | /* | 
|  | 2272 | * Oops, color space is full, wait on overflow queue. | 
|  | 2273 | * The next flush completion will assign us a | 
|  | 2274 | * flush_color and transfer us to flusher_queue. | 
|  | 2275 | */ | 
|  | 2276 | list_add_tail(&this_flusher.list, &wq->flusher_overflow); | 
|  | 2277 | } | 
|  | 2278 |  | 
|  | 2279 | mutex_unlock(&wq->flush_mutex); | 
|  | 2280 |  | 
|  | 2281 | wait_for_completion(&this_flusher.done); | 
|  | 2282 |  | 
|  | 2283 | /* | 
|  | 2284 | * Wake-up-and-cascade phase | 
|  | 2285 | * | 
|  | 2286 | * First flushers are responsible for cascading flushes and | 
|  | 2287 | * handling overflow.  Non-first flushers can simply return. | 
|  | 2288 | */ | 
|  | 2289 | if (wq->first_flusher != &this_flusher) | 
|  | 2290 | return; | 
|  | 2291 |  | 
|  | 2292 | mutex_lock(&wq->flush_mutex); | 
|  | 2293 |  | 
| Tejun Heo | 4ce48b3 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2294 | /* we might have raced, check again with mutex held */ | 
|  | 2295 | if (wq->first_flusher != &this_flusher) | 
|  | 2296 | goto out_unlock; | 
|  | 2297 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2298 | wq->first_flusher = NULL; | 
|  | 2299 |  | 
|  | 2300 | BUG_ON(!list_empty(&this_flusher.list)); | 
|  | 2301 | BUG_ON(wq->flush_color != this_flusher.flush_color); | 
|  | 2302 |  | 
|  | 2303 | while (true) { | 
|  | 2304 | struct wq_flusher *next, *tmp; | 
|  | 2305 |  | 
|  | 2306 | /* complete all the flushers sharing the current flush color */ | 
|  | 2307 | list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { | 
|  | 2308 | if (next->flush_color != wq->flush_color) | 
|  | 2309 | break; | 
|  | 2310 | list_del_init(&next->list); | 
|  | 2311 | complete(&next->done); | 
|  | 2312 | } | 
|  | 2313 |  | 
|  | 2314 | BUG_ON(!list_empty(&wq->flusher_overflow) && | 
|  | 2315 | wq->flush_color != work_next_color(wq->work_color)); | 
|  | 2316 |  | 
|  | 2317 | /* this flush_color is finished, advance by one */ | 
|  | 2318 | wq->flush_color = work_next_color(wq->flush_color); | 
|  | 2319 |  | 
|  | 2320 | /* one color has been freed, handle overflow queue */ | 
|  | 2321 | if (!list_empty(&wq->flusher_overflow)) { | 
|  | 2322 | /* | 
|  | 2323 | * Assign the same color to all overflowed | 
|  | 2324 | * flushers, advance work_color and append to | 
|  | 2325 | * flusher_queue.  This is the start-to-wait | 
|  | 2326 | * phase for these overflowed flushers. | 
|  | 2327 | */ | 
|  | 2328 | list_for_each_entry(tmp, &wq->flusher_overflow, list) | 
|  | 2329 | tmp->flush_color = wq->work_color; | 
|  | 2330 |  | 
|  | 2331 | wq->work_color = work_next_color(wq->work_color); | 
|  | 2332 |  | 
|  | 2333 | list_splice_tail_init(&wq->flusher_overflow, | 
|  | 2334 | &wq->flusher_queue); | 
|  | 2335 | flush_workqueue_prep_cwqs(wq, -1, wq->work_color); | 
|  | 2336 | } | 
|  | 2337 |  | 
|  | 2338 | if (list_empty(&wq->flusher_queue)) { | 
|  | 2339 | BUG_ON(wq->flush_color != wq->work_color); | 
|  | 2340 | break; | 
|  | 2341 | } | 
|  | 2342 |  | 
|  | 2343 | /* | 
|  | 2344 | * Need to flush more colors.  Make the next flusher | 
|  | 2345 | * the new first flusher and arm cwqs. | 
|  | 2346 | */ | 
|  | 2347 | BUG_ON(wq->flush_color == wq->work_color); | 
|  | 2348 | BUG_ON(wq->flush_color != next->flush_color); | 
|  | 2349 |  | 
|  | 2350 | list_del_init(&next->list); | 
|  | 2351 | wq->first_flusher = next; | 
|  | 2352 |  | 
|  | 2353 | if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) | 
|  | 2354 | break; | 
|  | 2355 |  | 
|  | 2356 | /* | 
|  | 2357 | * Meh... this color is already done, clear first | 
|  | 2358 | * flusher and repeat cascading. | 
|  | 2359 | */ | 
|  | 2360 | wq->first_flusher = NULL; | 
|  | 2361 | } | 
|  | 2362 |  | 
|  | 2363 | out_unlock: | 
|  | 2364 | mutex_unlock(&wq->flush_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2365 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2366 | EXPORT_SYMBOL_GPL(flush_workqueue); | 
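|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal shutdown sketch, assuming a driver-owned "dev" with its | 
|  |  |  * own workqueue and dwork (all names illustrative).  New submissions | 
|  |  |  * must be stopped first because flush_workqueue() does not prevent | 
|  |  |  * requeueing. | 
|  |  |  * | 
|  |  |  *	dev->shutting_down = true;	(assumed gate against requeueing) | 
|  |  |  *	cancel_delayed_work_sync(&dev->poll_dwork); | 
|  |  |  *	flush_workqueue(dev->wq); | 
|  |  |  *	destroy_workqueue(dev->wq); | 
|  |  |  */ | 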
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2367 |  | 
| Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2368 | static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, | 
|  | 2369 | bool wait_executing) | 
|  | 2370 | { | 
|  | 2371 | struct worker *worker = NULL; | 
|  | 2372 | struct global_cwq *gcwq; | 
|  | 2373 | struct cpu_workqueue_struct *cwq; | 
|  | 2374 |  | 
|  | 2375 | might_sleep(); | 
|  | 2376 | gcwq = get_work_gcwq(work); | 
|  | 2377 | if (!gcwq) | 
|  | 2378 | return false; | 
|  | 2379 |  | 
|  | 2380 | spin_lock_irq(&gcwq->lock); | 
|  | 2381 | if (!list_empty(&work->entry)) { | 
|  | 2382 | /* | 
|  | 2383 | * See the comment near try_to_grab_pending()->smp_rmb(). | 
|  | 2384 | * If it was re-queued to a different gcwq under us, we | 
|  | 2385 | * are not going to wait. | 
|  | 2386 | */ | 
|  | 2387 | smp_rmb(); | 
|  | 2388 | cwq = get_work_cwq(work); | 
|  | 2389 | if (unlikely(!cwq || gcwq != cwq->gcwq)) | 
|  | 2390 | goto already_gone; | 
|  | 2391 | } else if (wait_executing) { | 
|  | 2392 | worker = find_worker_executing_work(gcwq, work); | 
|  | 2393 | if (!worker) | 
|  | 2394 | goto already_gone; | 
|  | 2395 | cwq = worker->current_cwq; | 
|  | 2396 | } else | 
|  | 2397 | goto already_gone; | 
|  | 2398 |  | 
|  | 2399 | insert_wq_barrier(cwq, barr, work, worker); | 
|  | 2400 | spin_unlock_irq(&gcwq->lock); | 
|  | 2401 |  | 
| Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 2402 | /* | 
|  | 2403 | * If @max_active is 1 or rescuer is in use, flushing another work | 
|  | 2404 | * item on the same workqueue may lead to deadlock.  Make sure the | 
|  | 2405 | * flusher is not running on the same workqueue by verifying write | 
|  | 2406 | * access. | 
|  | 2407 | */ | 
|  | 2408 | if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) | 
|  | 2409 | lock_map_acquire(&cwq->wq->lockdep_map); | 
|  | 2410 | else | 
|  | 2411 | lock_map_acquire_read(&cwq->wq->lockdep_map); | 
| Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2412 | lock_map_release(&cwq->wq->lockdep_map); | 
| Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 2413 |  | 
| Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2414 | return true; | 
|  | 2415 | already_gone: | 
|  | 2416 | spin_unlock_irq(&gcwq->lock); | 
|  | 2417 | return false; | 
|  | 2418 | } | 
|  | 2419 |  | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2420 | /** | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2421 | * flush_work - wait for a work to finish executing the last queueing instance | 
|  | 2422 | * @work: the work to flush | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2423 | * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2424 | * Wait until @work has finished execution.  This function considers | 
|  | 2425 | * only the last queueing instance of @work.  If @work has been | 
|  | 2426 | * enqueued across different CPUs on a non-reentrant workqueue or on | 
|  | 2427 | * multiple workqueues, @work might still be executing on return on | 
|  | 2428 | * some of the CPUs from earlier queueing. | 
| Oleg Nesterov | a67da70 | 2008-07-25 01:47:52 -0700 | [diff] [blame] | 2429 | * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2430 | * If @work was queued only on a non-reentrant, ordered or unbound | 
|  | 2431 | * workqueue, @work is guaranteed to be idle on return if it hasn't | 
|  | 2432 | * been requeued since flush started. | 
|  | 2433 | * | 
|  | 2434 | * RETURNS: | 
|  | 2435 | * %true if flush_work() waited for the work to finish execution, | 
|  | 2436 | * %false if it was already idle. | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2437 | */ | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2438 | bool flush_work(struct work_struct *work) | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2439 | { | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2440 | struct wq_barrier barr; | 
|  | 2441 |  | 
| Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2442 | if (start_flush_work(work, &barr, true)) { | 
|  | 2443 | wait_for_completion(&barr.done); | 
|  | 2444 | destroy_work_on_stack(&barr.work); | 
|  | 2445 | return true; | 
|  | 2446 | } else | 
|  | 2447 | return false; | 
| Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2448 | } | 
|  | 2449 | EXPORT_SYMBOL_GPL(flush_work); | 
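|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal sketch of waiting for the last queueing instance of a | 
|  |  |  * single work item; "dev->tx_work" is an illustrative assumption. | 
|  |  |  * | 
|  |  |  *	if (flush_work(&dev->tx_work)) | 
|  |  |  *		pr_debug("tx_work was running and has finished\n"); | 
|  |  |  *	else | 
|  |  |  *		pr_debug("tx_work was already idle\n"); | 
|  |  |  */ | 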
|  | 2450 |  | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2451 | static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) | 
|  | 2452 | { | 
|  | 2453 | struct wq_barrier barr; | 
|  | 2454 | struct worker *worker; | 
|  | 2455 |  | 
|  | 2456 | spin_lock_irq(&gcwq->lock); | 
|  | 2457 |  | 
|  | 2458 | worker = find_worker_executing_work(gcwq, work); | 
|  | 2459 | if (unlikely(worker)) | 
|  | 2460 | insert_wq_barrier(worker->current_cwq, &barr, work, worker); | 
|  | 2461 |  | 
|  | 2462 | spin_unlock_irq(&gcwq->lock); | 
|  | 2463 |  | 
|  | 2464 | if (unlikely(worker)) { | 
|  | 2465 | wait_for_completion(&barr.done); | 
|  | 2466 | destroy_work_on_stack(&barr.work); | 
|  | 2467 | return true; | 
|  | 2468 | } else | 
|  | 2469 | return false; | 
|  | 2470 | } | 
|  | 2471 |  | 
|  | 2472 | static bool wait_on_work(struct work_struct *work) | 
|  | 2473 | { | 
|  | 2474 | bool ret = false; | 
|  | 2475 | int cpu; | 
|  | 2476 |  | 
|  | 2477 | might_sleep(); | 
|  | 2478 |  | 
|  | 2479 | lock_map_acquire(&work->lockdep_map); | 
|  | 2480 | lock_map_release(&work->lockdep_map); | 
|  | 2481 |  | 
|  | 2482 | for_each_gcwq_cpu(cpu) | 
|  | 2483 | ret |= wait_on_cpu_work(get_gcwq(cpu), work); | 
|  | 2484 | return ret; | 
|  | 2485 | } | 
|  | 2486 |  | 
| Tejun Heo | 0938349 | 2010-09-16 10:48:29 +0200 | [diff] [blame] | 2487 | /** | 
|  | 2488 | * flush_work_sync - wait until a work has finished execution | 
|  | 2489 | * @work: the work to flush | 
|  | 2490 | * | 
|  | 2491 | * Wait until @work has finished execution.  On return, it's | 
|  | 2492 | * guaranteed that all queueing instances of @work which happened | 
|  | 2493 | * before this function is called are finished.  In other words, if | 
|  | 2494 | * @work hasn't been requeued since this function was called, @work is | 
|  | 2495 | * guaranteed to be idle on return. | 
|  | 2496 | * | 
|  | 2497 | * RETURNS: | 
|  | 2498 | * %true if flush_work_sync() waited for the work to finish execution, | 
|  | 2499 | * %false if it was already idle. | 
|  | 2500 | */ | 
|  | 2501 | bool flush_work_sync(struct work_struct *work) | 
|  | 2502 | { | 
|  | 2503 | struct wq_barrier barr; | 
|  | 2504 | bool pending, waited; | 
|  | 2505 |  | 
|  | 2506 | /* we'll wait for executions separately, queue barr only if pending */ | 
|  | 2507 | pending = start_flush_work(work, &barr, false); | 
|  | 2508 |  | 
|  | 2509 | /* wait for executions to finish */ | 
|  | 2510 | waited = wait_on_work(work); | 
|  | 2511 |  | 
|  | 2512 | /* wait for the pending one */ | 
|  | 2513 | if (pending) { | 
|  | 2514 | wait_for_completion(&barr.done); | 
|  | 2515 | destroy_work_on_stack(&barr.work); | 
|  | 2516 | } | 
|  | 2517 |  | 
|  | 2518 | return pending || waited; | 
|  | 2519 | } | 
|  | 2520 | EXPORT_SYMBOL_GPL(flush_work_sync); | 
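|  |  |  | 
|  |  | /* | 
|  |  |  * A sketch of the difference from flush_work(): this also waits for | 
|  |  |  * instances queued earlier from other CPUs, so it is the safer flush | 
|  |  |  * before freeing the embedding object, assuming nothing can requeue | 
|  |  |  * the work at this point.  Names are illustrative. | 
|  |  |  * | 
|  |  |  *	flush_work_sync(&dev->stats_work); | 
|  |  |  *	kfree(dev); | 
|  |  |  */ | 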
|  | 2521 |  | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2522 | /* | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2523 | * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING bit, | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2524 | * so this work can't be re-armed in any way. | 
|  | 2525 | */ | 
|  | 2526 | static int try_to_grab_pending(struct work_struct *work) | 
|  | 2527 | { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2528 | struct global_cwq *gcwq; | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2529 | int ret = -1; | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2530 |  | 
| Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2531 | if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2532 | return 0; | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2533 |  | 
|  | 2534 | /* | 
|  | 2535 | * The queueing is in progress, or it is already queued. Try to | 
|  | 2536 | * steal it from ->worklist without clearing WORK_STRUCT_PENDING. | 
|  | 2537 | */ | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2538 | gcwq = get_work_gcwq(work); | 
|  | 2539 | if (!gcwq) | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2540 | return ret; | 
|  | 2541 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2542 | spin_lock_irq(&gcwq->lock); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2543 | if (!list_empty(&work->entry)) { | 
|  | 2544 | /* | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2545 | * This work is queued, but perhaps we locked the wrong gcwq. | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2546 | * In that case we must see the new value after rmb(), see | 
|  | 2547 | * insert_work()->wmb(). | 
|  | 2548 | */ | 
|  | 2549 | smp_rmb(); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2550 | if (gcwq == get_work_gcwq(work)) { | 
| Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2551 | debug_work_deactivate(work); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2552 | list_del_init(&work->entry); | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2553 | cwq_dec_nr_in_flight(get_work_cwq(work), | 
| Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 2554 | get_work_color(work), | 
|  | 2555 | *work_data_bits(work) & WORK_STRUCT_DELAYED); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2556 | ret = 1; | 
|  | 2557 | } | 
|  | 2558 | } | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2559 | spin_unlock_irq(&gcwq->lock); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2560 |  | 
|  | 2561 | return ret; | 
|  | 2562 | } | 
|  | 2563 |  | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2564 | static bool __cancel_work_timer(struct work_struct *work, | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2565 | struct timer_list* timer) | 
|  | 2566 | { | 
|  | 2567 | int ret; | 
|  | 2568 |  | 
|  | 2569 | do { | 
|  | 2570 | ret = (timer && likely(del_timer(timer))); | 
|  | 2571 | if (!ret) | 
|  | 2572 | ret = try_to_grab_pending(work); | 
|  | 2573 | wait_on_work(work); | 
|  | 2574 | } while (unlikely(ret < 0)); | 
|  | 2575 |  | 
| Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2576 | clear_work_data(work); | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2577 | return ret; | 
|  | 2578 | } | 
|  | 2579 |  | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2580 | /** | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2581 | * cancel_work_sync - cancel a work and wait for it to finish | 
|  | 2582 | * @work: the work to cancel | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2583 | * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2584 | * Cancel @work and wait for its execution to finish.  This function | 
|  | 2585 | * can be used even if the work re-queues itself or migrates to | 
|  | 2586 | * another workqueue.  On return from this function, @work is | 
|  | 2587 | * guaranteed to be not pending or executing on any CPU. | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2588 | * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2589 | * cancel_work_sync(&delayed_work->work) must not be used for | 
|  | 2590 | * delayed_work's.  Use cancel_delayed_work_sync() instead. | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2591 | * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2592 | * The caller must ensure that the workqueue on which @work was last | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2593 | * queued can't be destroyed before this function returns. | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2594 | * | 
|  | 2595 | * RETURNS: | 
|  | 2596 | * %true if @work was pending, %false otherwise. | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2597 | */ | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2598 | bool cancel_work_sync(struct work_struct *work) | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2599 | { | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2600 | return __cancel_work_timer(work, NULL); | 
| Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 2601 | } | 
| Oleg Nesterov | 28e53bd | 2007-05-09 02:34:22 -0700 | [diff] [blame] | 2602 | EXPORT_SYMBOL_GPL(cancel_work_sync); | 
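|  |  |  | 
|  |  | /* | 
|  |  |  * A teardown sketch for a work item that may requeue itself; | 
|  |  |  * "dev->poll_work" is an illustrative assumption.  After the call | 
|  |  |  * returns the work is neither pending nor running on any CPU, so the | 
|  |  |  * containing object can be freed. | 
|  |  |  * | 
|  |  |  *	cancel_work_sync(&dev->poll_work); | 
|  |  |  *	kfree(dev); | 
|  |  |  */ | 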
| Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 2603 |  | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2604 | /** | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2605 | * flush_delayed_work - wait for a dwork to finish executing the last queueing instance | 
|  | 2606 | * @dwork: the delayed work to flush | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2607 | * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2608 | * Delayed timer is cancelled and the pending work is queued for | 
|  | 2609 | * immediate execution.  Like flush_work(), this function only | 
|  | 2610 | * considers the last queueing instance of @dwork. | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2611 | * | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2612 | * RETURNS: | 
|  | 2613 | * %true if flush_work() waited for the work to finish execution, | 
|  | 2614 | * %false if it was already idle. | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2615 | */ | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2616 | bool flush_delayed_work(struct delayed_work *dwork) | 
|  | 2617 | { | 
|  | 2618 | if (del_timer_sync(&dwork->timer)) | 
|  | 2619 | __queue_work(raw_smp_processor_id(), | 
|  | 2620 | get_work_cwq(&dwork->work)->wq, &dwork->work); | 
|  | 2621 | return flush_work(&dwork->work); | 
|  | 2622 | } | 
|  | 2623 | EXPORT_SYMBOL(flush_delayed_work); | 
|  | 2624 |  | 
|  | 2625 | /** | 
| Tejun Heo | 0938349 | 2010-09-16 10:48:29 +0200 | [diff] [blame] | 2626 | * flush_delayed_work_sync - wait for a dwork to finish | 
|  | 2627 | * @dwork: the delayed work to flush | 
|  | 2628 | * | 
|  | 2629 | * Delayed timer is cancelled and the pending work is queued for | 
|  | 2630 | * execution immediately.  Other than timer handling, its behavior | 
|  | 2631 | * is identical to flush_work_sync(). | 
|  | 2632 | * | 
|  | 2633 | * RETURNS: | 
|  | 2634 | * %true if flush_work_sync() waited for the work to finish execution, | 
|  | 2635 | * %false if it was already idle. | 
|  | 2636 | */ | 
|  | 2637 | bool flush_delayed_work_sync(struct delayed_work *dwork) | 
|  | 2638 | { | 
|  | 2639 | if (del_timer_sync(&dwork->timer)) | 
|  | 2640 | __queue_work(raw_smp_processor_id(), | 
|  | 2641 | get_work_cwq(&dwork->work)->wq, &dwork->work); | 
|  | 2642 | return flush_work_sync(&dwork->work); | 
|  | 2643 | } | 
|  | 2644 | EXPORT_SYMBOL(flush_delayed_work_sync); | 
|  | 2645 |  | 
|  | 2646 | /** | 
| Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2647 | * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish | 
|  | 2648 | * @dwork: the delayed work to cancel | 
|  | 2649 | * | 
|  | 2650 | * This is cancel_work_sync() for delayed works. | 
|  | 2651 | * | 
|  | 2652 | * RETURNS: | 
|  | 2653 | * %true if @dwork was pending, %false otherwise. | 
|  | 2654 | */ | 
|  | 2655 | bool cancel_delayed_work_sync(struct delayed_work *dwork) | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2656 | { | 
| Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2657 | return __cancel_work_timer(&dwork->work, &dwork->timer); | 
| Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2658 | } | 
| Oleg Nesterov | f5a421a | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2659 | EXPORT_SYMBOL(cancel_delayed_work_sync); | 
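|  |  |  | 
|  |  | /* | 
|  |  |  * The delayed-work analogue of the sketch above.  The timer is handled | 
|  |  |  * too, so this is the usual teardown call for a periodic dwork; | 
|  |  |  * "dev->hb_dwork" is an illustrative assumption. | 
|  |  |  * | 
|  |  |  *	cancel_delayed_work_sync(&dev->hb_dwork); | 
|  |  |  */ | 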
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2660 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2661 | /** | 
|  | 2662 | * schedule_work - put work task in global workqueue | 
|  | 2663 | * @work: job to be done | 
|  | 2664 | * | 
| Bart Van Assche | 5b0f437d | 2009-07-30 19:00:53 +0200 | [diff] [blame] | 2665 | * Returns zero if @work was already on the kernel-global workqueue and | 
|  | 2666 | * non-zero otherwise. | 
|  | 2667 | * | 
|  | 2668 | * This puts a job in the kernel-global workqueue if it was not already | 
|  | 2669 | * queued and leaves it in the same position on the kernel-global | 
|  | 2670 | * workqueue otherwise. | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2671 | */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2672 | int schedule_work(struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2673 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2674 | return queue_work(system_wq, work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2675 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2676 | EXPORT_SYMBOL(schedule_work); | 
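|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal sketch of deferring work to the kernel-global workqueue, | 
|  |  |  * e.g. out of an interrupt handler.  The function and work names are | 
|  |  |  * illustrative assumptions. | 
|  |  |  * | 
|  |  |  *	static void my_fn(struct work_struct *unused) | 
|  |  |  *	{ | 
|  |  |  *		pr_info("running in process context\n"); | 
|  |  |  *	} | 
|  |  |  *	static DECLARE_WORK(my_work, my_fn); | 
|  |  |  * | 
|  |  |  *	schedule_work(&my_work); | 
|  |  |  */ | 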
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2677 |  | 
| Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 2678 | /** | 
|  | 2679 | * schedule_work_on - put work task on a specific cpu | 
|  | 2680 | * @cpu: cpu to put the work task on | 
|  | 2681 | * @work: job to be done | 
|  | 2682 | * | 
|  | 2683 | * This puts a job on a specific cpu. | 
|  | 2684 | */ | 
|  | 2685 | int schedule_work_on(int cpu, struct work_struct *work) | 
|  | 2686 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2687 | return queue_work_on(cpu, system_wq, work); | 
| Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 2688 | } | 
|  | 2689 | EXPORT_SYMBOL(schedule_work_on); | 
|  | 2690 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2691 | /** | 
|  | 2692 | * schedule_delayed_work - put work task in global workqueue after delay | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2693 | * @dwork: job to be done | 
|  | 2694 | * @delay: number of jiffies to wait or 0 for immediate execution | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2695 | * | 
|  | 2696 | * After waiting for a given time, this puts a job in the kernel-global | 
|  | 2697 | * workqueue. | 
|  | 2698 | */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2699 | int schedule_delayed_work(struct delayed_work *dwork, | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 2700 | unsigned long delay) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2701 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2702 | return queue_delayed_work(system_wq, dwork, delay); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2703 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2704 | EXPORT_SYMBOL(schedule_delayed_work); | 
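|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal sketch of running a job roughly one second from now on | 
|  |  |  * the kernel-global workqueue.  Names are illustrative assumptions. | 
|  |  |  * | 
|  |  |  *	static DECLARE_DELAYED_WORK(my_dwork, my_fn); | 
|  |  |  * | 
|  |  |  *	schedule_delayed_work(&my_dwork, HZ); | 
|  |  |  */ | 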
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2705 |  | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2706 | /** | 
|  | 2707 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 
|  | 2708 | * @cpu: cpu to use | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2709 | * @dwork: job to be done | 
| Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2710 | * @delay: number of jiffies to wait | 
|  | 2711 | * | 
|  | 2712 | * After waiting for a given time, this puts a job in the kernel-global | 
|  | 2713 | * workqueue on the specified CPU. | 
|  | 2714 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2715 | int schedule_delayed_work_on(int cpu, | 
| David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2716 | struct delayed_work *dwork, unsigned long delay) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2717 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2718 | return queue_delayed_work_on(cpu, system_wq, dwork, delay); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2719 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2720 | EXPORT_SYMBOL(schedule_delayed_work_on); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2721 |  | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2722 | /** | 
| Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2723 | * schedule_on_each_cpu - execute a function synchronously on each online CPU | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2724 | * @func: the function to call | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2725 | * | 
| Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2726 | * schedule_on_each_cpu() executes @func on each online CPU using the | 
|  | 2727 | * system workqueue and blocks until all CPUs have completed. | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2728 | * schedule_on_each_cpu() is very slow. | 
| Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2729 | * | 
|  | 2730 | * RETURNS: | 
|  | 2731 | * 0 on success, -errno on failure. | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2732 | */ | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2733 | int schedule_on_each_cpu(work_func_t func) | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2734 | { | 
|  | 2735 | int cpu; | 
| Namhyung Kim | 38f5156 | 2010-08-08 14:24:09 +0200 | [diff] [blame] | 2736 | struct work_struct __percpu *works; | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2737 |  | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2738 | works = alloc_percpu(struct work_struct); | 
|  | 2739 | if (!works) | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2740 | return -ENOMEM; | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2741 |  | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 2742 | get_online_cpus(); | 
| Tejun Heo | 9398180 | 2009-11-17 14:06:20 -0800 | [diff] [blame] | 2743 |  | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2744 | for_each_online_cpu(cpu) { | 
| Ingo Molnar | 9bfb183 | 2006-12-18 20:05:09 +0100 | [diff] [blame] | 2745 | struct work_struct *work = per_cpu_ptr(works, cpu); | 
|  | 2746 |  | 
|  | 2747 | INIT_WORK(work, func); | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2748 | schedule_work_on(cpu, work); | 
| Andi Kleen | 65a6446 | 2009-10-14 06:22:47 +0200 | [diff] [blame] | 2749 | } | 
| Tejun Heo | 9398180 | 2009-11-17 14:06:20 -0800 | [diff] [blame] | 2750 |  | 
|  | 2751 | for_each_online_cpu(cpu) | 
|  | 2752 | flush_work(per_cpu_ptr(works, cpu)); | 
|  | 2753 |  | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 2754 | put_online_cpus(); | 
| Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2755 | free_percpu(works); | 
| Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2756 | return 0; | 
|  | 2757 | } | 
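|  |  |  | 
|  |  | /* | 
|  |  |  * A sketch of draining a hypothetical per-cpu cache on every online | 
|  |  |  * CPU and waiting for all of them to finish; "my_cache" and "drain()" | 
|  |  |  * are assumptions. | 
|  |  |  * | 
|  |  |  *	static void drain_my_cache(struct work_struct *unused) | 
|  |  |  *	{ | 
|  |  |  *		drain(this_cpu_ptr(&my_cache)); | 
|  |  |  *	} | 
|  |  |  * | 
|  |  |  *	int ret = schedule_on_each_cpu(drain_my_cache); | 
|  |  |  */ | 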
|  | 2758 |  | 
| Alan Stern | eef6a7d | 2010-02-12 17:39:21 +0900 | [diff] [blame] | 2759 | /** | 
|  | 2760 | * flush_scheduled_work - ensure that any scheduled work has run to completion. | 
|  | 2761 | * | 
|  | 2762 | * Forces execution of the kernel-global workqueue and blocks until its | 
|  | 2763 | * completion. | 
|  | 2764 | * | 
|  | 2765 | * Think twice before calling this function!  It's very easy to get into | 
|  | 2766 | * trouble if you don't take great care.  Either of the following situations | 
|  | 2767 | * will lead to deadlock: | 
|  | 2768 | * | 
|  | 2769 | *	One of the work items currently on the workqueue needs to acquire | 
|  | 2770 | *	a lock held by your code or its caller. | 
|  | 2771 | * | 
|  | 2772 | *	Your code is running in the context of a work routine. | 
|  | 2773 | * | 
|  | 2774 | * They will be detected by lockdep when they occur, but the first might not | 
|  | 2775 | * occur very often.  It depends on what work items are on the workqueue and | 
|  | 2776 | * what locks they need, which you have no control over. | 
|  | 2777 | * | 
|  | 2778 | * In most situations flushing the entire workqueue is overkill; you merely | 
|  | 2779 | * need to know that a particular work item isn't queued and isn't running. | 
|  | 2780 | * In such cases you should use cancel_delayed_work_sync() or | 
|  | 2781 | * cancel_work_sync() instead. | 
|  | 2782 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2783 | void flush_scheduled_work(void) | 
|  | 2784 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2785 | flush_workqueue(system_wq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2786 | } | 
| Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2787 | EXPORT_SYMBOL(flush_scheduled_work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2788 |  | 
|  | 2789 | /** | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2790 | * execute_in_process_context - reliably execute the routine with user context | 
|  | 2791 | * @fn:		the function to execute | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2792 | * @ew:		guaranteed storage for the execute work structure (must | 
|  | 2793 | *		be available when the work executes) | 
|  | 2794 | * | 
|  | 2795 | * Executes the function immediately if process context is available, | 
|  | 2796 | * otherwise schedules the function for delayed execution. | 
|  | 2797 | * | 
|  | 2798 | * Returns:	0 - function was executed | 
|  | 2799 | *		1 - function was scheduled for execution | 
|  | 2800 | */ | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2801 | int execute_in_process_context(work_func_t fn, struct execute_work *ew) | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2802 | { | 
|  | 2803 | if (!in_interrupt()) { | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2804 | fn(&ew->work); | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2805 | return 0; | 
|  | 2806 | } | 
|  | 2807 |  | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2808 | INIT_WORK(&ew->work, fn); | 
| James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2809 | schedule_work(&ew->work); | 
|  | 2810 |  | 
|  | 2811 | return 1; | 
|  | 2812 | } | 
|  | 2813 | EXPORT_SYMBOL_GPL(execute_in_process_context); | 
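|  |  |  | 
|  |  | /* | 
|  |  |  * A sketch of releasing an object from a path that may run in | 
|  |  |  * interrupt context.  @ew must stay valid until the work executes, so | 
|  |  |  * it is embedded in the object here; "struct my_obj" is an | 
|  |  |  * illustrative assumption. | 
|  |  |  * | 
|  |  |  *	static void my_release(struct work_struct *work) | 
|  |  |  *	{ | 
|  |  |  *		struct my_obj *obj = container_of(work, struct my_obj, | 
|  |  |  *						  ew.work); | 
|  |  |  *		kfree(obj); | 
|  |  |  *	} | 
|  |  |  * | 
|  |  |  *	execute_in_process_context(my_release, &obj->ew); | 
|  |  |  */ | 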
|  | 2814 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2815 | int keventd_up(void) | 
|  | 2816 | { | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2817 | return system_wq != NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2818 | } | 
|  | 2819 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2820 | static int alloc_cwqs(struct workqueue_struct *wq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2821 | { | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2822 | /* | 
| Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2823 | * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS. | 
|  | 2824 | * Make sure that the alignment isn't lower than that of | 
|  | 2825 | * unsigned long long. | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2826 | */ | 
| Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2827 | const size_t size = sizeof(struct cpu_workqueue_struct); | 
|  | 2828 | const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, | 
|  | 2829 | __alignof__(unsigned long long)); | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2830 | #ifdef CONFIG_SMP | 
|  | 2831 | bool percpu = !(wq->flags & WQ_UNBOUND); | 
|  | 2832 | #else | 
|  | 2833 | bool percpu = false; | 
|  | 2834 | #endif | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2835 |  | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2836 | if (percpu) | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2837 | wq->cpu_wq.pcpu = __alloc_percpu(size, align); | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2838 | else { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2839 | void *ptr; | 
| Frederic Weisbecker | e1d8aa9 | 2009-01-12 23:15:46 +0100 | [diff] [blame] | 2840 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2841 | /* | 
|  | 2842 | * Allocate enough room to align cwq and put an extra | 
|  | 2843 | * pointer at the end pointing back to the originally | 
|  | 2844 | * allocated pointer, which is used when freeing. | 
|  | 2845 | */ | 
|  | 2846 | ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); | 
|  | 2847 | if (ptr) { | 
|  | 2848 | wq->cpu_wq.single = PTR_ALIGN(ptr, align); | 
|  | 2849 | *(void **)(wq->cpu_wq.single + 1) = ptr; | 
|  | 2850 | } | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2851 | } | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2852 |  | 
| David Howells | 5260562 | 2010-10-25 23:41:11 +0100 | [diff] [blame] | 2853 | /* just in case, make sure it's actually aligned | 
|  | 2854 | * - this is affected by PERCPU() alignment in vmlinux.lds.S | 
|  | 2855 | */ | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2856 | BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); | 
|  | 2857 | return wq->cpu_wq.v ? 0 : -ENOMEM; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2858 | } | 
|  | 2859 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2860 | static void free_cwqs(struct workqueue_struct *wq) | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 2861 | { | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2862 | #ifdef CONFIG_SMP | 
|  | 2863 | bool percpu = !(wq->flags & WQ_UNBOUND); | 
|  | 2864 | #else | 
|  | 2865 | bool percpu = false; | 
|  | 2866 | #endif | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 2867 |  | 
| Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2868 | if (percpu) | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2869 | free_percpu(wq->cpu_wq.pcpu); | 
|  | 2870 | else if (wq->cpu_wq.single) { | 
|  | 2871 | /* the pointer to free is stored right after the cwq */ | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2872 | kfree(*(void **)(wq->cpu_wq.single + 1)); | 
| Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 2873 | } | 
|  | 2874 | } | 
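/*
 * The !percpu path above over-allocates, aligns the object with
 * PTR_ALIGN() and stashes the original allocation pointer right behind
 * it so free_cwqs() can hand it back to kfree().  A generic sketch of
 * the same idiom, with hypothetical names:
 *
 *	void *obj_alloc_aligned(size_t size, size_t align)
 *	{
 *		void *raw = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
 *		void *obj;
 *
 *		if (!raw)
 *			return NULL;
 *		obj = PTR_ALIGN(raw, align);
 *		*(void **)((char *)obj + size) = raw;	// remember what to kfree
 *		return obj;
 *	}
 *
 *	void obj_free_aligned(void *obj, size_t size)
 *	{
 *		if (obj)
 *			kfree(*(void **)((char *)obj + size));
 *	}
 */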
|  | 2875 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2876 | static int wq_clamp_max_active(int max_active, unsigned int flags, | 
|  | 2877 | const char *name) | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2878 | { | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2879 | int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; | 
|  | 2880 |  | 
|  | 2881 | if (max_active < 1 || max_active > lim) | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2882 | printk(KERN_WARNING "workqueue: max_active %d requested for %s " | 
|  | 2883 | "is out of range, clamping between %d and %d\n", | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2884 | max_active, name, 1, lim); | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2885 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2886 | return clamp_val(max_active, 1, lim); | 
| Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2887 | } | 
|  | 2888 |  | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2889 | struct workqueue_struct *__alloc_workqueue_key(const char *name, | 
|  | 2890 | unsigned int flags, | 
|  | 2891 | int max_active, | 
|  | 2892 | struct lock_class_key *key, | 
|  | 2893 | const char *lock_name) | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2894 | { | 
|  | 2895 | struct workqueue_struct *wq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2896 | unsigned int cpu; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2897 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2898 | /* | 
| Tejun Heo | 6370a6a | 2010-10-11 15:12:27 +0200 | [diff] [blame] | 2899 | * Workqueues which may be used during memory reclaim should | 
|  | 2900 | * have a rescuer to guarantee forward progress. | 
|  | 2901 | */ | 
|  | 2902 | if (flags & WQ_MEM_RECLAIM) | 
|  | 2903 | flags |= WQ_RESCUER; | 
|  | 2904 |  | 
|  | 2905 | /* | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2906 | * Unbound workqueues aren't concurrency managed and should be | 
|  | 2907 | * dispatched to workers immediately. | 
|  | 2908 | */ | 
|  | 2909 | if (flags & WQ_UNBOUND) | 
|  | 2910 | flags |= WQ_HIGHPRI; | 
|  | 2911 |  | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2912 | max_active = max_active ?: WQ_DFL_ACTIVE; | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2913 | max_active = wq_clamp_max_active(max_active, flags, name); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2914 |  | 
|  | 2915 | wq = kzalloc(sizeof(*wq), GFP_KERNEL); | 
|  | 2916 | if (!wq) | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2917 | goto err; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2918 |  | 
| Tejun Heo | 97e37d7 | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2919 | wq->flags = flags; | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2920 | wq->saved_max_active = max_active; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2921 | mutex_init(&wq->flush_mutex); | 
|  | 2922 | atomic_set(&wq->nr_cwqs_to_flush, 0); | 
|  | 2923 | INIT_LIST_HEAD(&wq->flusher_queue); | 
|  | 2924 | INIT_LIST_HEAD(&wq->flusher_overflow); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2925 |  | 
|  | 2926 | wq->name = name; | 
| Johannes Berg | eb13ba8 | 2008-01-16 09:51:58 +0100 | [diff] [blame] | 2927 | lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); | 
| Oleg Nesterov | cce1a16 | 2007-05-09 02:34:13 -0700 | [diff] [blame] | 2928 | INIT_LIST_HEAD(&wq->list); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2929 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2930 | if (alloc_cwqs(wq) < 0) | 
|  | 2931 | goto err; | 
|  | 2932 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2933 | for_each_cwq_cpu(cpu, wq) { | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2934 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2935 | struct global_cwq *gcwq = get_gcwq(cpu); | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2936 |  | 
| Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2937 | BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2938 | cwq->gcwq = gcwq; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2939 | cwq->wq = wq; | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2940 | cwq->flush_color = -1; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2941 | cwq->max_active = max_active; | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2942 | INIT_LIST_HEAD(&cwq->delayed_works); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2943 | } | 
|  | 2944 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2945 | if (flags & WQ_RESCUER) { | 
|  | 2946 | struct worker *rescuer; | 
|  | 2947 |  | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 2948 | if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2949 | goto err; | 
|  | 2950 |  | 
|  | 2951 | wq->rescuer = rescuer = alloc_worker(); | 
|  | 2952 | if (!rescuer) | 
|  | 2953 | goto err; | 
|  | 2954 |  | 
|  | 2955 | rescuer->task = kthread_create(rescuer_thread, wq, "%s", name); | 
|  | 2956 | if (IS_ERR(rescuer->task)) | 
|  | 2957 | goto err; | 
|  | 2958 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2959 | rescuer->task->flags |= PF_THREAD_BOUND; | 
|  | 2960 | wake_up_process(rescuer->task); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2961 | } | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2962 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2963 | /* | 
|  | 2964 | * workqueue_lock protects global freeze state and workqueues | 
|  | 2965 | * list.  Grab it, set max_active accordingly and add the new | 
|  | 2966 | * workqueue to workqueues list. | 
|  | 2967 | */ | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2968 | spin_lock(&workqueue_lock); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2969 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 2970 | if (workqueue_freezing && wq->flags & WQ_FREEZABLE) | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2971 | for_each_cwq_cpu(cpu, wq) | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2972 | get_cwq(cpu, wq)->max_active = 0; | 
|  | 2973 |  | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2974 | list_add(&wq->list, &workqueues); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2975 |  | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2976 | spin_unlock(&workqueue_lock); | 
|  | 2977 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2978 | return wq; | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2979 | err: | 
|  | 2980 | if (wq) { | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2981 | free_cwqs(wq); | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 2982 | free_mayday_mask(wq->mayday_mask); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2983 | kfree(wq->rescuer); | 
| Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2984 | kfree(wq); | 
|  | 2985 | } | 
|  | 2986 | return NULL; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2987 | } | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2988 | EXPORT_SYMBOL_GPL(__alloc_workqueue_key); | 
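/*
 * Callers normally reach this through the alloc_workqueue() wrapper,
 * which supplies the lockdep key.  A hedged usage sketch; the queue
 * name, flag choices and work item are illustrative only:
 *
 *	struct workqueue_struct *wq;
 *
 *	// unbound queue used on a memory-reclaim path, so it gets a
 *	// rescuer via WQ_MEM_RECLAIM; max_active == 0 means "use the
 *	// default" (WQ_DFL_ACTIVE after clamping above).
 *	wq = alloc_workqueue("my_reclaim", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	queue_work(wq, &my_work);
 */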
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2989 |  | 
|  | 2990 | /** | 
|  | 2991 | * destroy_workqueue - safely terminate a workqueue | 
|  | 2992 | * @wq: target workqueue | 
|  | 2993 | * | 
|  | 2994 | * Safely destroy a workqueue. All work currently pending will be done first. | 
|  | 2995 | */ | 
|  | 2996 | void destroy_workqueue(struct workqueue_struct *wq) | 
|  | 2997 | { | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 2998 | unsigned int flush_cnt = 0; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2999 | unsigned int cpu; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3000 |  | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 3001 | /* | 
|  | 3002 | * Mark @wq dying and drain all pending works.  Once WQ_DYING is | 
|  | 3003 | * set, only chain queueing is allowed.  IOW, only currently | 
|  | 3004 | * pending or running work items on @wq can queue further work | 
|  | 3005 | * items on it.  @wq is flushed repeatedly until it becomes empty. | 
|  | 3006 | * The number of flushes is determined by the depth of chaining and | 
|  | 3007 | * should be relatively short.  Whine if it takes too long. | 
|  | 3008 | */ | 
| Tejun Heo | e41e704 | 2010-08-24 14:22:47 +0200 | [diff] [blame] | 3009 | wq->flags |= WQ_DYING; | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 3010 | reflush: | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3011 | flush_workqueue(wq); | 
|  | 3012 |  | 
| Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 3013 | for_each_cwq_cpu(cpu, wq) { | 
|  | 3014 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
|  | 3015 |  | 
|  | 3016 | if (!cwq->nr_active && list_empty(&cwq->delayed_works)) | 
|  | 3017 | continue; | 
|  | 3018 |  | 
|  | 3019 | if (++flush_cnt == 10 || | 
|  | 3020 | (flush_cnt % 100 == 0 && flush_cnt <= 1000)) | 
|  | 3021 | printk(KERN_WARNING "workqueue %s: flush on " | 
|  | 3022 | "destruction isn't complete after %u tries\n", | 
|  | 3023 | wq->name, flush_cnt); | 
|  | 3024 | goto reflush; | 
|  | 3025 | } | 
|  | 3026 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3027 | /* | 
|  | 3028 | * wq list is used to freeze wq, remove from list after | 
|  | 3029 | * flushing is complete in case freeze races us. | 
|  | 3030 | */ | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 3031 | spin_lock(&workqueue_lock); | 
| Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 3032 | list_del(&wq->list); | 
| Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 3033 | spin_unlock(&workqueue_lock); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3034 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3035 | /* sanity check */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3036 | for_each_cwq_cpu(cpu, wq) { | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3037 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
|  | 3038 | int i; | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3039 |  | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3040 | for (i = 0; i < WORK_NR_COLORS; i++) | 
|  | 3041 | BUG_ON(cwq->nr_in_flight[i]); | 
| Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3042 | BUG_ON(cwq->nr_active); | 
|  | 3043 | BUG_ON(!list_empty(&cwq->delayed_works)); | 
| Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3044 | } | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3045 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3046 | if (wq->flags & WQ_RESCUER) { | 
|  | 3047 | kthread_stop(wq->rescuer->task); | 
| Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 3048 | free_mayday_mask(wq->mayday_mask); | 
| Xiaotian Feng | 8d9df9f | 2010-08-16 09:54:28 +0200 | [diff] [blame] | 3049 | kfree(wq->rescuer); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3050 | } | 
|  | 3051 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3052 | free_cwqs(wq); | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3053 | kfree(wq); | 
|  | 3054 | } | 
|  | 3055 | EXPORT_SYMBOL_GPL(destroy_workqueue); | 
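/*
 * Typical teardown sketch (names hypothetical): stop anything that
 * rearms itself, then destroy the queue.  Once WQ_DYING is set above,
 * only work queued from work already running on the queue is accepted,
 * so self-requeueing items must quiesce first.
 *
 *	cancel_delayed_work_sync(&dev->poll_work);	// stop the rearming
 *	destroy_workqueue(dev->wq);			// flushes what's left
 */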
|  | 3056 |  | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3057 | /** | 
|  | 3058 | * workqueue_set_max_active - adjust max_active of a workqueue | 
|  | 3059 | * @wq: target workqueue | 
|  | 3060 | * @max_active: new max_active value. | 
|  | 3061 | * | 
|  | 3062 | * Set max_active of @wq to @max_active. | 
|  | 3063 | * | 
|  | 3064 | * CONTEXT: | 
|  | 3065 | * Don't call from IRQ context. | 
|  | 3066 | */ | 
|  | 3067 | void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | 
|  | 3068 | { | 
|  | 3069 | unsigned int cpu; | 
|  | 3070 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3071 | max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3072 |  | 
|  | 3073 | spin_lock(&workqueue_lock); | 
|  | 3074 |  | 
|  | 3075 | wq->saved_max_active = max_active; | 
|  | 3076 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3077 | for_each_cwq_cpu(cpu, wq) { | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3078 | struct global_cwq *gcwq = get_gcwq(cpu); | 
|  | 3079 |  | 
|  | 3080 | spin_lock_irq(&gcwq->lock); | 
|  | 3081 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3082 | if (!(wq->flags & WQ_FREEZABLE) || | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3083 | !(gcwq->flags & GCWQ_FREEZING)) | 
|  | 3084 | get_cwq(gcwq->cpu, wq)->max_active = max_active; | 
|  | 3085 |  | 
|  | 3086 | spin_unlock_irq(&gcwq->lock); | 
|  | 3087 | } | 
|  | 3088 |  | 
|  | 3089 | spin_unlock(&workqueue_lock); | 
|  | 3090 | } | 
|  | 3091 | EXPORT_SYMBOL_GPL(workqueue_set_max_active); | 
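/*
 * Illustrative sketch of throttling a queue at runtime; the workqueue
 * and the trigger are hypothetical.  The new limit is clamped and,
 * unless the queue is frozen, applied to every per-cpu cwq.
 *
 *	// allow only one in-flight work item while the device resets
 *	workqueue_set_max_active(dev->wq, 1);
 *	...
 *	// restore a higher limit once the reset is done
 *	workqueue_set_max_active(dev->wq, 16);
 */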
|  | 3092 |  | 
|  | 3093 | /** | 
|  | 3094 | * workqueue_congested - test whether a workqueue is congested | 
|  | 3095 | * @cpu: CPU in question | 
|  | 3096 | * @wq: target workqueue | 
|  | 3097 | * | 
|  | 3098 | * Test whether @wq's cpu workqueue for @cpu is congested.  There is | 
|  | 3099 | * no synchronization around this function and the test result is | 
|  | 3100 | * unreliable and only useful as advisory hints or for debugging. | 
|  | 3101 | * | 
|  | 3102 | * RETURNS: | 
|  | 3103 | * %true if congested, %false otherwise. | 
|  | 3104 | */ | 
|  | 3105 | bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) | 
|  | 3106 | { | 
|  | 3107 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
|  | 3108 |  | 
|  | 3109 | return !list_empty(&cwq->delayed_works); | 
|  | 3110 | } | 
|  | 3111 | EXPORT_SYMBOL_GPL(workqueue_congested); | 
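/*
 * Hedged usage sketch: a producer may use this advisory test to back
 * off instead of piling more work onto a congested cwq.  Names are
 * made up for the example.
 *
 *	if (workqueue_congested(cpu, dev->wq))
 *		return -EBUSY;			// try again later
 *	queue_work_on(cpu, dev->wq, &dev->work);
 */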
|  | 3112 |  | 
|  | 3113 | /** | 
|  | 3114 | * work_cpu - return the last known associated cpu for @work | 
|  | 3115 | * @work: the work of interest | 
|  | 3116 | * | 
|  | 3117 | * RETURNS: | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3118 | * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise. | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3119 | */ | 
|  | 3120 | unsigned int work_cpu(struct work_struct *work) | 
|  | 3121 | { | 
|  | 3122 | struct global_cwq *gcwq = get_work_gcwq(work); | 
|  | 3123 |  | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3124 | return gcwq ? gcwq->cpu : WORK_CPU_NONE; | 
| Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3125 | } | 
|  | 3126 | EXPORT_SYMBOL_GPL(work_cpu); | 
|  | 3127 |  | 
|  | 3128 | /** | 
|  | 3129 | * work_busy - test whether a work is currently pending or running | 
|  | 3130 | * @work: the work to be tested | 
|  | 3131 | * | 
|  | 3132 | * Test whether @work is currently pending or running.  There is no | 
|  | 3133 | * synchronization around this function and the test result is | 
|  | 3134 | * unreliable and only useful as advisory hints or for debugging. | 
|  | 3135 | * Especially for reentrant wqs, the pending state might hide the | 
|  | 3136 | * running state. | 
|  | 3137 | * | 
|  | 3138 | * RETURNS: | 
|  | 3139 | * OR'd bitmask of WORK_BUSY_* bits. | 
|  | 3140 | */ | 
|  | 3141 | unsigned int work_busy(struct work_struct *work) | 
|  | 3142 | { | 
|  | 3143 | struct global_cwq *gcwq = get_work_gcwq(work); | 
|  | 3144 | unsigned long flags; | 
|  | 3145 | unsigned int ret = 0; | 
|  | 3146 |  | 
|  | 3147 | if (!gcwq) | 
|  | 3148 | return 0; | 
|  | 3149 |  | 
|  | 3150 | spin_lock_irqsave(&gcwq->lock, flags); | 
|  | 3151 |  | 
|  | 3152 | if (work_pending(work)) | 
|  | 3153 | ret |= WORK_BUSY_PENDING; | 
|  | 3154 | if (find_worker_executing_work(gcwq, work)) | 
|  | 3155 | ret |= WORK_BUSY_RUNNING; | 
|  | 3156 |  | 
|  | 3157 | spin_unlock_irqrestore(&gcwq->lock, flags); | 
|  | 3158 |  | 
|  | 3159 | return ret; | 
|  | 3160 | } | 
|  | 3161 | EXPORT_SYMBOL_GPL(work_busy); | 
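/*
 * Illustrative sketch of interpreting the returned bitmask; the result
 * is purely advisory, as noted above.
 *
 *	unsigned int busy = work_busy(&dev->work);
 *
 *	if (busy & WORK_BUSY_PENDING)
 *		pr_debug("work is queued but not yet running\n");
 *	if (busy & WORK_BUSY_RUNNING)
 *		pr_debug("work is currently executing\n");
 */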
|  | 3162 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3163 | /* | 
|  | 3164 | * CPU hotplug. | 
|  | 3165 | * | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3166 | * There are two challenges in supporting CPU hotplug.  Firstly, there | 
|  | 3167 | * are a lot of assumptions on strong associations among work, cwq and | 
|  | 3168 | * gcwq which make migrating pending and scheduled works very | 
|  | 3169 | * difficult to implement without impacting hot paths.  Secondly, | 
|  | 3170 | * gcwqs serve a mix of short, long and very long running works, making | 
|  | 3171 | * blocked draining impractical. | 
|  | 3172 | * | 
|  | 3173 | * This is solved by allowing a gcwq to be detached from CPU, running | 
|  | 3174 | * it with unbound (rogue) workers and allowing it to be reattached | 
|  | 3175 | * later if the cpu comes back online.  A separate thread is created | 
|  | 3176 | * to govern a gcwq in such state and is called the trustee of the | 
|  | 3177 | * gcwq. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3178 | * | 
|  | 3179 | * Trustee states and their descriptions. | 
|  | 3180 | * | 
|  | 3181 | * START	Command state used on startup.  On CPU_DOWN_PREPARE, a | 
|  | 3182 | *		new trustee is started with this state. | 
|  | 3183 | * | 
|  | 3184 | * IN_CHARGE	Once started, trustee will enter this state after | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3185 | *		assuming the manager role and making all existing | 
|  | 3186 | *		workers rogue.  DOWN_PREPARE waits for trustee to | 
|  | 3187 | *		enter this state.  After reaching IN_CHARGE, trustee | 
|  | 3188 | *		tries to execute the pending worklist until it's empty | 
|  | 3189 | *		and the state is set to BUTCHER, or the state is set | 
|  | 3190 | *		to RELEASE. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3191 | * | 
|  | 3192 | * BUTCHER	Command state which is set by the cpu callback after | 
|  | 3193 | *		the cpu has gone down.  Once this state is set, the trustee | 
|  | 3194 | *		knows that there will be no new works on the worklist | 
|  | 3195 | *		and once the worklist is empty it can proceed to | 
|  | 3196 | *		killing idle workers. | 
|  | 3197 | * | 
|  | 3198 | * RELEASE	Command state which is set by the cpu callback if the | 
|  | 3199 | *		cpu down has been canceled or it has come online | 
|  | 3200 | *		again.  After recognizing this state, trustee stops | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3201 | *		trying to drain or butcher and clears ROGUE, rebinds | 
|  | 3202 | *		all remaining workers back to the cpu and releases | 
|  | 3203 | *		manager role. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3204 | * | 
|  | 3205 | * DONE		Trustee will enter this state after BUTCHER or RELEASE | 
|  | 3206 | *		is complete. | 
|  | 3207 | * | 
|  | 3208 | *          trustee                 CPU                draining | 
|  | 3209 | *         took over                down               complete | 
|  | 3210 | * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE | 
|  | 3211 | *                        |                     |                  ^ | 
|  | 3212 | *                        | CPU is back online  v   return workers | | 
|  | 3213 | *                         ----------------> RELEASE -------------- | 
|  | 3214 | */ | 
|  | 3215 |  | 
|  | 3216 | /** | 
|  | 3217 | * trustee_wait_event_timeout - timed event wait for trustee | 
|  | 3218 | * @cond: condition to wait for | 
|  | 3219 | * @timeout: timeout in jiffies | 
|  | 3220 | * | 
|  | 3221 | * wait_event_timeout() for trustee to use.  Handles locking and | 
|  | 3222 | * checks for RELEASE request. | 
|  | 3223 | * | 
|  | 3224 | * CONTEXT: | 
|  | 3225 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
|  | 3226 | * multiple times.  To be used by trustee. | 
|  | 3227 | * | 
|  | 3228 | * RETURNS: | 
|  | 3229 | * Positive value indicating the time left if @cond is satisfied, 0 if timed | 
|  | 3230 | * out, -1 if canceled. | 
|  | 3231 | */ | 
|  | 3232 | #define trustee_wait_event_timeout(cond, timeout) ({			\ | 
|  | 3233 | long __ret = (timeout);						\ | 
|  | 3234 | while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\ | 
|  | 3235 | __ret) {							\ | 
|  | 3236 | spin_unlock_irq(&gcwq->lock);				\ | 
|  | 3237 | __wait_event_timeout(gcwq->trustee_wait, (cond) ||	\ | 
|  | 3238 | (gcwq->trustee_state == TRUSTEE_RELEASE),	\ | 
|  | 3239 | __ret);						\ | 
|  | 3240 | spin_lock_irq(&gcwq->lock);				\ | 
|  | 3241 | }								\ | 
|  | 3242 | gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\ | 
|  | 3243 | }) | 
|  | 3244 |  | 
|  | 3245 | /** | 
|  | 3246 | * trustee_wait_event - event wait for trustee | 
|  | 3247 | * @cond: condition to wait for | 
|  | 3248 | * | 
|  | 3249 | * wait_event() for trustee to use.  Automatically handles locking and | 
|  | 3250 | * checks for RELEASE (cancellation) requests. | 
|  | 3251 | * | 
|  | 3252 | * CONTEXT: | 
|  | 3253 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
|  | 3254 | * multiple times.  To be used by trustee. | 
|  | 3255 | * | 
|  | 3256 | * RETURNS: | 
|  | 3257 | * 0 if @cond is satisfied, -1 if canceled. | 
|  | 3258 | */ | 
|  | 3259 | #define trustee_wait_event(cond) ({					\ | 
|  | 3260 | long __ret1;							\ | 
|  | 3261 | __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\ | 
|  | 3262 | __ret1 < 0 ? -1 : 0;						\ | 
|  | 3263 | }) | 
|  | 3264 |  | 
|  | 3265 | static int __cpuinit trustee_thread(void *__gcwq) | 
|  | 3266 | { | 
|  | 3267 | struct global_cwq *gcwq = __gcwq; | 
|  | 3268 | struct worker *worker; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3269 | struct work_struct *work; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3270 | struct hlist_node *pos; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3271 | long rc; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3272 | int i; | 
|  | 3273 |  | 
|  | 3274 | BUG_ON(gcwq->cpu != smp_processor_id()); | 
|  | 3275 |  | 
|  | 3276 | spin_lock_irq(&gcwq->lock); | 
|  | 3277 | /* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3278 | * Claim the manager position and make all workers rogue. | 
|  | 3279 | * Trustee must be bound to the target cpu and can't be | 
|  | 3280 | * cancelled. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3281 | */ | 
|  | 3282 | BUG_ON(gcwq->cpu != smp_processor_id()); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3283 | rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS)); | 
|  | 3284 | BUG_ON(rc < 0); | 
|  | 3285 |  | 
|  | 3286 | gcwq->flags |= GCWQ_MANAGING_WORKERS; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3287 |  | 
|  | 3288 | list_for_each_entry(worker, &gcwq->idle_list, entry) | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3289 | worker->flags |= WORKER_ROGUE; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3290 |  | 
|  | 3291 | for_each_busy_worker(worker, i, pos, gcwq) | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3292 | worker->flags |= WORKER_ROGUE; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3293 |  | 
|  | 3294 | /* | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3295 | * Call schedule() so that we cross rq->lock and thus can | 
|  | 3296 | * guarantee sched callbacks see the rogue flag.  This is | 
|  | 3297 | * necessary as scheduler callbacks may be invoked from other | 
|  | 3298 | * cpus. | 
|  | 3299 | */ | 
|  | 3300 | spin_unlock_irq(&gcwq->lock); | 
|  | 3301 | schedule(); | 
|  | 3302 | spin_lock_irq(&gcwq->lock); | 
|  | 3303 |  | 
|  | 3304 | /* | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3305 | * Sched callbacks are disabled now.  Zap nr_running.  After | 
|  | 3306 | * this, nr_running stays zero and need_more_worker() and | 
|  | 3307 | * keep_working() are always true as long as the worklist is | 
|  | 3308 | * not empty. | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3309 | */ | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3310 | atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3311 |  | 
|  | 3312 | spin_unlock_irq(&gcwq->lock); | 
|  | 3313 | del_timer_sync(&gcwq->idle_timer); | 
|  | 3314 | spin_lock_irq(&gcwq->lock); | 
|  | 3315 |  | 
|  | 3316 | /* | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3317 | * We're now in charge.  Notify and proceed to drain.  We need | 
|  | 3318 | * to keep the gcwq running during the whole CPU down | 
|  | 3319 | * procedure as other cpu hotunplug callbacks may need to | 
|  | 3320 | * flush currently running tasks. | 
|  | 3321 | */ | 
|  | 3322 | gcwq->trustee_state = TRUSTEE_IN_CHARGE; | 
|  | 3323 | wake_up_all(&gcwq->trustee_wait); | 
|  | 3324 |  | 
|  | 3325 | /* | 
|  | 3326 | * The original cpu is in the process of dying and may go away | 
|  | 3327 | * anytime now.  When that happens, we and all workers would | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3328 | * be migrated to other cpus.  Try draining any remaining work.  We | 
|  | 3329 | * want to get it over with ASAP - spam rescuers, wake up as | 
|  | 3330 | * many idlers as necessary and create new ones till the | 
|  | 3331 | * worklist is empty.  Note that if the gcwq is frozen, there | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3332 | * may be frozen works in freezable cwqs.  Don't declare | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3333 | * completion while frozen. | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3334 | */ | 
|  | 3335 | while (gcwq->nr_workers != gcwq->nr_idle || | 
|  | 3336 | gcwq->flags & GCWQ_FREEZING || | 
|  | 3337 | gcwq->trustee_state == TRUSTEE_IN_CHARGE) { | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3338 | int nr_works = 0; | 
|  | 3339 |  | 
|  | 3340 | list_for_each_entry(work, &gcwq->worklist, entry) { | 
|  | 3341 | send_mayday(work); | 
|  | 3342 | nr_works++; | 
|  | 3343 | } | 
|  | 3344 |  | 
|  | 3345 | list_for_each_entry(worker, &gcwq->idle_list, entry) { | 
|  | 3346 | if (!nr_works--) | 
|  | 3347 | break; | 
|  | 3348 | wake_up_process(worker->task); | 
|  | 3349 | } | 
|  | 3350 |  | 
|  | 3351 | if (need_to_create_worker(gcwq)) { | 
|  | 3352 | spin_unlock_irq(&gcwq->lock); | 
|  | 3353 | worker = create_worker(gcwq, false); | 
|  | 3354 | spin_lock_irq(&gcwq->lock); | 
|  | 3355 | if (worker) { | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3356 | worker->flags |= WORKER_ROGUE; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3357 | start_worker(worker); | 
|  | 3358 | } | 
|  | 3359 | } | 
|  | 3360 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3361 | /* give a breather */ | 
|  | 3362 | if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) | 
|  | 3363 | break; | 
|  | 3364 | } | 
|  | 3365 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3366 | /* | 
|  | 3367 | * Either all works have been scheduled and cpu is down, or | 
|  | 3368 | * cpu down has already been canceled.  Wait for and butcher | 
|  | 3369 | * all workers till we're canceled. | 
|  | 3370 | */ | 
|  | 3371 | do { | 
|  | 3372 | rc = trustee_wait_event(!list_empty(&gcwq->idle_list)); | 
|  | 3373 | while (!list_empty(&gcwq->idle_list)) | 
|  | 3374 | destroy_worker(list_first_entry(&gcwq->idle_list, | 
|  | 3375 | struct worker, entry)); | 
|  | 3376 | } while (gcwq->nr_workers && rc >= 0); | 
|  | 3377 |  | 
|  | 3378 | /* | 
|  | 3379 | * At this point, either draining has completed and no worker | 
|  | 3380 | * is left, or cpu down has been canceled or the cpu is being | 
|  | 3381 | * brought back up.  There shouldn't be any idle workers left. | 
|  | 3382 | * Tell each remaining busy worker to rebind once it finishes its | 
|  | 3383 | * currently scheduled works by scheduling the rebind_work. | 
|  | 3384 | */ | 
|  | 3385 | WARN_ON(!list_empty(&gcwq->idle_list)); | 
|  | 3386 |  | 
|  | 3387 | for_each_busy_worker(worker, i, pos, gcwq) { | 
|  | 3388 | struct work_struct *rebind_work = &worker->rebind_work; | 
|  | 3389 |  | 
|  | 3390 | /* | 
|  | 3391 | * Rebind_work may race with future cpu hotplug | 
|  | 3392 | * operations.  Use a separate flag to mark that | 
|  | 3393 | * rebinding is scheduled. | 
|  | 3394 | */ | 
| Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3395 | worker->flags |= WORKER_REBIND; | 
|  | 3396 | worker->flags &= ~WORKER_ROGUE; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3397 |  | 
|  | 3398 | /* queue rebind_work, wq doesn't matter, use the default one */ | 
|  | 3399 | if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, | 
|  | 3400 | work_data_bits(rebind_work))) | 
|  | 3401 | continue; | 
|  | 3402 |  | 
|  | 3403 | debug_work_activate(rebind_work); | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3404 | insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3405 | worker->scheduled.next, | 
|  | 3406 | work_color_to_flags(WORK_NO_COLOR)); | 
|  | 3407 | } | 
|  | 3408 |  | 
|  | 3409 | /* relinquish manager role */ | 
|  | 3410 | gcwq->flags &= ~GCWQ_MANAGING_WORKERS; | 
|  | 3411 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3412 | /* notify completion */ | 
|  | 3413 | gcwq->trustee = NULL; | 
|  | 3414 | gcwq->trustee_state = TRUSTEE_DONE; | 
|  | 3415 | wake_up_all(&gcwq->trustee_wait); | 
|  | 3416 | spin_unlock_irq(&gcwq->lock); | 
|  | 3417 | return 0; | 
|  | 3418 | } | 
|  | 3419 |  | 
|  | 3420 | /** | 
|  | 3421 | * wait_trustee_state - wait for trustee to enter the specified state | 
|  | 3422 | * @gcwq: gcwq the trustee of interest belongs to | 
|  | 3423 | * @state: target state to wait for | 
|  | 3424 | * | 
|  | 3425 | * Wait for the trustee to reach @state.  DONE is already matched. | 
|  | 3426 | * | 
|  | 3427 | * CONTEXT: | 
|  | 3428 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 
|  | 3429 | * multiple times.  To be used by cpu_callback. | 
|  | 3430 | */ | 
|  | 3431 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) | 
| Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 3432 | __releases(&gcwq->lock) | 
|  | 3433 | __acquires(&gcwq->lock) | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3434 | { | 
|  | 3435 | if (!(gcwq->trustee_state == state || | 
|  | 3436 | gcwq->trustee_state == TRUSTEE_DONE)) { | 
|  | 3437 | spin_unlock_irq(&gcwq->lock); | 
|  | 3438 | __wait_event(gcwq->trustee_wait, | 
|  | 3439 | gcwq->trustee_state == state || | 
|  | 3440 | gcwq->trustee_state == TRUSTEE_DONE); | 
|  | 3441 | spin_lock_irq(&gcwq->lock); | 
|  | 3442 | } | 
|  | 3443 | } | 
|  | 3444 |  | 
| Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3445 | static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | 
|  | 3446 | unsigned long action, | 
|  | 3447 | void *hcpu) | 
|  | 3448 | { | 
|  | 3449 | unsigned int cpu = (unsigned long)hcpu; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3450 | struct global_cwq *gcwq = get_gcwq(cpu); | 
|  | 3451 | struct task_struct *new_trustee = NULL; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3452 | struct worker *uninitialized_var(new_worker); | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3453 | unsigned long flags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3454 |  | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 3455 | action &= ~CPU_TASKS_FROZEN; | 
|  | 3456 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3457 | switch (action) { | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3458 | case CPU_DOWN_PREPARE: | 
|  | 3459 | new_trustee = kthread_create(trustee_thread, gcwq, | 
|  | 3460 | "workqueue_trustee/%d\n", cpu); | 
|  | 3461 | if (IS_ERR(new_trustee)) | 
|  | 3462 | return notifier_from_errno(PTR_ERR(new_trustee)); | 
|  | 3463 | kthread_bind(new_trustee, cpu); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3464 | /* fall through */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3465 | case CPU_UP_PREPARE: | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3466 | BUG_ON(gcwq->first_idle); | 
|  | 3467 | new_worker = create_worker(gcwq, false); | 
|  | 3468 | if (!new_worker) { | 
|  | 3469 | if (new_trustee) | 
|  | 3470 | kthread_stop(new_trustee); | 
|  | 3471 | return NOTIFY_BAD; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3472 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3473 | } | 
|  | 3474 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3475 | /* some are called w/ irq disabled, don't disturb irq status */ | 
|  | 3476 | spin_lock_irqsave(&gcwq->lock, flags); | 
|  | 3477 |  | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 3478 | switch (action) { | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3479 | case CPU_DOWN_PREPARE: | 
|  | 3480 | /* initialize trustee and tell it to acquire the gcwq */ | 
|  | 3481 | BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE); | 
|  | 3482 | gcwq->trustee = new_trustee; | 
|  | 3483 | gcwq->trustee_state = TRUSTEE_START; | 
|  | 3484 | wake_up_process(gcwq->trustee); | 
|  | 3485 | wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3486 | /* fall through */ | 
|  | 3487 | case CPU_UP_PREPARE: | 
|  | 3488 | BUG_ON(gcwq->first_idle); | 
|  | 3489 | gcwq->first_idle = new_worker; | 
|  | 3490 | break; | 
|  | 3491 |  | 
|  | 3492 | case CPU_DYING: | 
|  | 3493 | /* | 
|  | 3494 | * Before this, the trustee and all workers except for | 
|  | 3495 | * the ones which are still executing works from | 
|  | 3496 | * before the last CPU down must be on the cpu.  After | 
|  | 3497 | * this, they'll all be scattered across other cpus. | 
|  | 3498 | */ | 
|  | 3499 | gcwq->flags |= GCWQ_DISASSOCIATED; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3500 | break; | 
|  | 3501 |  | 
| Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 3502 | case CPU_POST_DEAD: | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3503 | gcwq->trustee_state = TRUSTEE_BUTCHER; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3504 | /* fall through */ | 
|  | 3505 | case CPU_UP_CANCELED: | 
|  | 3506 | destroy_worker(gcwq->first_idle); | 
|  | 3507 | gcwq->first_idle = NULL; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3508 | break; | 
|  | 3509 |  | 
|  | 3510 | case CPU_DOWN_FAILED: | 
|  | 3511 | case CPU_ONLINE: | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3512 | gcwq->flags &= ~GCWQ_DISASSOCIATED; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3513 | if (gcwq->trustee_state != TRUSTEE_DONE) { | 
|  | 3514 | gcwq->trustee_state = TRUSTEE_RELEASE; | 
|  | 3515 | wake_up_process(gcwq->trustee); | 
|  | 3516 | wait_trustee_state(gcwq, TRUSTEE_DONE); | 
|  | 3517 | } | 
|  | 3518 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3519 | /* | 
|  | 3520 | * Trustee is done and there might be no worker left. | 
|  | 3521 | * Put the first_idle in and request a real manager to | 
|  | 3522 | * take a look. | 
|  | 3523 | */ | 
|  | 3524 | spin_unlock_irq(&gcwq->lock); | 
|  | 3525 | kthread_bind(gcwq->first_idle->task, cpu); | 
|  | 3526 | spin_lock_irq(&gcwq->lock); | 
|  | 3527 | gcwq->flags |= GCWQ_MANAGE_WORKERS; | 
|  | 3528 | start_worker(gcwq->first_idle); | 
|  | 3529 | gcwq->first_idle = NULL; | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3530 | break; | 
| Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 3531 | } | 
|  | 3532 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3533 | spin_unlock_irqrestore(&gcwq->lock, flags); | 
|  | 3534 |  | 
| Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3535 | return notifier_from_errno(0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3536 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3537 |  | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3538 | #ifdef CONFIG_SMP | 
| Rusty Russell | 8ccad40 | 2009-01-16 15:31:15 -0800 | [diff] [blame] | 3539 |  | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3540 | struct work_for_cpu { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3541 | struct completion completion; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3542 | long (*fn)(void *); | 
|  | 3543 | void *arg; | 
|  | 3544 | long ret; | 
|  | 3545 | }; | 
|  | 3546 |  | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3547 | static int do_work_for_cpu(void *_wfc) | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3548 | { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3549 | struct work_for_cpu *wfc = _wfc; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3550 | wfc->ret = wfc->fn(wfc->arg); | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3551 | complete(&wfc->completion); | 
|  | 3552 | return 0; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3553 | } | 
|  | 3554 |  | 
|  | 3555 | /** | 
|  | 3556 | * work_on_cpu - run a function in user context on a particular cpu | 
|  | 3557 | * @cpu: the cpu to run on | 
|  | 3558 | * @fn: the function to run | 
|  | 3559 | * @arg: the function arg | 
|  | 3560 | * | 
| Rusty Russell | 31ad908 | 2009-01-16 15:31:15 -0800 | [diff] [blame] | 3561 | * This will return the value @fn returns. | 
|  | 3562 | * It is up to the caller to ensure that the cpu doesn't go offline. | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3563 | * The caller must not hold any locks which would prevent @fn from completing. | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3564 | */ | 
|  | 3565 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | 
|  | 3566 | { | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3567 | struct task_struct *sub_thread; | 
|  | 3568 | struct work_for_cpu wfc = { | 
|  | 3569 | .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), | 
|  | 3570 | .fn = fn, | 
|  | 3571 | .arg = arg, | 
|  | 3572 | }; | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3573 |  | 
| Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3574 | sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); | 
|  | 3575 | if (IS_ERR(sub_thread)) | 
|  | 3576 | return PTR_ERR(sub_thread); | 
|  | 3577 | kthread_bind(sub_thread, cpu); | 
|  | 3578 | wake_up_process(sub_thread); | 
|  | 3579 | wait_for_completion(&wfc.completion); | 
| Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3580 | return wfc.ret; | 
|  | 3581 | } | 
|  | 3582 | EXPORT_SYMBOL_GPL(work_on_cpu); | 
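/*
 * Hedged usage sketch: run a function that must execute on a specific
 * CPU (e.g. one that touches per-cpu state) from process context.  The
 * callback, request struct and helper are hypothetical; the
 * get/put_online_cpus() pair keeps the target CPU from going offline,
 * as the comment above requires of the caller.
 *
 *	static long read_foo(void *arg)
 *	{
 *		struct foo_req *req = arg;
 *
 *		req->val = this_cpu_read_foo();		// hypothetical helper
 *		return 0;
 *	}
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(cpu, read_foo, &req);
 *	put_online_cpus();
 */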
|  | 3583 | #endif /* CONFIG_SMP */ | 
|  | 3584 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3585 | #ifdef CONFIG_FREEZER | 
| Rusty Russell | e7577c5 | 2009-01-01 10:12:25 +1030 | [diff] [blame] | 3586 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3587 | /** | 
|  | 3588 | * freeze_workqueues_begin - begin freezing workqueues | 
|  | 3589 | * | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3590 | * Start freezing workqueues.  After this function returns, all freezable | 
|  | 3591 | * workqueues will queue new works to their frozen_works list instead of | 
|  | 3592 | * gcwq->worklist. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3593 | * | 
|  | 3594 | * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3595 | * Grabs and releases workqueue_lock and gcwq->lock's. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3596 | */ | 
|  | 3597 | void freeze_workqueues_begin(void) | 
|  | 3598 | { | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3599 | unsigned int cpu; | 
|  | 3600 |  | 
|  | 3601 | spin_lock(&workqueue_lock); | 
|  | 3602 |  | 
|  | 3603 | BUG_ON(workqueue_freezing); | 
|  | 3604 | workqueue_freezing = true; | 
|  | 3605 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3606 | for_each_gcwq_cpu(cpu) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3607 | struct global_cwq *gcwq = get_gcwq(cpu); | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3608 | struct workqueue_struct *wq; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3609 |  | 
|  | 3610 | spin_lock_irq(&gcwq->lock); | 
|  | 3611 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3612 | BUG_ON(gcwq->flags & GCWQ_FREEZING); | 
|  | 3613 | gcwq->flags |= GCWQ_FREEZING; | 
|  | 3614 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3615 | list_for_each_entry(wq, &workqueues, list) { | 
|  | 3616 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
|  | 3617 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3618 | if (cwq && wq->flags & WQ_FREEZABLE) | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3619 | cwq->max_active = 0; | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3620 | } | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3621 |  | 
|  | 3622 | spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3623 | } | 
|  | 3624 |  | 
|  | 3625 | spin_unlock(&workqueue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3626 | } | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3627 |  | 
|  | 3628 | /** | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3629 | * freeze_workqueues_busy - are freezable workqueues still busy? | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3630 | * | 
|  | 3631 | * Check whether freezing is complete.  This function must be called | 
|  | 3632 | * between freeze_workqueues_begin() and thaw_workqueues(). | 
|  | 3633 | * | 
|  | 3634 | * CONTEXT: | 
|  | 3635 | * Grabs and releases workqueue_lock. | 
|  | 3636 | * | 
|  | 3637 | * RETURNS: | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3638 | * %true if some freezable workqueues are still busy.  %false if freezing | 
|  | 3639 | * is complete. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3640 | */ | 
|  | 3641 | bool freeze_workqueues_busy(void) | 
|  | 3642 | { | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3643 | unsigned int cpu; | 
|  | 3644 | bool busy = false; | 
|  | 3645 |  | 
|  | 3646 | spin_lock(&workqueue_lock); | 
|  | 3647 |  | 
|  | 3648 | BUG_ON(!workqueue_freezing); | 
|  | 3649 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3650 | for_each_gcwq_cpu(cpu) { | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3651 | struct workqueue_struct *wq; | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3652 | /* | 
|  | 3653 | * nr_active is monotonically decreasing.  It's safe | 
|  | 3654 | * to peek without lock. | 
|  | 3655 | */ | 
|  | 3656 | list_for_each_entry(wq, &workqueues, list) { | 
|  | 3657 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
|  | 3658 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3659 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3660 | continue; | 
|  | 3661 |  | 
|  | 3662 | BUG_ON(cwq->nr_active < 0); | 
|  | 3663 | if (cwq->nr_active) { | 
|  | 3664 | busy = true; | 
|  | 3665 | goto out_unlock; | 
|  | 3666 | } | 
|  | 3667 | } | 
|  | 3668 | } | 
|  | 3669 | out_unlock: | 
|  | 3670 | spin_unlock(&workqueue_lock); | 
|  | 3671 | return busy; | 
|  | 3672 | } | 
|  | 3673 |  | 
|  | 3674 | /** | 
|  | 3675 | * thaw_workqueues - thaw workqueues | 
|  | 3676 | * | 
|  | 3677 | * Thaw workqueues.  Normal queueing is restored and all collected | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 3678 | * frozen works are transferred to their respective gcwq worklists. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3679 | * | 
|  | 3680 | * CONTEXT: | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3681 | * Grabs and releases workqueue_lock and gcwq->lock's. | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3682 | */ | 
|  | 3683 | void thaw_workqueues(void) | 
|  | 3684 | { | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3685 | unsigned int cpu; | 
|  | 3686 |  | 
|  | 3687 | spin_lock(&workqueue_lock); | 
|  | 3688 |  | 
|  | 3689 | if (!workqueue_freezing) | 
|  | 3690 | goto out_unlock; | 
|  | 3691 |  | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3692 | for_each_gcwq_cpu(cpu) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3693 | struct global_cwq *gcwq = get_gcwq(cpu); | 
| Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3694 | struct workqueue_struct *wq; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3695 |  | 
|  | 3696 | spin_lock_irq(&gcwq->lock); | 
|  | 3697 |  | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3698 | BUG_ON(!(gcwq->flags & GCWQ_FREEZING)); | 
|  | 3699 | gcwq->flags &= ~GCWQ_FREEZING; | 
|  | 3700 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3701 | list_for_each_entry(wq, &workqueues, list) { | 
|  | 3702 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 
|  | 3703 |  | 
| Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3704 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3705 | continue; | 
|  | 3706 |  | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3707 | /* restore max_active and repopulate worklist */ | 
|  | 3708 | cwq->max_active = wq->saved_max_active; | 
|  | 3709 |  | 
|  | 3710 | while (!list_empty(&cwq->delayed_works) && | 
|  | 3711 | cwq->nr_active < cwq->max_active) | 
|  | 3712 | cwq_activate_first_delayed(cwq); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3713 | } | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3714 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3715 | wake_up_worker(gcwq); | 
|  | 3716 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3717 | spin_unlock_irq(&gcwq->lock); | 
| Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3718 | } | 
|  | 3719 |  | 
|  | 3720 | workqueue_freezing = false; | 
|  | 3721 | out_unlock: | 
|  | 3722 | spin_unlock(&workqueue_lock); | 
|  | 3723 | } | 
|  | 3724 | #endif /* CONFIG_FREEZER */ | 
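/*
 * For reference, the freezer is expected to drive the three calls above
 * roughly like this (a sketch; the real sequencing lives in the suspend
 * code, not here):
 *
 *	freeze_workqueues_begin();		// stop issuing new work
 *	while (freeze_workqueues_busy())
 *		msleep(10);			// wait for in-flight items
 *	...system is suspended and resumed...
 *	thaw_workqueues();			// re-issue collected work
 */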
|  | 3725 |  | 
| Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 3726 | static int __init init_workqueues(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3727 | { | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3728 | unsigned int cpu; | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3729 | int i; | 
| Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3730 |  | 
| Tejun Heo | f650094 | 2010-08-09 11:50:34 +0200 | [diff] [blame] | 3731 | cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3732 |  | 
|  | 3733 | /* initialize gcwqs */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3734 | for_each_gcwq_cpu(cpu) { | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3735 | struct global_cwq *gcwq = get_gcwq(cpu); | 
|  | 3736 |  | 
|  | 3737 | spin_lock_init(&gcwq->lock); | 
| Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 3738 | INIT_LIST_HEAD(&gcwq->worklist); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3739 | gcwq->cpu = cpu; | 
| Tejun Heo | 477a3c3 | 2010-08-31 10:54:35 +0200 | [diff] [blame] | 3740 | gcwq->flags |= GCWQ_DISASSOCIATED; | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3741 |  | 
| Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3742 | INIT_LIST_HEAD(&gcwq->idle_list); | 
|  | 3743 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) | 
|  | 3744 | INIT_HLIST_HEAD(&gcwq->busy_hash[i]); | 
|  | 3745 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3746 | init_timer_deferrable(&gcwq->idle_timer); | 
|  | 3747 | gcwq->idle_timer.function = idle_worker_timeout; | 
|  | 3748 | gcwq->idle_timer.data = (unsigned long)gcwq; | 
|  | 3749 |  | 
|  | 3750 | setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout, | 
|  | 3751 | (unsigned long)gcwq); | 
|  | 3752 |  | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3753 | ida_init(&gcwq->worker_ida); | 
| Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3754 |  | 
|  | 3755 | gcwq->trustee_state = TRUSTEE_DONE; | 
|  | 3756 | init_waitqueue_head(&gcwq->trustee_wait); | 
| Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3757 | } | 
|  | 3758 |  | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3759 | /* create the initial worker */ | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3760 | for_each_online_gcwq_cpu(cpu) { | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3761 | struct global_cwq *gcwq = get_gcwq(cpu); | 
|  | 3762 | struct worker *worker; | 
|  | 3763 |  | 
| Tejun Heo | 477a3c3 | 2010-08-31 10:54:35 +0200 | [diff] [blame] | 3764 | if (cpu != WORK_CPU_UNBOUND) | 
|  | 3765 | gcwq->flags &= ~GCWQ_DISASSOCIATED; | 
| Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3766 | worker = create_worker(gcwq, true); | 
|  | 3767 | BUG_ON(!worker); | 
|  | 3768 | spin_lock_irq(&gcwq->lock); | 
|  | 3769 | start_worker(worker); | 
|  | 3770 | spin_unlock_irq(&gcwq->lock); | 
|  | 3771 | } | 
|  | 3772 |  | 
| Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3773 | system_wq = alloc_workqueue("events", 0, 0); | 
|  | 3774 | system_long_wq = alloc_workqueue("events_long", 0, 0); | 
|  | 3775 | system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); | 
| Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3776 | system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, | 
|  | 3777 | WQ_UNBOUND_MAX_ACTIVE); | 
| Hitoshi Mitake | e5cba24 | 2010-11-26 12:06:44 +0100 | [diff] [blame] | 3778 | BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || | 
|  | 3779 | !system_unbound_wq); | 
| Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 3780 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3781 | } | 
| Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 3782 | early_initcall(init_workqueues); |