/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/bug.h>

#include "workqueue_sched.h"

enum {
	/* global_cwq flags */
	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */

	/* pool flags */
	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	POOL_MANAGING_WORKERS	= 1 << 1,	/* managing workers */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after a failure */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
	HIGHPRI_NICE_LEVEL	= -20,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from the local cpu.  Either disabling preemption
 *    on the local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;
struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on the idle list or on the busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
						/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};

struct worker_pool {
	struct global_cwq	*gcwq;		/* I: the owning gcwq */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */
	struct worker		*first_idle;	/* L: first idle worker */
};

/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	/* workers are chained either in busy_hash or pool idle_list */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct worker_pool	pools[2];	/* normal and highpri pools */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct	*wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)			free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif
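
/*
 * Editor's illustrative sketch (not part of the original file): the
 * mayday_mask_t wrappers above let callers manage the set of CPUs that
 * have requested a rescuer without caring whether the kernel is SMP or
 * UP.  A hypothetical helper mirroring how the rescuer path further
 * below uses them would look roughly like:
 *
 *	static void note_mayday(struct workqueue_struct *wq, unsigned int cpu)
 *	{
 *		if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
 *			wake_up_process(wq->rescuer->task);
 *	}
 *
 * On UP the whole mask degenerates to bit 0 of an unsigned long, which
 * is why the cpu argument is ignored by the !SMP variants.
 */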

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* W: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* W: drain in progress */
	int			saved_max_active; /* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[];		/* I: workqueue name */
};

/* see the comment above the definition of WQ_POWER_EFFICIENT */
#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
static bool wq_power_efficient = true;
#else
static bool wq_power_efficient;
#endif

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
struct workqueue_struct *system_freezable_wq __read_mostly;
struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
struct workqueue_struct *system_power_efficient_wq __read_mostly;
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
EXPORT_SYMBOL_GPL(system_freezable_wq);
EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
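
/*
 * Editor's usage sketch (not part of the original file): the exported
 * system workqueues above are what most clients of this file actually
 * see.  A typical user never touches gcwqs or worker pools directly;
 * with hypothetical names my_work/my_work_fn it looks like:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		// runs in process context on some shared pool worker
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	...
 *	queue_work(system_wq, &my_work);
 *
 * Everything below is the machinery that turns that queue_work() call
 * into an execution of the work function on a pool worker.
 */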

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define for_each_worker_pool(pool, gcwq)				\
	for ((pool) = &(gcwq)->pools[0];				\
	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)

static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}

/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to the
 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
	[0 ... NR_WORKER_POOLS - 1]	= ATOMIC_INIT(0),	/* always 0 */
};

static int worker_thread(void *__worker);

static int worker_pool_pri(struct worker_pool *pool)
{
	return pool - pool->gcwq->pools;
}

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_pool_nr_running(struct worker_pool *pool)
{
	int cpu = pool->gcwq->cpu;
	int idx = worker_pool_pri(pool);

	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(pool_nr_running, cpu)[idx];
	else
		return &unbound_pool_nr_running[idx];
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids))
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
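
/*
 * Editor's note (not part of the original file): the three helpers above
 * round-trip a flush color through the flag bits of work->data.  For any
 * color c in [0, WORK_NR_COLORS),
 *
 *	flags = work_color_to_flags(c);
 *	((flags >> WORK_STRUCT_COLOR_SHIFT) &
 *	 ((1 << WORK_STRUCT_COLOR_BITS) - 1)) == c
 *
 * which is exactly what get_work_color() extracts once those flags have
 * been OR'd into a queued work item's data word.
 */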

/*
 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 * cleared and the work data contains the cpu number it was last on.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - i.e. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
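
/*
 * Editor's sketch of the two encodings described above (the layout is
 * illustrative, the authoritative values live in linux/workqueue.h):
 *
 *	queued:		[ cwq pointer           | flags, WORK_STRUCT_CWQ set   ]
 *	executing:	[ last cpu << FLAG_BITS | flags, WORK_STRUCT_CWQ clear ]
 *
 * Because cwqs are aligned to at least 1 << WORK_STRUCT_FLAG_BITS (see the
 * comment above struct cpu_workqueue_struct), the pointer and the flag
 * bits never overlap.
 */
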
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ) {
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
	return !atomic_read(get_pool_nr_running(pool));
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for the unbound gcwq as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	atomic_t *nr_running = get_pool_nr_running(pool);

	return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct worker_pool *pool)
{
	return need_to_create_worker(pool) ||
		(pool->flags & POOL_MANAGE_WORKERS);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGING_WORKERS;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
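
/*
 * Editor's worked example (not part of the original file): with
 * MAX_IDLE_WORKERS_RATIO == 4, a pool with nr_idle == 5 and nr_busy == 8
 * is considered over-provisioned since (5 - 2) * 4 == 12 >= 8, while a
 * pool with nr_idle == 4 and nr_busy == 10 is not, since (4 - 2) * 4 == 8
 * < 10.  In other words, beyond the two "spare" workers, roughly at most
 * a quarter of the busy count may sit idle.
 */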
| 670 | |
| 671 | /* |
| 672 | * Wake up functions. |
| 673 | */ |
| 674 | |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 675 | /* Return the first worker. Safe with preemption disabled */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 676 | static struct worker *first_worker(struct worker_pool *pool) |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 677 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 678 | if (unlikely(list_empty(&pool->idle_list))) |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 679 | return NULL; |
| 680 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 681 | return list_first_entry(&pool->idle_list, struct worker, entry); |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 682 | } |
| 683 | |
| 684 | /** |
| 685 | * wake_up_worker - wake up an idle worker |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 686 | * @pool: worker pool to wake worker from |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 687 | * |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 688 | * Wake up the first idle worker of @pool. |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 689 | * |
| 690 | * CONTEXT: |
| 691 | * spin_lock_irq(gcwq->lock). |
| 692 | */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 693 | static void wake_up_worker(struct worker_pool *pool) |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 694 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 695 | struct worker *worker = first_worker(pool); |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 696 | |
| 697 | if (likely(worker)) |
| 698 | wake_up_process(worker->task); |
| 699 | } |
| 700 | |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 701 | /** |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 702 | * wq_worker_waking_up - a worker is waking up |
| 703 | * @task: task waking up |
| 704 | * @cpu: CPU @task is waking up to |
| 705 | * |
| 706 | * This function is called during try_to_wake_up() when a worker is |
| 707 | * being awoken. |
| 708 | * |
| 709 | * CONTEXT: |
| 710 | * spin_lock_irq(rq->lock) |
| 711 | */ |
| 712 | void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) |
| 713 | { |
| 714 | struct worker *worker = kthread_data(task); |
| 715 | |
Steven Rostedt | 2d64672 | 2010-12-03 23:12:33 -0500 | [diff] [blame] | 716 | if (!(worker->flags & WORKER_NOT_RUNNING)) |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 717 | atomic_inc(get_pool_nr_running(worker->pool)); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 718 | } |
| 719 | |
| 720 | /** |
| 721 | * wq_worker_sleeping - a worker is going to sleep |
| 722 | * @task: task going to sleep |
| 723 | * @cpu: CPU in question, must be the current CPU number |
| 724 | * |
| 725 | * This function is called during schedule() when a busy worker is |
| 726 | * going to sleep. Worker on the same cpu can be woken up by |
| 727 | * returning pointer to its task. |
| 728 | * |
| 729 | * CONTEXT: |
| 730 | * spin_lock_irq(rq->lock) |
| 731 | * |
| 732 | * RETURNS: |
| 733 | * Worker task on @cpu to wake up, %NULL if none. |
| 734 | */ |
| 735 | struct task_struct *wq_worker_sleeping(struct task_struct *task, |
| 736 | unsigned int cpu) |
| 737 | { |
| 738 | struct worker *worker = kthread_data(task), *to_wakeup = NULL; |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 739 | struct worker_pool *pool = worker->pool; |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 740 | atomic_t *nr_running = get_pool_nr_running(pool); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 741 | |
Steven Rostedt | 2d64672 | 2010-12-03 23:12:33 -0500 | [diff] [blame] | 742 | if (worker->flags & WORKER_NOT_RUNNING) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 743 | return NULL; |
| 744 | |
| 745 | /* this can only happen on the local cpu */ |
| 746 | BUG_ON(cpu != raw_smp_processor_id()); |
| 747 | |
| 748 | /* |
| 749 | * The counterpart of the following dec_and_test, implied mb, |
| 750 | * worklist not empty test sequence is in insert_work(). |
| 751 | * Please read comment there. |
| 752 | * |
| 753 | * NOT_RUNNING is clear. This means that trustee is not in |
| 754 | * charge and we're running on the local cpu w/ rq lock held |
| 755 | * and preemption disabled, which in turn means that none else |
| 756 | * could be manipulating idle_list, so dereferencing idle_list |
| 757 | * without gcwq lock is safe. |
| 758 | */ |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 759 | if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist)) |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 760 | to_wakeup = first_worker(pool); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 761 | return to_wakeup ? to_wakeup->task : NULL; |
| 762 | } |
| 763 | |
| 764 | /** |
| 765 | * worker_set_flags - set worker flags and adjust nr_running accordingly |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 766 | * @worker: self |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 767 | * @flags: flags to set |
| 768 | * @wakeup: wakeup an idle worker if necessary |
| 769 | * |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 770 | * Set @flags in @worker->flags and adjust nr_running accordingly. If |
| 771 | * nr_running becomes zero and @wakeup is %true, an idle worker is |
| 772 | * woken up. |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 773 | * |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 774 | * CONTEXT: |
| 775 | * spin_lock_irq(gcwq->lock) |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 776 | */ |
| 777 | static inline void worker_set_flags(struct worker *worker, unsigned int flags, |
| 778 | bool wakeup) |
| 779 | { |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 780 | struct worker_pool *pool = worker->pool; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 781 | |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 782 | WARN_ON_ONCE(worker->task != current); |
| 783 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 784 | /* |
| 785 | * If transitioning into NOT_RUNNING, adjust nr_running and |
| 786 | * wake up an idle worker as necessary if requested by |
| 787 | * @wakeup. |
| 788 | */ |
| 789 | if ((flags & WORKER_NOT_RUNNING) && |
| 790 | !(worker->flags & WORKER_NOT_RUNNING)) { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 791 | atomic_t *nr_running = get_pool_nr_running(pool); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 792 | |
| 793 | if (wakeup) { |
| 794 | if (atomic_dec_and_test(nr_running) && |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 795 | !list_empty(&pool->worklist)) |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 796 | wake_up_worker(pool); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 797 | } else |
| 798 | atomic_dec(nr_running); |
| 799 | } |
| 800 | |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 801 | worker->flags |= flags; |
| 802 | } |
| 803 | |
| 804 | /** |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 805 | * worker_clr_flags - clear worker flags and adjust nr_running accordingly |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 806 | * @worker: self |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 807 | * @flags: flags to clear |
| 808 | * |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 809 | * Clear @flags in @worker->flags and adjust nr_running accordingly. |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 810 | * |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 811 | * CONTEXT: |
| 812 | * spin_lock_irq(gcwq->lock) |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 813 | */ |
| 814 | static inline void worker_clr_flags(struct worker *worker, unsigned int flags) |
| 815 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 816 | struct worker_pool *pool = worker->pool; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 817 | unsigned int oflags = worker->flags; |
| 818 | |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 819 | WARN_ON_ONCE(worker->task != current); |
| 820 | |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 821 | worker->flags &= ~flags; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 822 | |
Tejun Heo | 42c025f | 2011-01-11 15:58:49 +0100 | [diff] [blame] | 823 | /* |
| 824 | * If transitioning out of NOT_RUNNING, increment nr_running. Note |
| 825 | * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask |
| 826 | * of multiple flags, not a single flag. |
| 827 | */ |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 828 | if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) |
| 829 | if (!(worker->flags & WORKER_NOT_RUNNING)) |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 830 | atomic_inc(get_pool_nr_running(pool)); |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 831 | } |
| 832 | |
| 833 | /** |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 834 | * busy_worker_head - return the busy hash head for a work |
| 835 | * @gcwq: gcwq of interest |
| 836 | * @work: work to be hashed |
| 837 | * |
| 838 | * Return hash head of @gcwq for @work. |
| 839 | * |
| 840 | * CONTEXT: |
| 841 | * spin_lock_irq(gcwq->lock). |
| 842 | * |
| 843 | * RETURNS: |
| 844 | * Pointer to the hash head. |
| 845 | */ |
| 846 | static struct hlist_head *busy_worker_head(struct global_cwq *gcwq, |
| 847 | struct work_struct *work) |
| 848 | { |
| 849 | const int base_shift = ilog2(sizeof(struct work_struct)); |
| 850 | unsigned long v = (unsigned long)work; |
| 851 | |
| 852 | /* simple shift and fold hash, do we need something better? */ |
| 853 | v >>= base_shift; |
| 854 | v += v >> BUSY_WORKER_HASH_ORDER; |
| 855 | v &= BUSY_WORKER_HASH_MASK; |
| 856 | |
| 857 | return &gcwq->busy_hash[v]; |
| 858 | } |
| 859 | |
| 860 | /** |
Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 861 | * __find_worker_executing_work - find worker which is executing a work |
| 862 | * @gcwq: gcwq of interest |
| 863 | * @bwh: hash head as returned by busy_worker_head() |
| 864 | * @work: work to find worker for |
| 865 | * |
| 866 | * Find a worker which is executing @work on @gcwq. @bwh should be |
| 867 | * the hash head obtained by calling busy_worker_head() with the same |
| 868 | * work. |
| 869 | * |
| 870 | * CONTEXT: |
| 871 | * spin_lock_irq(gcwq->lock). |
| 872 | * |
| 873 | * RETURNS: |
| 874 | * Pointer to worker which is executing @work if found, NULL |
| 875 | * otherwise. |
| 876 | */ |
| 877 | static struct worker *__find_worker_executing_work(struct global_cwq *gcwq, |
| 878 | struct hlist_head *bwh, |
| 879 | struct work_struct *work) |
| 880 | { |
| 881 | struct worker *worker; |
| 882 | struct hlist_node *tmp; |
| 883 | |
| 884 | hlist_for_each_entry(worker, tmp, bwh, hentry) |
Tejun Heo | 55e3e1f | 2012-12-18 10:35:02 -0800 | [diff] [blame] | 885 | if (worker->current_work == work && |
| 886 | worker->current_func == work->func) |
Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 887 | return worker; |
| 888 | return NULL; |
| 889 | } |
| 890 | |
| 891 | /** |
| 892 | * find_worker_executing_work - find worker which is executing a work |
| 893 | * @gcwq: gcwq of interest |
| 894 | * @work: work to find worker for |
| 895 | * |
Tejun Heo | 55e3e1f | 2012-12-18 10:35:02 -0800 | [diff] [blame] | 896 | * Find a worker which is executing @work on @gcwq by searching |
| 897 | * @gcwq->busy_hash which is keyed by the address of @work. For a worker |
| 898 | * to match, its current execution should match the address of @work and |
| 899 | * its work function. This is to avoid unwanted dependency between |
| 900 | * unrelated work executions through a work item being recycled while still |
| 901 | * being executed. |
| 902 | * |
| 903 | * This is a bit tricky. A work item may be freed once its execution |
| 904 | * starts and nothing prevents the freed area from being recycled for |
| 905 | * another work item. If the same work item address ends up being reused |
| 906 | * before the original execution finishes, workqueue will identify the |
| 907 | * recycled work item as currently executing and make it wait until the |
| 908 | * current execution finishes, introducing an unwanted dependency. |
| 909 | * |
| 910 | * This function checks the work item address, work function and workqueue |
| 911 | * to avoid false positives. Note that this isn't complete as one may |
| 912 | * construct a work function which can introduce dependency onto itself |
| 913 | * through a recycled work item. Well, if somebody wants to shoot oneself |
| 914 | * in the foot that badly, there's only so much we can do, and if such |
| 915 | * deadlock actually occurs, it should be easy to locate the culprit work |
| 916 | * function. |
Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 917 | * |
| 918 | * CONTEXT: |
| 919 | * spin_lock_irq(gcwq->lock). |
| 920 | * |
| 921 | * RETURNS: |
| 922 | * Pointer to worker which is executing @work if found, NULL |
| 923 | * otherwise. |
| 924 | */ |
| 925 | static struct worker *find_worker_executing_work(struct global_cwq *gcwq, |
| 926 | struct work_struct *work) |
| 927 | { |
| 928 | return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work), |
| 929 | work); |
| 930 | } |
| 931 | |
| 932 | /** |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 933 | * insert_work - insert a work into gcwq |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 934 | * @cwq: cwq @work belongs to |
| 935 | * @work: work to insert |
| 936 | * @head: insertion point |
| 937 | * @extra_flags: extra WORK_STRUCT_* flags to set |
| 938 | * |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 939 | * Insert @work which belongs to @cwq into @gcwq after @head. |
| 940 | * @extra_flags is OR'd into the work_struct flags. |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 941 | * |
| 942 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 943 | * spin_lock_irq(gcwq->lock). |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 944 | */ |
Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 945 | static void insert_work(struct cpu_workqueue_struct *cwq, |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 946 | struct work_struct *work, struct list_head *head, |
| 947 | unsigned int extra_flags) |
Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 948 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 949 | struct worker_pool *pool = cwq->pool; |
Frederic Weisbecker | e1d8aa9 | 2009-01-12 23:15:46 +0100 | [diff] [blame] | 950 | |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 951 | /* we own @work, set data and link */ |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 952 | set_work_cwq(work, cwq, extra_flags); |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 953 | |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 954 | /* |
| 955 | * Ensure that we get the right work->data if we see the |
| 956 | * result of list_add() below, see try_to_grab_pending(). |
| 957 | */ |
| 958 | smp_wmb(); |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 959 | |
Oleg Nesterov | 1a4d9b0 | 2008-07-25 01:47:47 -0700 | [diff] [blame] | 960 | list_add_tail(&work->entry, head); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 961 | |
| 962 | /* |
| 963 | * Ensure either worker_sched_deactivated() sees the above |
| 964 | * list_add_tail() or we see zero nr_running to avoid workers |
| 965 | * lying around lazily while there are works to be processed. |
| 966 | */ |
| 967 | smp_mb(); |
| 968 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 969 | if (__need_more_worker(pool)) |
| 970 | wake_up_worker(pool); |
Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 971 | } |
| 972 | |
Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 973 | /* |
| 974 | * Test whether @work is being queued from another work executing on the |
| 975 | * same workqueue. This is rather expensive and should only be used from |
| 976 | * cold paths. |
| 977 | */ |
| 978 | static bool is_chained_work(struct workqueue_struct *wq) |
| 979 | { |
| 980 | unsigned long flags; |
| 981 | unsigned int cpu; |
| 982 | |
| 983 | for_each_gcwq_cpu(cpu) { |
| 984 | struct global_cwq *gcwq = get_gcwq(cpu); |
| 985 | struct worker *worker; |
| 986 | struct hlist_node *pos; |
| 987 | int i; |
| 988 | |
| 989 | spin_lock_irqsave(&gcwq->lock, flags); |
| 990 | for_each_busy_worker(worker, i, pos, gcwq) { |
| 991 | if (worker->task != current) |
| 992 | continue; |
| 993 | spin_unlock_irqrestore(&gcwq->lock, flags); |
| 994 | /* |
| 995 | * I'm @worker, no locking necessary. See if @work |
| 996 | * is headed to the same workqueue. |
| 997 | */ |
| 998 | return worker->current_cwq->wq == wq; |
| 999 | } |
| 1000 | spin_unlock_irqrestore(&gcwq->lock, flags); |
| 1001 | } |
| 1002 | return false; |
| 1003 | } |
| 1004 | |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1005 | static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | struct work_struct *work) |
| 1007 | { |
Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1008 | struct global_cwq *gcwq; |
| 1009 | struct cpu_workqueue_struct *cwq; |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1010 | struct list_head *worklist; |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1011 | unsigned int work_flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1012 | unsigned long flags; |
| 1013 | |
Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 1014 | debug_work_activate(work); |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1015 | |
Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 1016 | /* if dying, only works from the same workqueue are allowed */ |
Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 1017 | if (unlikely(wq->flags & WQ_DRAINING) && |
Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 1018 | WARN_ON_ONCE(!is_chained_work(wq))) |
Tejun Heo | e41e704 | 2010-08-24 14:22:47 +0200 | [diff] [blame] | 1019 | return; |
| 1020 | |
Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1021 | /* determine gcwq to use */ |
| 1022 | if (!(wq->flags & WQ_UNBOUND)) { |
Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1023 | struct global_cwq *last_gcwq; |
| 1024 | |
Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1025 | if (unlikely(cpu == WORK_CPU_UNBOUND)) |
| 1026 | cpu = raw_smp_processor_id(); |
| 1027 | |
Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1028 | /* |
| 1029 | * It's multi cpu. If @wq is non-reentrant and @work |
| 1030 | * was previously on a different cpu, it might still |
| 1031 | * be running there, in which case the work needs to |
| 1032 | * be queued on that cpu to guarantee non-reentrance. |
| 1033 | */ |
Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1034 | gcwq = get_gcwq(cpu); |
Tejun Heo | 18aa9ef | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1035 | if (wq->flags & WQ_NON_REENTRANT && |
| 1036 | (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) { |
| 1037 | struct worker *worker; |
| 1038 | |
| 1039 | spin_lock_irqsave(&last_gcwq->lock, flags); |
| 1040 | |
| 1041 | worker = find_worker_executing_work(last_gcwq, work); |
| 1042 | |
| 1043 | if (worker && worker->current_cwq->wq == wq) |
| 1044 | gcwq = last_gcwq; |
| 1045 | else { |
| 1046 | /* meh... not running there, queue here */ |
| 1047 | spin_unlock_irqrestore(&last_gcwq->lock, flags); |
| 1048 | spin_lock_irqsave(&gcwq->lock, flags); |
| 1049 | } |
| 1050 | } else |
| 1051 | spin_lock_irqsave(&gcwq->lock, flags); |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1052 | } else { |
| 1053 | gcwq = get_gcwq(WORK_CPU_UNBOUND); |
| 1054 | spin_lock_irqsave(&gcwq->lock, flags); |
Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1055 | } |
| 1056 | |
| 1057 | /* gcwq determined, get cwq and queue */ |
| 1058 | cwq = get_cwq(gcwq->cpu, wq); |
Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1059 | trace_workqueue_queue_work(cpu, cwq, work); |
Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1060 | |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1061 | BUG_ON(!list_empty(&work->entry)); |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1062 | |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1063 | cwq->nr_in_flight[cwq->work_color]++; |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1064 | work_flags = work_color_to_flags(cwq->work_color); |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1065 | |
| 1066 | if (likely(cwq->nr_active < cwq->max_active)) { |
Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1067 | trace_workqueue_activate_work(work); |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1068 | cwq->nr_active++; |
Tejun Heo | dcb32ee | 2012-07-13 22:16:45 -0700 | [diff] [blame] | 1069 | worklist = &cwq->pool->worklist; |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1070 | } else { |
| 1071 | work_flags |= WORK_STRUCT_DELAYED; |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1072 | worklist = &cwq->delayed_works; |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1073 | } |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1074 | |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1075 | insert_work(cwq, work, worklist, work_flags); |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1076 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1077 | spin_unlock_irqrestore(&gcwq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | } |
| 1079 | |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1080 | /** |
| 1081 | * queue_work - queue work on a workqueue |
| 1082 | * @wq: workqueue to use |
| 1083 | * @work: work to queue |
| 1084 | * |
Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1085 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1086 | * |
Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 1087 | * We queue the work to the CPU on which it was submitted, but if the CPU dies |
| 1088 | * it can be processed by another CPU. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1089 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1090 | int queue_work(struct workqueue_struct *wq, struct work_struct *work) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1091 | { |
Oleg Nesterov | ef1ca23 | 2008-07-25 01:47:53 -0700 | [diff] [blame] | 1092 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1093 | |
Oleg Nesterov | ef1ca23 | 2008-07-25 01:47:53 -0700 | [diff] [blame] | 1094 | ret = queue_work_on(get_cpu(), wq, work); |
| 1095 | put_cpu(); |
| 1096 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 | return ret; |
| 1098 | } |
Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1099 | EXPORT_SYMBOL_GPL(queue_work); |
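/*
 * Hedged usage sketch for queue_work(): a minimal, hypothetical caller,
 * not part of this file.  The workqueue name "example_wq" and the work
 * function below are assumptions made for illustration.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work)
{
	pr_info("example work executed\n");
}

static DECLARE_WORK(example_work, example_work_fn);
static struct workqueue_struct *example_wq;

static int example_setup(void)
{
	example_wq = alloc_workqueue("example_wq", 0, 0);
	if (!example_wq)
		return -ENOMEM;

	/* non-zero return means the work was not already pending */
	queue_work(example_wq, &example_work);
	return 0;
}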
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1100 | |
Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 1101 | /** |
| 1102 | * queue_work_on - queue work on specific cpu |
| 1103 | * @cpu: CPU number to execute work on |
| 1104 | * @wq: workqueue to use |
| 1105 | * @work: work to queue |
| 1106 | * |
| 1107 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
| 1108 | * |
| 1109 | * We queue the work to a specific CPU; the caller must ensure it |
| 1110 | * can't go away. |
| 1111 | */ |
| 1112 | int |
| 1113 | queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) |
| 1114 | { |
| 1115 | int ret = 0; |
| 1116 | |
Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1117 | if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1118 | __queue_work(cpu, wq, work); |
Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 1119 | ret = 1; |
| 1120 | } |
| 1121 | return ret; |
| 1122 | } |
| 1123 | EXPORT_SYMBOL_GPL(queue_work_on); |
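/*
 * Hedged sketch for queue_work_on(), not part of this file.  Per the
 * comment above, the caller must keep the target CPU from going away;
 * using get_online_cpus()/put_online_cpus() for that is an assumption
 * of this example, not a requirement stated here.
 */
#include <linux/cpu.h>
#include <linux/workqueue.h>

static void queue_on_cpu_example(int cpu, struct workqueue_struct *wq,
				 struct work_struct *work)
{
	get_online_cpus();
	if (cpu_online(cpu))
		queue_work_on(cpu, wq, work);
	put_online_cpus();
}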
| 1124 | |
Li Zefan | 6d141c3 | 2008-02-08 04:21:09 -0800 | [diff] [blame] | 1125 | static void delayed_work_timer_fn(unsigned long __data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | { |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1127 | struct delayed_work *dwork = (struct delayed_work *)__data; |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1128 | struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | |
Srinivasarao P | b6e586c | 2013-09-18 14:33:45 +0530 | [diff] [blame] | 1130 | if (cwq != NULL) |
| 1131 | __queue_work(smp_processor_id(), cwq->wq, &dwork->work); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | } |
| 1133 | |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1134 | /** |
| 1135 | * queue_delayed_work - queue work on a workqueue after delay |
| 1136 | * @wq: workqueue to use |
Randy Dunlap | af9997e | 2006-12-22 01:06:52 -0800 | [diff] [blame] | 1137 | * @dwork: delayable work to queue |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1138 | * @delay: number of jiffies to wait before queueing |
| 1139 | * |
Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1140 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1141 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1142 | int queue_delayed_work(struct workqueue_struct *wq, |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1143 | struct delayed_work *dwork, unsigned long delay) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1144 | { |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1145 | if (delay == 0) |
Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1146 | return queue_work(wq, &dwork->work); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | |
Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1148 | return queue_delayed_work_on(-1, wq, dwork, delay); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | } |
Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1150 | EXPORT_SYMBOL_GPL(queue_delayed_work); |
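/*
 * Hedged usage sketch for queue_delayed_work(), not part of this file.
 * The work function, its name and the 500ms delay are illustrative
 * assumptions.
 */
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

static void example_dwork_fn(struct work_struct *work)
{
	pr_info("delayed work ran\n");
}

static DECLARE_DELAYED_WORK(example_dwork, example_dwork_fn);

static void example_arm(struct workqueue_struct *wq)
{
	/* runs example_dwork_fn() roughly 500 milliseconds from now */
	queue_delayed_work(wq, &example_dwork, msecs_to_jiffies(500));
}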
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1151 | |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1152 | /** |
| 1153 | * queue_delayed_work_on - queue work on specific CPU after delay |
| 1154 | * @cpu: CPU number to execute work on |
| 1155 | * @wq: workqueue to use |
Randy Dunlap | af9997e | 2006-12-22 01:06:52 -0800 | [diff] [blame] | 1156 | * @dwork: work to queue |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1157 | * @delay: number of jiffies to wait before queueing |
| 1158 | * |
Alan Stern | 057647f | 2006-10-28 10:38:58 -0700 | [diff] [blame] | 1159 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 1160 | */ |
Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1161 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1162 | struct delayed_work *dwork, unsigned long delay) |
Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1163 | { |
| 1164 | int ret = 0; |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1165 | struct timer_list *timer = &dwork->timer; |
| 1166 | struct work_struct *work = &dwork->work; |
Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1167 | |
Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1168 | if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { |
Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1169 | unsigned int lcpu; |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1170 | |
Tejun Heo | 4afca92 | 2012-12-04 07:40:39 -0800 | [diff] [blame] | 1171 | WARN_ON_ONCE(timer_pending(timer)); |
| 1172 | WARN_ON_ONCE(!list_empty(&work->entry)); |
Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1173 | |
Andrew Liu | 8a3e77c | 2008-05-01 04:35:14 -0700 | [diff] [blame] | 1174 | timer_stats_timer_set_start_info(&dwork->timer); |
| 1175 | |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1176 | /* |
| 1177 | * This stores cwq for the moment, for the timer_fn. |
| 1178 | * Note that the work's gcwq is preserved to allow |
| 1179 | * reentrance detection for delayed works. |
| 1180 | */ |
Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1181 | if (!(wq->flags & WQ_UNBOUND)) { |
| 1182 | struct global_cwq *gcwq = get_work_gcwq(work); |
| 1183 | |
| 1184 | if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND) |
| 1185 | lcpu = gcwq->cpu; |
| 1186 | else |
| 1187 | lcpu = raw_smp_processor_id(); |
| 1188 | } else |
| 1189 | lcpu = WORK_CPU_UNBOUND; |
| 1190 | |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1191 | set_work_cwq(work, get_cwq(lcpu, wq), 0); |
Tejun Heo | c7fc77f | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1192 | |
Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1193 | timer->expires = jiffies + delay; |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 1194 | timer->data = (unsigned long)dwork; |
Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1195 | timer->function = delayed_work_timer_fn; |
Oleg Nesterov | 63bc036 | 2007-05-09 02:34:16 -0700 | [diff] [blame] | 1196 | |
| 1197 | if (unlikely(cpu >= 0)) |
| 1198 | add_timer_on(timer, cpu); |
| 1199 | else |
| 1200 | add_timer(timer); |
Venkatesh Pallipadi | 7a6bc1c | 2006-06-28 13:50:33 -0700 | [diff] [blame] | 1201 | ret = 1; |
| 1202 | } |
| 1203 | return ret; |
| 1204 | } |
Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 1205 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); |
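/*
 * Hedged sketch combining a delay with CPU targeting via
 * queue_delayed_work_on(), not part of this file.  As with the
 * queue_work_on() sketch earlier, holding off CPU hotplug with
 * get_online_cpus() is an assumption made for the example.
 */
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void arm_dwork_on_cpu(int cpu, struct workqueue_struct *wq,
			     struct delayed_work *dwork, unsigned long ms)
{
	get_online_cpus();
	if (cpu_online(cpu))
		queue_delayed_work_on(cpu, wq, dwork, msecs_to_jiffies(ms));
	put_online_cpus();
}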
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1207 | /** |
| 1208 | * worker_enter_idle - enter idle state |
| 1209 | * @worker: worker which is entering idle state |
| 1210 | * |
| 1211 | * @worker is entering idle state. Update stats and idle timer if |
| 1212 | * necessary. |
| 1213 | * |
| 1214 | * LOCKING: |
| 1215 | * spin_lock_irq(gcwq->lock). |
| 1216 | */ |
| 1217 | static void worker_enter_idle(struct worker *worker) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | { |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1219 | struct worker_pool *pool = worker->pool; |
| 1220 | struct global_cwq *gcwq = pool->gcwq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1222 | BUG_ON(worker->flags & WORKER_IDLE); |
| 1223 | BUG_ON(!list_empty(&worker->entry) && |
| 1224 | (worker->hentry.next || worker->hentry.pprev)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1226 | /* can't use worker_set_flags(), also called from start_worker() */ |
| 1227 | worker->flags |= WORKER_IDLE; |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1228 | pool->nr_idle++; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1229 | worker->last_active = jiffies; |
Peter Zijlstra | d5abe66 | 2006-12-06 20:37:26 -0800 | [diff] [blame] | 1230 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1231 | /* idle_list is LIFO */ |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1232 | list_add(&worker->entry, &pool->idle_list); |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1233 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1234 | if (likely(!(worker->flags & WORKER_ROGUE))) { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1235 | if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1236 | mod_timer(&pool->idle_timer, |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1237 | jiffies + IDLE_WORKER_TIMEOUT); |
| 1238 | } else |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1239 | wake_up_all(&gcwq->trustee_wait); |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1240 | |
Tejun Heo | 24312d3 | 2012-05-14 15:04:50 -0700 | [diff] [blame] | 1241 | /* |
| 1242 | * Sanity check nr_running. Because trustee releases gcwq->lock |
| 1243 | * between setting %WORKER_ROGUE and zapping nr_running, the |
| 1244 | * warning may trigger spuriously. Check iff trustee is idle. |
| 1245 | */ |
| 1246 | WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE && |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1247 | pool->nr_workers == pool->nr_idle && |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1248 | atomic_read(get_pool_nr_running(pool))); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | } |
| 1250 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1251 | /** |
| 1252 | * worker_leave_idle - leave idle state |
| 1253 | * @worker: worker which is leaving idle state |
| 1254 | * |
| 1255 | * @worker is leaving idle state. Update stats. |
| 1256 | * |
| 1257 | * LOCKING: |
| 1258 | * spin_lock_irq(gcwq->lock). |
| 1259 | */ |
| 1260 | static void worker_leave_idle(struct worker *worker) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | { |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1262 | struct worker_pool *pool = worker->pool; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1264 | BUG_ON(!(worker->flags & WORKER_IDLE)); |
Tejun Heo | d302f01 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1265 | worker_clr_flags(worker, WORKER_IDLE); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1266 | pool->nr_idle--; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1267 | list_del_init(&worker->entry); |
| 1268 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1270 | /** |
| 1271 | * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq |
| 1272 | * @worker: self |
| 1273 | * |
| 1274 | * Works which are scheduled while the cpu is online must at least be |
| 1275 | * scheduled to a worker which is bound to the cpu so that if they are |
| 1276 | * flushed from cpu callbacks while cpu is going down, they are |
| 1277 | * guaranteed to execute on the cpu. |
| 1278 | * |
| 1279 | * This function is to be used by rogue workers and rescuers to bind |
| 1280 | * themselves to the target cpu and may race with cpu going down or |
| 1281 | * coming online. kthread_bind() can't be used because it may put the |
| 1282 | * worker on an already dead cpu and set_cpus_allowed_ptr() can't be used |
| 1283 | * verbatim as it's best effort and blocking and gcwq may be |
| 1284 | * [dis]associated in the meantime. |
| 1285 | * |
| 1286 | * This function tries set_cpus_allowed(), locks gcwq and verifies |
| 1287 | * the binding against GCWQ_DISASSOCIATED which is set during |
| 1288 | * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters |
| 1289 | * idle state or fetches works without dropping the lock, it can |
| 1290 | * guarantee the scheduling requirement described in the first paragraph. |
| 1291 | * |
| 1292 | * CONTEXT: |
| 1293 | * Might sleep. Called without any lock but returns with gcwq->lock |
| 1294 | * held. |
| 1295 | * |
| 1296 | * RETURNS: |
| 1297 | * %true if the associated gcwq is online (@worker is successfully |
| 1298 | * bound), %false if offline. |
| 1299 | */ |
| 1300 | static bool worker_maybe_bind_and_lock(struct worker *worker) |
Namhyung Kim | 972fa1c | 2010-08-22 23:19:43 +0900 | [diff] [blame] | 1301 | __acquires(&gcwq->lock) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1302 | { |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1303 | struct global_cwq *gcwq = worker->pool->gcwq; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1304 | struct task_struct *task = worker->task; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1306 | while (true) { |
| 1307 | /* |
| 1308 | * The following call may fail, succeed or succeed |
| 1309 | * without actually migrating the task to the cpu if |
| 1310 | * it races with cpu hotunplug operation. Verify |
| 1311 | * against GCWQ_DISASSOCIATED. |
| 1312 | */ |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1313 | if (!(gcwq->flags & GCWQ_DISASSOCIATED)) |
| 1314 | set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); |
Oleg Nesterov | 85f4186 | 2007-05-09 02:34:20 -0700 | [diff] [blame] | 1315 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1316 | spin_lock_irq(&gcwq->lock); |
| 1317 | if (gcwq->flags & GCWQ_DISASSOCIATED) |
| 1318 | return false; |
| 1319 | if (task_cpu(task) == gcwq->cpu && |
| 1320 | cpumask_equal(¤t->cpus_allowed, |
| 1321 | get_cpu_mask(gcwq->cpu))) |
| 1322 | return true; |
| 1323 | spin_unlock_irq(&gcwq->lock); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1324 | |
Tejun Heo | 5035b20 | 2011-04-29 18:08:37 +0200 | [diff] [blame] | 1325 | /* |
| 1326 | * We've raced with CPU hot[un]plug. Give it a breather |
| 1327 | * and retry migration. cond_resched() is required here; |
| 1328 | * otherwise, we might deadlock against cpu_stop trying to |
| 1329 | * bring down the CPU on non-preemptive kernel. |
| 1330 | */ |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1331 | cpu_relax(); |
Tejun Heo | 5035b20 | 2011-04-29 18:08:37 +0200 | [diff] [blame] | 1332 | cond_resched(); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1333 | } |
| 1334 | } |
| 1335 | |
| 1336 | /* |
| 1337 | * Function for worker->rebind_work used to rebind rogue busy workers |
| 1338 | * to the associated cpu which is coming back online. This is |
| 1339 | * scheduled by cpu up but can race with other cpu hotplug operations |
| 1340 | * and may be executed twice without intervening cpu down. |
| 1341 | */ |
| 1342 | static void worker_rebind_fn(struct work_struct *work) |
| 1343 | { |
| 1344 | struct worker *worker = container_of(work, struct worker, rebind_work); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1345 | struct global_cwq *gcwq = worker->pool->gcwq; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1346 | |
| 1347 | if (worker_maybe_bind_and_lock(worker)) |
| 1348 | worker_clr_flags(worker, WORKER_REBIND); |
| 1349 | |
| 1350 | spin_unlock_irq(&gcwq->lock); |
| 1351 | } |
| 1352 | |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1353 | static struct worker *alloc_worker(void) |
| 1354 | { |
| 1355 | struct worker *worker; |
| 1356 | |
| 1357 | worker = kzalloc(sizeof(*worker), GFP_KERNEL); |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1358 | if (worker) { |
| 1359 | INIT_LIST_HEAD(&worker->entry); |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1360 | INIT_LIST_HEAD(&worker->scheduled); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1361 | INIT_WORK(&worker->rebind_work, worker_rebind_fn); |
| 1362 | /* on creation a worker is in !idle && prep state */ |
| 1363 | worker->flags = WORKER_PREP; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1364 | } |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1365 | return worker; |
| 1366 | } |
| 1367 | |
| 1368 | /** |
| 1369 | * create_worker - create a new workqueue worker |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1370 | * @pool: pool the new worker will belong to |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1371 | * @bind: whether to set affinity to the pool's cpu or not |
| 1372 | * |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1373 | * Create a new worker which is bound to @pool. The returned worker |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1374 | * can be started by calling start_worker() or destroyed using |
| 1375 | * destroy_worker(). |
| 1376 | * |
| 1377 | * CONTEXT: |
| 1378 | * Might sleep. Does GFP_KERNEL allocations. |
| 1379 | * |
| 1380 | * RETURNS: |
| 1381 | * Pointer to the newly created worker. |
| 1382 | */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1383 | static struct worker *create_worker(struct worker_pool *pool, bool bind) |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1384 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1385 | struct global_cwq *gcwq = pool->gcwq; |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1386 | bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND; |
Tejun Heo | dcb32ee | 2012-07-13 22:16:45 -0700 | [diff] [blame] | 1387 | const char *pri = worker_pool_pri(pool) ? "H" : ""; |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1388 | struct worker *worker = NULL; |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1389 | int id = -1; |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1390 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1391 | spin_lock_irq(&gcwq->lock); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1392 | while (ida_get_new(&pool->worker_ida, &id)) { |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1393 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1394 | if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1395 | goto fail; |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1396 | spin_lock_irq(&gcwq->lock); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1397 | } |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1398 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1399 | |
| 1400 | worker = alloc_worker(); |
| 1401 | if (!worker) |
| 1402 | goto fail; |
| 1403 | |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1404 | worker->pool = pool; |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1405 | worker->id = id; |
| 1406 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1407 | if (!on_unbound_cpu) |
Eric Dumazet | 94dcf29 | 2011-03-22 16:30:45 -0700 | [diff] [blame] | 1408 | worker->task = kthread_create_on_node(worker_thread, |
Tejun Heo | dcb32ee | 2012-07-13 22:16:45 -0700 | [diff] [blame] | 1409 | worker, cpu_to_node(gcwq->cpu), |
| 1410 | "kworker/%u:%d%s", gcwq->cpu, id, pri); |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1411 | else |
| 1412 | worker->task = kthread_create(worker_thread, worker, |
Tejun Heo | dcb32ee | 2012-07-13 22:16:45 -0700 | [diff] [blame] | 1413 | "kworker/u:%d%s", id, pri); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1414 | if (IS_ERR(worker->task)) |
| 1415 | goto fail; |
| 1416 | |
Tejun Heo | dcb32ee | 2012-07-13 22:16:45 -0700 | [diff] [blame] | 1417 | if (worker_pool_pri(pool)) |
| 1418 | set_user_nice(worker->task, HIGHPRI_NICE_LEVEL); |
| 1419 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1420 | /* |
| 1421 | * A rogue worker will become a regular one if CPU comes |
| 1422 | * online later on. Make sure every worker has |
| 1423 | * PF_THREAD_BOUND set. |
| 1424 | */ |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1425 | if (bind && !on_unbound_cpu) |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1426 | kthread_bind(worker->task, gcwq->cpu); |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1427 | else { |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1428 | worker->task->flags |= PF_THREAD_BOUND; |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1429 | if (on_unbound_cpu) |
| 1430 | worker->flags |= WORKER_UNBOUND; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1431 | } |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 1432 | |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1433 | return worker; |
| 1434 | fail: |
| 1435 | if (id >= 0) { |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1436 | spin_lock_irq(&gcwq->lock); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1437 | ida_remove(&pool->worker_ida, id); |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1438 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1439 | } |
| 1440 | kfree(worker); |
| 1441 | return NULL; |
| 1442 | } |
| 1443 | |
| 1444 | /** |
| 1445 | * start_worker - start a newly created worker |
| 1446 | * @worker: worker to start |
| 1447 | * |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1448 | * Make the gcwq aware of @worker and start it. |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1449 | * |
| 1450 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1451 | * spin_lock_irq(gcwq->lock). |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1452 | */ |
| 1453 | static void start_worker(struct worker *worker) |
| 1454 | { |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1455 | worker->flags |= WORKER_STARTED; |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1456 | worker->pool->nr_workers++; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1457 | worker_enter_idle(worker); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1458 | wake_up_process(worker->task); |
| 1459 | } |
| 1460 | |
| 1461 | /** |
| 1462 | * destroy_worker - destroy a workqueue worker |
| 1463 | * @worker: worker to be destroyed |
| 1464 | * |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1465 | * Destroy @worker and adjust @gcwq stats accordingly. |
| 1466 | * |
| 1467 | * CONTEXT: |
| 1468 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1469 | */ |
| 1470 | static void destroy_worker(struct worker *worker) |
| 1471 | { |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1472 | struct worker_pool *pool = worker->pool; |
| 1473 | struct global_cwq *gcwq = pool->gcwq; |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1474 | int id = worker->id; |
| 1475 | |
| 1476 | /* sanity check frenzy */ |
| 1477 | BUG_ON(worker->current_work); |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1478 | BUG_ON(!list_empty(&worker->scheduled)); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1479 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1480 | if (worker->flags & WORKER_STARTED) |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1481 | pool->nr_workers--; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1482 | if (worker->flags & WORKER_IDLE) |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1483 | pool->nr_idle--; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1484 | |
Lai Jiangshan | 23f0913 | 2014-02-15 22:02:28 +0800 | [diff] [blame] | 1485 | /* |
| 1486 | * Once WORKER_DIE is set, the kworker may destroy itself at any |
| 1487 | * point. Pin to ensure the task stays until we're done with it. |
| 1488 | */ |
| 1489 | get_task_struct(worker->task); |
| 1490 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1491 | list_del_init(&worker->entry); |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 1492 | worker->flags |= WORKER_DIE; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1493 | |
| 1494 | spin_unlock_irq(&gcwq->lock); |
| 1495 | |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1496 | kthread_stop(worker->task); |
Lai Jiangshan | 23f0913 | 2014-02-15 22:02:28 +0800 | [diff] [blame] | 1497 | put_task_struct(worker->task); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1498 | kfree(worker); |
| 1499 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1500 | spin_lock_irq(&gcwq->lock); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1501 | ida_remove(&pool->worker_ida, id); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1502 | } |
| 1503 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1504 | static void idle_worker_timeout(unsigned long __pool) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1505 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1506 | struct worker_pool *pool = (void *)__pool; |
| 1507 | struct global_cwq *gcwq = pool->gcwq; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1508 | |
| 1509 | spin_lock_irq(&gcwq->lock); |
| 1510 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1511 | if (too_many_workers(pool)) { |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1512 | struct worker *worker; |
| 1513 | unsigned long expires; |
| 1514 | |
| 1515 | /* idle_list is kept in LIFO order, check the last one */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1516 | worker = list_entry(pool->idle_list.prev, struct worker, entry); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1517 | expires = worker->last_active + IDLE_WORKER_TIMEOUT; |
| 1518 | |
| 1519 | if (time_before(jiffies, expires)) |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1520 | mod_timer(&pool->idle_timer, expires); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1521 | else { |
| 1522 | /* it's been idle for too long, wake up manager */ |
Tejun Heo | 22ad564 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1523 | pool->flags |= POOL_MANAGE_WORKERS; |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1524 | wake_up_worker(pool); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1525 | } |
| 1526 | } |
| 1527 | |
| 1528 | spin_unlock_irq(&gcwq->lock); |
| 1529 | } |
| 1530 | |
| 1531 | static bool send_mayday(struct work_struct *work) |
| 1532 | { |
| 1533 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
| 1534 | struct workqueue_struct *wq = cwq->wq; |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1535 | unsigned int cpu; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1536 | |
| 1537 | if (!(wq->flags & WQ_RESCUER)) |
| 1538 | return false; |
| 1539 | |
| 1540 | /* mayday mayday mayday */ |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1541 | cpu = cwq->pool->gcwq->cpu; |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 1542 | /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ |
| 1543 | if (cpu == WORK_CPU_UNBOUND) |
| 1544 | cpu = 0; |
Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 1545 | if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1546 | wake_up_process(wq->rescuer->task); |
| 1547 | return true; |
| 1548 | } |
| 1549 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1550 | static void gcwq_mayday_timeout(unsigned long __pool) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1551 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1552 | struct worker_pool *pool = (void *)__pool; |
| 1553 | struct global_cwq *gcwq = pool->gcwq; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1554 | struct work_struct *work; |
| 1555 | |
| 1556 | spin_lock_irq(&gcwq->lock); |
| 1557 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1558 | if (need_to_create_worker(pool)) { |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1559 | /* |
| 1560 | * We've been trying to create a new worker but |
| 1561 | * haven't been successful. We might be hitting an |
| 1562 | * allocation deadlock. Send distress signals to |
| 1563 | * rescuers. |
| 1564 | */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1565 | list_for_each_entry(work, &pool->worklist, entry) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1566 | send_mayday(work); |
| 1567 | } |
| 1568 | |
| 1569 | spin_unlock_irq(&gcwq->lock); |
| 1570 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1571 | mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1572 | } |
| 1573 | |
| 1574 | /** |
| 1575 | * maybe_create_worker - create a new worker if necessary |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1576 | * @pool: pool to create a new worker for |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1577 | * |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1578 | * Create a new worker for @pool if necessary. @pool is guaranteed to |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1579 | * have at least one idle worker on return from this function. If |
| 1580 | * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1581 | * sent to all rescuers with works scheduled on @pool to resolve |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1582 | * possible allocation deadlock. |
| 1583 | * |
| 1584 | * On return, need_to_create_worker() is guaranteed to be false and |
| 1585 | * may_start_working() true. |
| 1586 | * |
| 1587 | * LOCKING: |
| 1588 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed |
| 1589 | * multiple times. Does GFP_KERNEL allocations. Called only from |
| 1590 | * manager. |
| 1591 | * |
| 1592 | * RETURNS: |
| 1593 | * false if no action was taken and gcwq->lock stayed locked, true |
| 1594 | * otherwise. |
| 1595 | */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1596 | static bool maybe_create_worker(struct worker_pool *pool) |
Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 1597 | __releases(&gcwq->lock) |
| 1598 | __acquires(&gcwq->lock) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1599 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1600 | struct global_cwq *gcwq = pool->gcwq; |
| 1601 | |
| 1602 | if (!need_to_create_worker(pool)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1603 | return false; |
| 1604 | restart: |
Tejun Heo | 9f9c236 | 2010-07-14 11:31:20 +0200 | [diff] [blame] | 1605 | spin_unlock_irq(&gcwq->lock); |
| 1606 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1607 | /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1608 | mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1609 | |
| 1610 | while (true) { |
| 1611 | struct worker *worker; |
| 1612 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1613 | worker = create_worker(pool, true); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1614 | if (worker) { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1615 | del_timer_sync(&pool->mayday_timer); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1616 | spin_lock_irq(&gcwq->lock); |
| 1617 | start_worker(worker); |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1618 | BUG_ON(need_to_create_worker(pool)); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1619 | return true; |
| 1620 | } |
| 1621 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1622 | if (!need_to_create_worker(pool)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1623 | break; |
| 1624 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1625 | __set_current_state(TASK_INTERRUPTIBLE); |
| 1626 | schedule_timeout(CREATE_COOLDOWN); |
Tejun Heo | 9f9c236 | 2010-07-14 11:31:20 +0200 | [diff] [blame] | 1627 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1628 | if (!need_to_create_worker(pool)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1629 | break; |
| 1630 | } |
| 1631 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1632 | del_timer_sync(&pool->mayday_timer); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1633 | spin_lock_irq(&gcwq->lock); |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1634 | if (need_to_create_worker(pool)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1635 | goto restart; |
| 1636 | return true; |
| 1637 | } |
| 1638 | |
| 1639 | /** |
| 1640 | * maybe_destroy_worker - destroy workers which have been idle for a while |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1641 | * @pool: pool to destroy workers for |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1642 | * |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1643 | * Destroy @pool workers which have been idle for longer than |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1644 | * IDLE_WORKER_TIMEOUT. |
| 1645 | * |
| 1646 | * LOCKING: |
| 1647 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed |
| 1648 | * multiple times. Called only from manager. |
| 1649 | * |
| 1650 | * RETURNS: |
| 1651 | * false if no action was taken and gcwq->lock stayed locked, true |
| 1652 | * otherwise. |
| 1653 | */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1654 | static bool maybe_destroy_workers(struct worker_pool *pool) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1655 | { |
| 1656 | bool ret = false; |
| 1657 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1658 | while (too_many_workers(pool)) { |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1659 | struct worker *worker; |
| 1660 | unsigned long expires; |
| 1661 | |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1662 | worker = list_entry(pool->idle_list.prev, struct worker, entry); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1663 | expires = worker->last_active + IDLE_WORKER_TIMEOUT; |
| 1664 | |
| 1665 | if (time_before(jiffies, expires)) { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1666 | mod_timer(&pool->idle_timer, expires); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1667 | break; |
| 1668 | } |
| 1669 | |
| 1670 | destroy_worker(worker); |
| 1671 | ret = true; |
| 1672 | } |
| 1673 | |
| 1674 | return ret; |
| 1675 | } |
| 1676 | |
| 1677 | /** |
| 1678 | * manage_workers - manage worker pool |
| 1679 | * @worker: self |
| 1680 | * |
| 1681 | * Assume the manager role and manage gcwq worker pool @worker belongs |
| 1682 | * to. At any given time, there can be only zero or one manager per |
| 1683 | * gcwq. The exclusion is handled automatically by this function. |
| 1684 | * |
| 1685 | * The caller can safely start processing works on false return. On |
| 1686 | * true return, it's guaranteed that need_to_create_worker() is false |
| 1687 | * and may_start_working() is true. |
| 1688 | * |
| 1689 | * CONTEXT: |
| 1690 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed |
| 1691 | * multiple times. Does GFP_KERNEL allocations. |
| 1692 | * |
| 1693 | * RETURNS: |
| 1694 | * false if no action was taken and gcwq->lock stayed locked, true if |
| 1695 | * some action was taken. |
| 1696 | */ |
| 1697 | static bool manage_workers(struct worker *worker) |
| 1698 | { |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1699 | struct worker_pool *pool = worker->pool; |
| 1700 | struct global_cwq *gcwq = pool->gcwq; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1701 | bool ret = false; |
| 1702 | |
Tejun Heo | 22ad564 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1703 | if (pool->flags & POOL_MANAGING_WORKERS) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1704 | return ret; |
| 1705 | |
Tejun Heo | 22ad564 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1706 | pool->flags &= ~POOL_MANAGE_WORKERS; |
| 1707 | pool->flags |= POOL_MANAGING_WORKERS; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1708 | |
| 1709 | /* |
| 1710 | * Destroy and then create so that may_start_working() is true |
| 1711 | * on return. |
| 1712 | */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1713 | ret |= maybe_destroy_workers(pool); |
| 1714 | ret |= maybe_create_worker(pool); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1715 | |
Tejun Heo | 22ad564 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1716 | pool->flags &= ~POOL_MANAGING_WORKERS; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1717 | |
| 1718 | /* |
| 1719 | * The trustee might be waiting to take over the manager |
| 1720 | * position; tell it we're done. |
| 1721 | */ |
| 1722 | if (unlikely(gcwq->trustee)) |
| 1723 | wake_up_all(&gcwq->trustee_wait); |
| 1724 | |
| 1725 | return ret; |
| 1726 | } |
| 1727 | |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1728 | /** |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1729 | * move_linked_works - move linked works to a list |
| 1730 | * @work: start of series of works to be scheduled |
| 1731 | * @head: target list to append @work to |
| 1732 | * @nextp: out parameter for nested worklist walking |
| 1733 | * |
| 1734 | * Schedule linked works starting from @work to @head. Work series to |
| 1735 | * be scheduled starts at @work and includes any consecutive work with |
| 1736 | * WORK_STRUCT_LINKED set in its predecessor. |
| 1737 | * |
| 1738 | * If @nextp is not NULL, it's updated to point to the next work of |
| 1739 | * the last scheduled work. This allows move_linked_works() to be |
| 1740 | * nested inside outer list_for_each_entry_safe(). |
| 1741 | * |
| 1742 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1743 | * spin_lock_irq(gcwq->lock). |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1744 | */ |
| 1745 | static void move_linked_works(struct work_struct *work, struct list_head *head, |
| 1746 | struct work_struct **nextp) |
| 1747 | { |
| 1748 | struct work_struct *n; |
| 1749 | |
| 1750 | /* |
| 1751 | * A linked worklist always ends before the end of the list, |
| 1752 | * so use NULL for the list head. |
| 1753 | */ |
| 1754 | list_for_each_entry_safe_from(work, n, NULL, entry) { |
| 1755 | list_move_tail(&work->entry, head); |
| 1756 | if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) |
| 1757 | break; |
| 1758 | } |
| 1759 | |
| 1760 | /* |
| 1761 | * If we're already inside safe list traversal and have moved |
| 1762 | * multiple works to the scheduled queue, the next position |
| 1763 | * needs to be updated. |
| 1764 | */ |
| 1765 | if (nextp) |
| 1766 | *nextp = n; |
| 1767 | } |
| 1768 | |
Lai Jiangshan | 31eafff | 2012-09-18 10:40:00 -0700 | [diff] [blame] | 1769 | static void cwq_activate_delayed_work(struct work_struct *work) |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1770 | { |
Lai Jiangshan | 31eafff | 2012-09-18 10:40:00 -0700 | [diff] [blame] | 1771 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1772 | |
Tejun Heo | cdadf00 | 2010-10-05 10:49:55 +0200 | [diff] [blame] | 1773 | trace_workqueue_activate_work(work); |
Tejun Heo | dcb32ee | 2012-07-13 22:16:45 -0700 | [diff] [blame] | 1774 | move_linked_works(work, &cwq->pool->worklist, NULL); |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1775 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1776 | cwq->nr_active++; |
| 1777 | } |
| 1778 | |
Lai Jiangshan | 31eafff | 2012-09-18 10:40:00 -0700 | [diff] [blame] | 1779 | static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) |
| 1780 | { |
| 1781 | struct work_struct *work = list_first_entry(&cwq->delayed_works, |
| 1782 | struct work_struct, entry); |
| 1783 | |
| 1784 | cwq_activate_delayed_work(work); |
| 1785 | } |
| 1786 | |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1787 | /** |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1788 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight |
| 1789 | * @cwq: cwq of interest |
| 1790 | * @color: color of work which left the queue |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1791 | * @delayed: for a delayed work |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1792 | * |
| 1793 | * A work has either completed or been removed from the pending queue; |
| 1794 | * decrement nr_in_flight of its cwq and handle workqueue flushing. |
| 1795 | * |
| 1796 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1797 | * spin_lock_irq(gcwq->lock). |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1798 | */ |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1799 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, |
| 1800 | bool delayed) |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1801 | { |
| 1802 | /* ignore uncolored works */ |
| 1803 | if (color == WORK_NO_COLOR) |
| 1804 | return; |
| 1805 | |
| 1806 | cwq->nr_in_flight[color]--; |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1807 | |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1808 | if (!delayed) { |
| 1809 | cwq->nr_active--; |
| 1810 | if (!list_empty(&cwq->delayed_works)) { |
| 1811 | /* one down, submit a delayed one */ |
| 1812 | if (cwq->nr_active < cwq->max_active) |
| 1813 | cwq_activate_first_delayed(cwq); |
| 1814 | } |
Tejun Heo | 502ca9d | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1815 | } |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1816 | |
| 1817 | /* is flush in progress and are we at the flushing tip? */ |
| 1818 | if (likely(cwq->flush_color != color)) |
| 1819 | return; |
| 1820 | |
| 1821 | /* are there still in-flight works? */ |
| 1822 | if (cwq->nr_in_flight[color]) |
| 1823 | return; |
| 1824 | |
| 1825 | /* this cwq is done, clear flush_color */ |
| 1826 | cwq->flush_color = -1; |
| 1827 | |
| 1828 | /* |
| 1829 | * If this was the last cwq, wake up the first flusher. It |
| 1830 | * will handle the rest. |
| 1831 | */ |
| 1832 | if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) |
| 1833 | complete(&cwq->wq->first_flusher->done); |
| 1834 | } |
| 1835 | |
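/*
 * Illustrative sketch, not part of workqueue.c: how the nr_active /
 * max_active accounting above looks from the caller side.  With
 * max_active == 1, the second item queued below is parked on
 * cwq->delayed_works until the first completes and
 * cwq_dec_nr_in_flight() activates it.  All names (example_wq,
 * example_fn, ...) are hypothetical.
 */
static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	/* at most one instance is active per cwq because max_active == 1 */
}

static DECLARE_WORK(example_work1, example_fn);
static DECLARE_WORK(example_work2, example_fn);

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 1);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work1);	/* becomes active */
	queue_work(example_wq, &example_work2);	/* waits on delayed_works */
	return 0;
}
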
| 1836 | /** |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1837 | * process_one_work - process single work |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1838 | * @worker: self |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1839 | * @work: work to process |
| 1840 | * |
| 1841 | * Process @work. This function contains all the logic necessary to
| 1842 | * process a single work, including synchronization against and
| 1843 | * interaction with other workers on the same cpu, queueing and
| 1844 | * flushing. As long as the context requirement is met, any worker can
| 1845 | * call this function to process a work.
| 1846 | * |
| 1847 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1848 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1849 | */ |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1850 | static void process_one_work(struct worker *worker, struct work_struct *work) |
Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 1851 | __releases(&gcwq->lock) |
| 1852 | __acquires(&gcwq->lock) |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1853 | { |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1854 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1855 | struct worker_pool *pool = worker->pool; |
| 1856 | struct global_cwq *gcwq = pool->gcwq; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1857 | struct hlist_head *bwh = busy_worker_head(gcwq, work); |
Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1858 | bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1859 | int work_color; |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1860 | struct worker *collision; |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1861 | #ifdef CONFIG_LOCKDEP |
| 1862 | /* |
| 1863 | * It is permissible to free the struct work_struct from
| 1864 | * inside the function that is called from it; we need to take
| 1865 | * this into account for lockdep too. To avoid bogus "held
| 1866 | * lock freed" warnings as well as problems when looking into |
| 1867 | * work->lockdep_map, make a copy and use that here. |
| 1868 | */ |
| 1869 | struct lockdep_map lockdep_map = work->lockdep_map; |
| 1870 | #endif |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1871 | /* |
| 1872 | * A single work shouldn't be executed concurrently by |
| 1873 | * multiple workers on a single cpu. Check whether anyone is |
| 1874 | * already processing the work. If so, defer the work to the |
| 1875 | * currently executing one. |
| 1876 | */ |
| 1877 | collision = __find_worker_executing_work(gcwq, bwh, work); |
| 1878 | if (unlikely(collision)) { |
| 1879 | move_linked_works(work, &collision->scheduled, NULL); |
| 1880 | return; |
| 1881 | } |
| 1882 | |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1883 | /* claim and process */ |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1884 | debug_work_deactivate(work); |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1885 | hlist_add_head(&worker->hentry, bwh); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1886 | worker->current_work = work; |
Tejun Heo | 55e3e1f | 2012-12-18 10:35:02 -0800 | [diff] [blame] | 1887 | worker->current_func = work->func; |
Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1888 | worker->current_cwq = cwq; |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1889 | work_color = get_work_color(work); |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1890 | |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1891 | /* record the current cpu number in the work data and dequeue */ |
| 1892 | set_work_cpu(work, gcwq->cpu); |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1893 | list_del_init(&work->entry); |
| 1894 | |
Tejun Heo | 649027d | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1895 | /* |
Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1896 | * CPU intensive works don't participate in concurrency |
| 1897 | * management. They're the scheduler's responsibility. |
| 1898 | */ |
| 1899 | if (unlikely(cpu_intensive)) |
| 1900 | worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); |
| 1901 | |
Tejun Heo | b7b5c68 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1902 | /* |
| 1903 | * Unbound gcwq isn't concurrency managed and work items should be |
| 1904 | * executed ASAP. Wake up another worker if necessary. |
| 1905 | */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1906 | if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) |
| 1907 | wake_up_worker(pool); |
Tejun Heo | b7b5c68 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1908 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1909 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1910 | |
Tejun Heo | 66307ae | 2012-08-03 10:30:45 -0700 | [diff] [blame] | 1911 | smp_wmb(); /* paired with test_and_set_bit(PENDING) */ |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1912 | work_clear_pending(work); |
Tejun Heo | 66307ae | 2012-08-03 10:30:45 -0700 | [diff] [blame] | 1913 | |
Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 1914 | lock_map_acquire_read(&cwq->wq->lockdep_map); |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1915 | lock_map_acquire(&lockdep_map); |
Arjan van de Ven | e36c886 | 2010-08-21 13:07:26 -0700 | [diff] [blame] | 1916 | trace_workqueue_execute_start(work); |
Tejun Heo | 55e3e1f | 2012-12-18 10:35:02 -0800 | [diff] [blame] | 1917 | worker->current_func(work); |
Arjan van de Ven | e36c886 | 2010-08-21 13:07:26 -0700 | [diff] [blame] | 1918 | /* |
| 1919 | * While we must be careful to not use "work" after this, the trace |
| 1920 | * point will only record its address. |
| 1921 | */ |
| 1922 | trace_workqueue_execute_end(work); |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1923 | lock_map_release(&lockdep_map); |
| 1924 | lock_map_release(&cwq->wq->lockdep_map); |
| 1925 | |
| 1926 | if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { |
Tejun Heo | 55e3e1f | 2012-12-18 10:35:02 -0800 | [diff] [blame] | 1927 | pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" |
| 1928 | " last function: %pf\n", |
| 1929 | current->comm, preempt_count(), task_pid_nr(current), |
| 1930 | worker->current_func); |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1931 | debug_show_held_locks(current); |
Syed Rameez Mustafa | 1bee7b9 | 2013-07-15 11:52:09 -0700 | [diff] [blame] | 1932 | BUG_ON(PANIC_CORRUPTION); |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1933 | dump_stack(); |
| 1934 | } |
| 1935 | |
Tejun Heo | 00cef7a | 2013-08-28 17:33:37 -0400 | [diff] [blame] | 1936 | /* |
| 1937 | * The following prevents a kworker from hogging CPU on !PREEMPT |
| 1938 | * kernels, where a requeueing work item waiting for something to |
| 1939 | * happen could deadlock with stop_machine as such work item could |
| 1940 | * indefinitely requeue itself while all other CPUs are trapped in |
| 1941 | * stop_machine. |
| 1942 | */ |
| 1943 | cond_resched(); |
| 1944 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1945 | spin_lock_irq(&gcwq->lock); |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1946 | |
Tejun Heo | fb0e7be | 2010-06-29 10:07:15 +0200 | [diff] [blame] | 1947 | /* clear cpu intensive status */ |
| 1948 | if (unlikely(cpu_intensive)) |
| 1949 | worker_clr_flags(worker, WORKER_CPU_INTENSIVE); |
| 1950 | |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1951 | /* we're done with it, release */ |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1952 | hlist_del_init(&worker->hentry); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1953 | worker->current_work = NULL; |
Tejun Heo | 55e3e1f | 2012-12-18 10:35:02 -0800 | [diff] [blame] | 1954 | worker->current_func = NULL; |
Tejun Heo | 8cca0ee | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 1955 | worker->current_cwq = NULL; |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 1956 | cwq_dec_nr_in_flight(cwq, work_color, false); |
Tejun Heo | a62428c | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1957 | } |
| 1958 | |
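/*
 * Illustrative sketch, not part of workqueue.c: the self-freeing work
 * item pattern that the lockdep_map copy in process_one_work() exists
 * to support.  The work function may free the object embedding the
 * work item, so nothing touches *work after ->current_func() returns.
 * struct example_job and example_submit() are hypothetical.
 */
struct example_job {
	struct work_struct work;
	int payload;
};

static void example_job_fn(struct work_struct *work)
{
	struct example_job *job = container_of(work, struct example_job, work);

	/* ... consume job->payload ... */
	kfree(job);	/* frees the very work item this function was called from */
}

static int example_submit(int payload)
{
	struct example_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return -ENOMEM;
	job->payload = payload;
	INIT_WORK(&job->work, example_job_fn);
	schedule_work(&job->work);
	return 0;
}
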
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1959 | /** |
| 1960 | * process_scheduled_works - process scheduled works |
| 1961 | * @worker: self |
| 1962 | * |
| 1963 | * Process all scheduled works. Please note that the scheduled list |
| 1964 | * may change while processing a work, so this function repeatedly |
| 1965 | * fetches a work from the top and executes it. |
| 1966 | * |
| 1967 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1968 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1969 | * multiple times. |
| 1970 | */ |
| 1971 | static void process_scheduled_works(struct worker *worker) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1972 | { |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1973 | while (!list_empty(&worker->scheduled)) { |
| 1974 | struct work_struct *work = list_first_entry(&worker->scheduled, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1975 | struct work_struct, entry); |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1976 | process_one_work(worker, work); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1977 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1978 | } |
| 1979 | |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1980 | /** |
| 1981 | * worker_thread - the worker thread function |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1982 | * @__worker: self |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1983 | * |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1984 | * The gcwq worker thread function. There's a single dynamic pool of
| 1985 | * these for each cpu. These workers process all works regardless of
| 1986 | * their specific target workqueue. The only exception is works which
| 1987 | * belong to workqueues with a rescuer, which is explained in
| 1988 | * rescuer_thread().
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 1989 | */ |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1990 | static int worker_thread(void *__worker) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1991 | { |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 1992 | struct worker *worker = __worker; |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 1993 | struct worker_pool *pool = worker->pool; |
| 1994 | struct global_cwq *gcwq = pool->gcwq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1995 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 1996 | /* tell the scheduler that this is a workqueue worker */ |
| 1997 | worker->task->flags |= PF_WQ_WORKER; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1998 | woke_up: |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 1999 | spin_lock_irq(&gcwq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2000 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2001 | /* DIE can be set only while we're idle, checking here is enough */ |
| 2002 | if (worker->flags & WORKER_DIE) { |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2003 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2004 | worker->task->flags &= ~PF_WQ_WORKER; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2005 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2006 | } |
| 2007 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2008 | worker_leave_idle(worker); |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2009 | recheck: |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2010 | /* no more worker necessary? */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2011 | if (!need_more_worker(pool)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2012 | goto sleep; |
| 2013 | |
| 2014 | /* do we need to manage? */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2015 | if (unlikely(!may_start_working(pool)) && manage_workers(worker)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2016 | goto recheck; |
| 2017 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2018 | /* |
| 2019 | * ->scheduled list can only be filled while a worker is |
| 2020 | * preparing to process a work or actually processing it. |
| 2021 | * Make sure nobody diddled with it while I was sleeping. |
| 2022 | */ |
| 2023 | BUG_ON(!list_empty(&worker->scheduled)); |
| 2024 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2025 | /* |
| 2026 | * When control reaches this point, we're guaranteed to have |
| 2027 | * at least one idle worker or that someone else has already |
| 2028 | * assumed the manager role. |
| 2029 | */ |
| 2030 | worker_clr_flags(worker, WORKER_PREP); |
| 2031 | |
| 2032 | do { |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2033 | struct work_struct *work = |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2034 | list_first_entry(&pool->worklist, |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2035 | struct work_struct, entry); |
| 2036 | |
| 2037 | if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { |
| 2038 | /* optimization path, not strictly necessary */ |
| 2039 | process_one_work(worker, work); |
| 2040 | if (unlikely(!list_empty(&worker->scheduled))) |
| 2041 | process_scheduled_works(worker); |
| 2042 | } else { |
| 2043 | move_linked_works(work, &worker->scheduled, NULL); |
| 2044 | process_scheduled_works(worker); |
| 2045 | } |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2046 | } while (keep_working(pool)); |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2047 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2048 | worker_set_flags(worker, WORKER_PREP, false); |
Tejun Heo | d313dd8 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2049 | sleep: |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2050 | if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2051 | goto recheck; |
Tejun Heo | d313dd8 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2052 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2053 | /* |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2054 | * gcwq->lock is held and there's no work to process and no |
| 2055 | * need to manage, sleep. Workers are woken up only while |
| 2056 | * holding gcwq->lock or from local cpu, so setting the |
| 2057 | * current state before releasing gcwq->lock is enough to |
| 2058 | * prevent losing any event. |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2059 | */ |
| 2060 | worker_enter_idle(worker); |
| 2061 | __set_current_state(TASK_INTERRUPTIBLE); |
| 2062 | spin_unlock_irq(&gcwq->lock); |
| 2063 | schedule(); |
| 2064 | goto woke_up; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2065 | } |
| 2066 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2067 | /** |
| 2068 | * rescuer_thread - the rescuer thread function |
| 2069 | * @__wq: the associated workqueue |
| 2070 | * |
| 2071 | * Workqueue rescuer thread function. There's one rescuer for each |
| 2072 | * workqueue which has WQ_RESCUER set. |
| 2073 | * |
| 2074 | * Regular work processing on a gcwq may block trying to create a new
| 2075 | * worker, which uses a GFP_KERNEL allocation; this has a slight chance
| 2076 | * of developing into a deadlock if some works currently on the same
| 2077 | * queue need to be processed to satisfy the GFP_KERNEL allocation.
| 2078 | * This is the problem the rescuer solves.
| 2079 | *
| 2080 | * When such a condition is possible, the gcwq summons the rescuers of
| 2081 | * all workqueues which have works queued on the gcwq and lets them
| 2082 | * process those works so that forward progress can be guaranteed.
| 2083 | * |
| 2084 | * This should happen rarely. |
| 2085 | */ |
| 2086 | static int rescuer_thread(void *__wq) |
| 2087 | { |
| 2088 | struct workqueue_struct *wq = __wq; |
| 2089 | struct worker *rescuer = wq->rescuer; |
| 2090 | struct list_head *scheduled = &rescuer->scheduled; |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2091 | bool is_unbound = wq->flags & WQ_UNBOUND; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2092 | unsigned int cpu; |
| 2093 | |
| 2094 | set_user_nice(current, RESCUER_NICE_LEVEL); |
| 2095 | repeat: |
| 2096 | set_current_state(TASK_INTERRUPTIBLE); |
| 2097 | |
Mike Galbraith | dbdd7f0 | 2012-11-28 07:17:18 +0100 | [diff] [blame] | 2098 | if (kthread_should_stop()) { |
| 2099 | __set_current_state(TASK_RUNNING); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2100 | return 0; |
Mike Galbraith | dbdd7f0 | 2012-11-28 07:17:18 +0100 | [diff] [blame] | 2101 | } |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2102 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2103 | /* |
| 2104 | * See whether any cpu is asking for help. Unbound
| 2105 | * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. |
| 2106 | */ |
Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 2107 | for_each_mayday_cpu(cpu, wq->mayday_mask) { |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2108 | unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; |
| 2109 | struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2110 | struct worker_pool *pool = cwq->pool; |
| 2111 | struct global_cwq *gcwq = pool->gcwq; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2112 | struct work_struct *work, *n; |
| 2113 | |
| 2114 | __set_current_state(TASK_RUNNING); |
Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 2115 | mayday_clear_cpu(cpu, wq->mayday_mask); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2116 | |
| 2117 | /* migrate to the target cpu if possible */ |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2118 | rescuer->pool = pool; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2119 | worker_maybe_bind_and_lock(rescuer); |
| 2120 | |
| 2121 | /* |
| 2122 | * Slurp in all works issued via this workqueue and |
| 2123 | * process'em. |
| 2124 | */ |
| 2125 | BUG_ON(!list_empty(&rescuer->scheduled)); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2126 | list_for_each_entry_safe(work, n, &pool->worklist, entry) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2127 | if (get_work_cwq(work) == cwq) |
| 2128 | move_linked_works(work, scheduled, &n); |
| 2129 | |
| 2130 | process_scheduled_works(rescuer); |
Tejun Heo | 7576958 | 2011-02-14 14:04:46 +0100 | [diff] [blame] | 2131 | |
| 2132 | /* |
| 2133 | * Leave this gcwq. If keep_working() is %true, notify a |
| 2134 | * regular worker; otherwise, we end up with 0 concurrency |
| 2135 | * and stalling the execution. |
| 2136 | */ |
Tejun Heo | 7ef6a93 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2137 | if (keep_working(pool)) |
| 2138 | wake_up_worker(pool); |
Tejun Heo | 7576958 | 2011-02-14 14:04:46 +0100 | [diff] [blame] | 2139 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2140 | spin_unlock_irq(&gcwq->lock); |
| 2141 | } |
| 2142 | |
| 2143 | schedule(); |
| 2144 | goto repeat; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2145 | } |
| 2146 | |
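/*
 * Illustrative sketch, not part of workqueue.c: how a caller typically
 * ends up with a rescuer.  In this version of the code, passing
 * WQ_MEM_RECLAIM to alloc_workqueue() is what results in WQ_RESCUER
 * being set and rescuer_thread() being started for the queue, so work
 * items sitting on the memory-reclaim path keep making progress even
 * when new workers cannot be created.  Names below are hypothetical.
 */
static struct workqueue_struct *example_reclaim_wq;

static int __init example_reclaim_setup(void)
{
	/* a dedicated rescuer guarantees forward progress under memory pressure */
	example_reclaim_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM, 1);
	return example_reclaim_wq ? 0 : -ENOMEM;
}
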
Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2147 | struct wq_barrier { |
| 2148 | struct work_struct work; |
| 2149 | struct completion done; |
| 2150 | }; |
| 2151 | |
| 2152 | static void wq_barrier_func(struct work_struct *work) |
| 2153 | { |
| 2154 | struct wq_barrier *barr = container_of(work, struct wq_barrier, work); |
| 2155 | complete(&barr->done); |
| 2156 | } |
| 2157 | |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2158 | /** |
| 2159 | * insert_wq_barrier - insert a barrier work |
| 2160 | * @cwq: cwq to insert barrier into |
| 2161 | * @barr: wq_barrier to insert |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2162 | * @target: target work to attach @barr to |
| 2163 | * @worker: worker currently executing @target, NULL if @target is not executing |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2164 | * |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2165 | * @barr is linked to @target such that @barr is completed only after |
| 2166 | * @target finishes execution. Please note that the ordering |
| 2167 | * guarantee is observed only with respect to @target and on the local |
| 2168 | * cpu. |
| 2169 | * |
| 2170 | * Currently, a queued barrier can't be canceled. This is because
| 2171 | * try_to_grab_pending() can't determine whether the work to be
| 2172 | * grabbed is at the head of the queue and thus can't clear the LINKED
| 2173 | * flag of the previous work, while there must be a valid next work
| 2174 | * after a work with the LINKED flag set.
| 2175 | * |
| 2176 | * Note that when @worker is non-NULL, @target may be modified |
| 2177 | * underneath us, so we can't reliably determine cwq from @target. |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2178 | * |
| 2179 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2180 | * spin_lock_irq(gcwq->lock). |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2181 | */ |
Oleg Nesterov | 83c2252 | 2007-05-09 02:33:54 -0700 | [diff] [blame] | 2182 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2183 | struct wq_barrier *barr, |
| 2184 | struct work_struct *target, struct worker *worker) |
Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2185 | { |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2186 | struct list_head *head; |
| 2187 | unsigned int linked = 0; |
| 2188 | |
Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2189 | /* |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2190 | * debugobject calls are safe here even with gcwq->lock locked |
Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2191 | * as we know for sure that this will not trigger any of the |
| 2192 | * checks and call back into the fixup functions where we |
| 2193 | * might deadlock. |
| 2194 | */ |
Andrew Morton | ca1cab3 | 2010-10-26 14:22:34 -0700 | [diff] [blame] | 2195 | INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); |
Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2196 | __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); |
Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2197 | init_completion(&barr->done); |
Oleg Nesterov | 83c2252 | 2007-05-09 02:33:54 -0700 | [diff] [blame] | 2198 | |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2199 | /* |
| 2200 | * If @target is currently being executed, schedule the |
| 2201 | * barrier to the worker; otherwise, put it after @target. |
| 2202 | */ |
| 2203 | if (worker) |
| 2204 | head = worker->scheduled.next; |
| 2205 | else { |
| 2206 | unsigned long *bits = work_data_bits(target); |
| 2207 | |
| 2208 | head = target->entry.next; |
| 2209 | /* there can already be other linked works, inherit and set */ |
| 2210 | linked = *bits & WORK_STRUCT_LINKED; |
| 2211 | __set_bit(WORK_STRUCT_LINKED_BIT, bits); |
| 2212 | } |
| 2213 | |
Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2214 | debug_work_activate(&barr->work); |
Tejun Heo | affee4b | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2215 | insert_work(cwq, &barr->work, head, |
| 2216 | work_color_to_flags(WORK_NO_COLOR) | linked); |
Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2217 | } |
| 2218 | |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2219 | /** |
| 2220 | * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing |
| 2221 | * @wq: workqueue being flushed |
| 2222 | * @flush_color: new flush color, < 0 for no-op |
| 2223 | * @work_color: new work color, < 0 for no-op |
| 2224 | * |
| 2225 | * Prepare cwqs for workqueue flushing. |
| 2226 | * |
| 2227 | * If @flush_color is non-negative, flush_color on all cwqs should be |
| 2228 | * -1. If no cwq has in-flight commands at the specified color, all |
| 2229 | * cwq->flush_color's stay at -1 and %false is returned. If any cwq |
| 2230 | * has in-flight commands, its cwq->flush_color is set to
| 2231 | * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq |
| 2232 | * wakeup logic is armed and %true is returned. |
| 2233 | * |
| 2234 | * The caller should have initialized @wq->first_flusher prior to |
| 2235 | * calling this function with non-negative @flush_color. If |
| 2236 | * @flush_color is negative, no flush color update is done and %false |
| 2237 | * is returned. |
| 2238 | * |
| 2239 | * If @work_color is non-negative, all cwqs should have the same |
| 2240 | * work_color which is previous to @work_color and all will be |
| 2241 | * advanced to @work_color. |
| 2242 | * |
| 2243 | * CONTEXT: |
| 2244 | * mutex_lock(wq->flush_mutex). |
| 2245 | * |
| 2246 | * RETURNS: |
| 2247 | * %true if @flush_color >= 0 and there's something to flush. %false |
| 2248 | * otherwise. |
| 2249 | */ |
| 2250 | static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, |
| 2251 | int flush_color, int work_color) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2252 | { |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2253 | bool wait = false; |
| 2254 | unsigned int cpu; |
Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 2255 | |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2256 | if (flush_color >= 0) { |
| 2257 | BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); |
| 2258 | atomic_set(&wq->nr_cwqs_to_flush, 1); |
Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2259 | } |
Oleg Nesterov | 1444196 | 2007-05-23 13:57:57 -0700 | [diff] [blame] | 2260 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2261 | for_each_cwq_cpu(cpu, wq) { |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2262 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2263 | struct global_cwq *gcwq = cwq->pool->gcwq; |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2264 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2265 | spin_lock_irq(&gcwq->lock); |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2266 | |
| 2267 | if (flush_color >= 0) { |
| 2268 | BUG_ON(cwq->flush_color != -1); |
| 2269 | |
| 2270 | if (cwq->nr_in_flight[flush_color]) { |
| 2271 | cwq->flush_color = flush_color; |
| 2272 | atomic_inc(&wq->nr_cwqs_to_flush); |
| 2273 | wait = true; |
| 2274 | } |
| 2275 | } |
| 2276 | |
| 2277 | if (work_color >= 0) { |
| 2278 | BUG_ON(work_color != work_next_color(cwq->work_color)); |
| 2279 | cwq->work_color = work_color; |
| 2280 | } |
| 2281 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2282 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2283 | } |
| 2284 | |
| 2285 | if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) |
| 2286 | complete(&wq->first_flusher->done); |
| 2287 | |
| 2288 | return wait; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2289 | } |
| 2290 | |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2291 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2292 | * flush_workqueue - ensure that any scheduled work has run to completion. |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2293 | * @wq: workqueue to flush |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2294 | * |
| 2295 | * Forces execution of the workqueue and blocks until its completion. |
| 2296 | * This is typically used in driver shutdown handlers. |
| 2297 | * |
Oleg Nesterov | fc2e4d7 | 2007-05-09 02:33:51 -0700 | [diff] [blame] | 2298 | * We sleep until all works which were queued on entry have been handled, |
| 2299 | * but we are not livelocked by new incoming ones. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2300 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2301 | void flush_workqueue(struct workqueue_struct *wq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2302 | { |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2303 | struct wq_flusher this_flusher = { |
| 2304 | .list = LIST_HEAD_INIT(this_flusher.list), |
| 2305 | .flush_color = -1, |
| 2306 | .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), |
| 2307 | }; |
| 2308 | int next_color; |
Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 2309 | |
Ingo Molnar | 3295f0e | 2008-08-11 10:30:30 +0200 | [diff] [blame] | 2310 | lock_map_acquire(&wq->lockdep_map); |
| 2311 | lock_map_release(&wq->lockdep_map); |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2312 | |
| 2313 | mutex_lock(&wq->flush_mutex); |
| 2314 | |
| 2315 | /* |
| 2316 | * Start-to-wait phase |
| 2317 | */ |
| 2318 | next_color = work_next_color(wq->work_color); |
| 2319 | |
| 2320 | if (next_color != wq->flush_color) { |
| 2321 | /* |
| 2322 | * Color space is not full. The current work_color |
| 2323 | * becomes our flush_color and work_color is advanced |
| 2324 | * by one. |
| 2325 | */ |
| 2326 | BUG_ON(!list_empty(&wq->flusher_overflow)); |
| 2327 | this_flusher.flush_color = wq->work_color; |
| 2328 | wq->work_color = next_color; |
| 2329 | |
| 2330 | if (!wq->first_flusher) { |
| 2331 | /* no flush in progress, become the first flusher */ |
| 2332 | BUG_ON(wq->flush_color != this_flusher.flush_color); |
| 2333 | |
| 2334 | wq->first_flusher = &this_flusher; |
| 2335 | |
| 2336 | if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, |
| 2337 | wq->work_color)) { |
| 2338 | /* nothing to flush, done */ |
| 2339 | wq->flush_color = next_color; |
| 2340 | wq->first_flusher = NULL; |
| 2341 | goto out_unlock; |
| 2342 | } |
| 2343 | } else { |
| 2344 | /* wait in queue */ |
| 2345 | BUG_ON(wq->flush_color == this_flusher.flush_color); |
| 2346 | list_add_tail(&this_flusher.list, &wq->flusher_queue); |
| 2347 | flush_workqueue_prep_cwqs(wq, -1, wq->work_color); |
| 2348 | } |
| 2349 | } else { |
| 2350 | /* |
| 2351 | * Oops, color space is full, wait on overflow queue. |
| 2352 | * The next flush completion will assign us |
| 2353 | * flush_color and transfer to flusher_queue. |
| 2354 | */ |
| 2355 | list_add_tail(&this_flusher.list, &wq->flusher_overflow); |
| 2356 | } |
| 2357 | |
| 2358 | mutex_unlock(&wq->flush_mutex); |
| 2359 | |
| 2360 | wait_for_completion(&this_flusher.done); |
| 2361 | |
| 2362 | /* |
| 2363 | * Wake-up-and-cascade phase |
| 2364 | * |
| 2365 | * First flushers are responsible for cascading flushes and |
| 2366 | * handling overflow. Non-first flushers can simply return. |
| 2367 | */ |
| 2368 | if (wq->first_flusher != &this_flusher) |
| 2369 | return; |
| 2370 | |
| 2371 | mutex_lock(&wq->flush_mutex); |
| 2372 | |
Tejun Heo | 4ce48b3 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2373 | /* we might have raced, check again with mutex held */ |
| 2374 | if (wq->first_flusher != &this_flusher) |
| 2375 | goto out_unlock; |
| 2376 | |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2377 | wq->first_flusher = NULL; |
| 2378 | |
| 2379 | BUG_ON(!list_empty(&this_flusher.list)); |
| 2380 | BUG_ON(wq->flush_color != this_flusher.flush_color); |
| 2381 | |
| 2382 | while (true) { |
| 2383 | struct wq_flusher *next, *tmp; |
| 2384 | |
| 2385 | /* complete all the flushers sharing the current flush color */ |
| 2386 | list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { |
| 2387 | if (next->flush_color != wq->flush_color) |
| 2388 | break; |
| 2389 | list_del_init(&next->list); |
| 2390 | complete(&next->done); |
| 2391 | } |
| 2392 | |
| 2393 | BUG_ON(!list_empty(&wq->flusher_overflow) && |
| 2394 | wq->flush_color != work_next_color(wq->work_color)); |
| 2395 | |
| 2396 | /* this flush_color is finished, advance by one */ |
| 2397 | wq->flush_color = work_next_color(wq->flush_color); |
| 2398 | |
| 2399 | /* one color has been freed, handle overflow queue */ |
| 2400 | if (!list_empty(&wq->flusher_overflow)) { |
| 2401 | /* |
| 2402 | * Assign the same color to all overflowed |
| 2403 | * flushers, advance work_color and append to |
| 2404 | * flusher_queue. This is the start-to-wait |
| 2405 | * phase for these overflowed flushers. |
| 2406 | */ |
| 2407 | list_for_each_entry(tmp, &wq->flusher_overflow, list) |
| 2408 | tmp->flush_color = wq->work_color; |
| 2409 | |
| 2410 | wq->work_color = work_next_color(wq->work_color); |
| 2411 | |
| 2412 | list_splice_tail_init(&wq->flusher_overflow, |
| 2413 | &wq->flusher_queue); |
| 2414 | flush_workqueue_prep_cwqs(wq, -1, wq->work_color); |
| 2415 | } |
| 2416 | |
| 2417 | if (list_empty(&wq->flusher_queue)) { |
| 2418 | BUG_ON(wq->flush_color != wq->work_color); |
| 2419 | break; |
| 2420 | } |
| 2421 | |
| 2422 | /* |
| 2423 | * Need to flush more colors. Make the next flusher |
| 2424 | * the new first flusher and arm cwqs. |
| 2425 | */ |
| 2426 | BUG_ON(wq->flush_color == wq->work_color); |
| 2427 | BUG_ON(wq->flush_color != next->flush_color); |
| 2428 | |
| 2429 | list_del_init(&next->list); |
| 2430 | wq->first_flusher = next; |
| 2431 | |
| 2432 | if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) |
| 2433 | break; |
| 2434 | |
| 2435 | /* |
| 2436 | * Meh... this color is already done, clear first |
| 2437 | * flusher and repeat cascading. |
| 2438 | */ |
| 2439 | wq->first_flusher = NULL; |
| 2440 | } |
| 2441 | |
| 2442 | out_unlock: |
| 2443 | mutex_unlock(&wq->flush_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2444 | } |
Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2445 | EXPORT_SYMBOL_GPL(flush_workqueue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2446 | |
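/*
 * Illustrative sketch, not part of workqueue.c: the typical shutdown-time
 * use of flush_workqueue() described above.  example_wq and example_exit()
 * are hypothetical; example_wq is assumed to have been allocated earlier.
 */
static void example_exit(void)
{
	/* wait for everything queued so far on example_wq to finish */
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}
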
Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 2447 | /** |
| 2448 | * drain_workqueue - drain a workqueue |
| 2449 | * @wq: workqueue to drain |
| 2450 | * |
| 2451 | * Wait until the workqueue becomes empty. While draining is in progress, |
| 2452 | * only chain queueing is allowed. IOW, only currently pending or running |
| 2453 | * work items on @wq can queue further work items on it. @wq is flushed |
| 2454 | * repeatedly until it becomes empty. The number of flushes is determined
| 2455 | * by the depth of chaining and should be relatively short. Whine if it |
| 2456 | * takes too long. |
| 2457 | */ |
| 2458 | void drain_workqueue(struct workqueue_struct *wq) |
| 2459 | { |
| 2460 | unsigned int flush_cnt = 0; |
| 2461 | unsigned int cpu; |
| 2462 | |
| 2463 | /* |
| 2464 | * __queue_work() needs to test whether there are drainers; it is much
| 2465 | * hotter than drain_workqueue() and already looks at @wq->flags. |
| 2466 | * Use WQ_DRAINING so that queue doesn't have to check nr_drainers. |
| 2467 | */ |
| 2468 | spin_lock(&workqueue_lock); |
| 2469 | if (!wq->nr_drainers++) |
| 2470 | wq->flags |= WQ_DRAINING; |
| 2471 | spin_unlock(&workqueue_lock); |
| 2472 | reflush: |
| 2473 | flush_workqueue(wq); |
| 2474 | |
| 2475 | for_each_cwq_cpu(cpu, wq) { |
| 2476 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
Thomas Tuttle | fa2563e | 2011-09-14 16:22:28 -0700 | [diff] [blame] | 2477 | bool drained; |
Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 2478 | |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2479 | spin_lock_irq(&cwq->pool->gcwq->lock); |
Thomas Tuttle | fa2563e | 2011-09-14 16:22:28 -0700 | [diff] [blame] | 2480 | drained = !cwq->nr_active && list_empty(&cwq->delayed_works); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2481 | spin_unlock_irq(&cwq->pool->gcwq->lock); |
Thomas Tuttle | fa2563e | 2011-09-14 16:22:28 -0700 | [diff] [blame] | 2482 | |
| 2483 | if (drained) |
Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 2484 | continue; |
| 2485 | |
| 2486 | if (++flush_cnt == 10 || |
| 2487 | (flush_cnt % 100 == 0 && flush_cnt <= 1000)) |
| 2488 | pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n", |
| 2489 | wq->name, flush_cnt); |
| 2490 | goto reflush; |
| 2491 | } |
| 2492 | |
| 2493 | spin_lock(&workqueue_lock); |
| 2494 | if (!--wq->nr_drainers) |
| 2495 | wq->flags &= ~WQ_DRAINING; |
| 2496 | spin_unlock(&workqueue_lock); |
| 2497 | } |
| 2498 | EXPORT_SYMBOL_GPL(drain_workqueue); |
| 2499 | |
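/*
 * Illustrative sketch, not part of workqueue.c: drain_workqueue() versus a
 * plain flush when work items re-queue themselves.  A single flush would
 * miss the chained instance queued below; draining repeats the flush until
 * the queue is genuinely empty.  example_more_to_do(), example_wq and the
 * other names are hypothetical.
 */
static void example_chained_fn(struct work_struct *work)
{
	/* chain queueing onto the same wq is still allowed while draining */
	if (example_more_to_do())
		queue_work(example_wq, work);
}

static void example_teardown(void)
{
	drain_workqueue(example_wq);	/* returns only once the wq is empty */
	destroy_workqueue(example_wq);
}
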
Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2500 | static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, |
| 2501 | bool wait_executing) |
| 2502 | { |
| 2503 | struct worker *worker = NULL; |
| 2504 | struct global_cwq *gcwq; |
| 2505 | struct cpu_workqueue_struct *cwq; |
| 2506 | |
| 2507 | might_sleep(); |
| 2508 | gcwq = get_work_gcwq(work); |
| 2509 | if (!gcwq) |
| 2510 | return false; |
| 2511 | |
| 2512 | spin_lock_irq(&gcwq->lock); |
| 2513 | if (!list_empty(&work->entry)) { |
| 2514 | /* |
| 2515 | * See the comment near try_to_grab_pending()->smp_rmb(). |
| 2516 | * If it was re-queued to a different gcwq under us, we |
| 2517 | * are not going to wait. |
| 2518 | */ |
| 2519 | smp_rmb(); |
| 2520 | cwq = get_work_cwq(work); |
Tejun Heo | 5865888 | 2012-07-12 14:46:37 -0700 | [diff] [blame] | 2521 | if (unlikely(!cwq || gcwq != cwq->pool->gcwq)) |
Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2522 | goto already_gone; |
| 2523 | } else if (wait_executing) { |
| 2524 | worker = find_worker_executing_work(gcwq, work); |
| 2525 | if (!worker) |
| 2526 | goto already_gone; |
| 2527 | cwq = worker->current_cwq; |
| 2528 | } else |
| 2529 | goto already_gone; |
| 2530 | |
| 2531 | insert_wq_barrier(cwq, barr, work, worker); |
| 2532 | spin_unlock_irq(&gcwq->lock); |
| 2533 | |
Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 2534 | /* |
| 2535 | * If @max_active is 1 or rescuer is in use, flushing another work |
| 2536 | * item on the same workqueue may lead to deadlock. Make sure the |
| 2537 | * flusher is not running on the same workqueue by verifying write |
| 2538 | * access. |
| 2539 | */ |
| 2540 | if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) |
| 2541 | lock_map_acquire(&cwq->wq->lockdep_map); |
| 2542 | else |
| 2543 | lock_map_acquire_read(&cwq->wq->lockdep_map); |
Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2544 | lock_map_release(&cwq->wq->lockdep_map); |
Tejun Heo | e159489 | 2011-01-09 23:32:15 +0100 | [diff] [blame] | 2545 | |
Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2546 | return true; |
| 2547 | already_gone: |
| 2548 | spin_unlock_irq(&gcwq->lock); |
| 2549 | return false; |
| 2550 | } |
| 2551 | |
Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2552 | /** |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2553 | * flush_work - wait for a work to finish executing the last queueing instance |
| 2554 | * @work: the work to flush |
Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2555 | * |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2556 | * Wait until @work has finished execution. This function considers |
| 2557 | * only the last queueing instance of @work. If @work has been |
| 2558 | * enqueued across different CPUs on a non-reentrant workqueue or on |
| 2559 | * multiple workqueues, @work might still be executing on some of
| 2560 | * those CPUs on return, from an earlier queueing.
Oleg Nesterov | a67da70 | 2008-07-25 01:47:52 -0700 | [diff] [blame] | 2561 | * |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2562 | * If @work was queued only on a non-reentrant, ordered or unbound |
| 2563 | * workqueue, @work is guaranteed to be idle on return if it hasn't |
| 2564 | * been requeued since flush started. |
| 2565 | * |
| 2566 | * RETURNS: |
| 2567 | * %true if flush_work() waited for the work to finish execution, |
| 2568 | * %false if it was already idle. |
Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2569 | */ |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2570 | bool flush_work(struct work_struct *work) |
Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2571 | { |
Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2572 | struct wq_barrier barr; |
| 2573 | |
Tejun Heo | baf5902 | 2010-09-16 10:42:16 +0200 | [diff] [blame] | 2574 | if (start_flush_work(work, &barr, true)) { |
| 2575 | wait_for_completion(&barr.done); |
| 2576 | destroy_work_on_stack(&barr.work); |
| 2577 | return true; |
| 2578 | } else |
| 2579 | return false; |
Oleg Nesterov | db70089 | 2008-07-25 01:47:49 -0700 | [diff] [blame] | 2580 | } |
| 2581 | EXPORT_SYMBOL_GPL(flush_work); |
| 2582 | |
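/*
 * Illustrative sketch, not part of workqueue.c: waiting for one specific
 * work item rather than a whole workqueue.  Only the last queueing
 * instance of example_work is waited for, as documented above.  The names
 * are hypothetical; example_work is assumed to be declared elsewhere.
 */
static void example_update_config(void)
{
	/* ensure the previously queued instance has finished executing */
	if (flush_work(&example_work))
		pr_debug("example: waited for an in-flight instance\n");

	/* now safe to modify state that example_work's function reads */
}
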
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2583 | static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) |
| 2584 | { |
| 2585 | struct wq_barrier barr; |
| 2586 | struct worker *worker; |
| 2587 | |
| 2588 | spin_lock_irq(&gcwq->lock); |
| 2589 | |
| 2590 | worker = find_worker_executing_work(gcwq, work); |
| 2591 | if (unlikely(worker)) |
| 2592 | insert_wq_barrier(worker->current_cwq, &barr, work, worker); |
| 2593 | |
| 2594 | spin_unlock_irq(&gcwq->lock); |
| 2595 | |
| 2596 | if (unlikely(worker)) { |
| 2597 | wait_for_completion(&barr.done); |
| 2598 | destroy_work_on_stack(&barr.work); |
| 2599 | return true; |
| 2600 | } else |
| 2601 | return false; |
| 2602 | } |
| 2603 | |
| 2604 | static bool wait_on_work(struct work_struct *work) |
| 2605 | { |
| 2606 | bool ret = false; |
| 2607 | int cpu; |
| 2608 | |
| 2609 | might_sleep(); |
| 2610 | |
| 2611 | lock_map_acquire(&work->lockdep_map); |
| 2612 | lock_map_release(&work->lockdep_map); |
| 2613 | |
| 2614 | for_each_gcwq_cpu(cpu) |
| 2615 | ret |= wait_on_cpu_work(get_gcwq(cpu), work); |
| 2616 | return ret; |
| 2617 | } |
| 2618 | |
Tejun Heo | 0938349 | 2010-09-16 10:48:29 +0200 | [diff] [blame] | 2619 | /** |
| 2620 | * flush_work_sync - wait until a work has finished execution |
| 2621 | * @work: the work to flush |
| 2622 | * |
| 2623 | * Wait until @work has finished execution. On return, it's |
| 2624 | * guaranteed that all queueing instances of @work which happened |
| 2625 | * before this function is called are finished. In other words, if |
| 2626 | * @work hasn't been requeued since this function was called, @work is |
| 2627 | * guaranteed to be idle on return. |
| 2628 | * |
| 2629 | * RETURNS: |
| 2630 | * %true if flush_work_sync() waited for the work to finish execution, |
| 2631 | * %false if it was already idle. |
| 2632 | */ |
| 2633 | bool flush_work_sync(struct work_struct *work) |
| 2634 | { |
| 2635 | struct wq_barrier barr; |
| 2636 | bool pending, waited; |
| 2637 | |
| 2638 | /* we'll wait for executions separately, queue barr only if pending */ |
| 2639 | pending = start_flush_work(work, &barr, false); |
| 2640 | |
| 2641 | /* wait for executions to finish */ |
| 2642 | waited = wait_on_work(work); |
| 2643 | |
| 2644 | /* wait for the pending one */ |
| 2645 | if (pending) { |
| 2646 | wait_for_completion(&barr.done); |
| 2647 | destroy_work_on_stack(&barr.work); |
| 2648 | } |
| 2649 | |
| 2650 | return pending || waited; |
| 2651 | } |
| 2652 | EXPORT_SYMBOL_GPL(flush_work_sync); |
| 2653 | |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2654 | /* |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2655 | * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2656 | * so this work can't be re-armed in any way. |
| 2657 | */ |
| 2658 | static int try_to_grab_pending(struct work_struct *work) |
| 2659 | { |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2660 | struct global_cwq *gcwq; |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2661 | int ret = -1; |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2662 | |
Tejun Heo | 22df02b | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 2663 | if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2664 | return 0; |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2665 | |
| 2666 | /* |
| 2667 | * The queueing is in progress, or it is already queued. Try to |
| 2668 | * steal it from ->worklist without clearing WORK_STRUCT_PENDING. |
| 2669 | */ |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2670 | gcwq = get_work_gcwq(work); |
| 2671 | if (!gcwq) |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2672 | return ret; |
| 2673 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2674 | spin_lock_irq(&gcwq->lock); |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2675 | if (!list_empty(&work->entry)) { |
| 2676 | /* |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2677 | * This work is queued, but perhaps we locked the wrong gcwq. |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2678 | * In that case we must see the new value after rmb(), see |
| 2679 | * insert_work()->wmb(). |
| 2680 | */ |
| 2681 | smp_rmb(); |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2682 | if (gcwq == get_work_gcwq(work)) { |
Thomas Gleixner | dc186ad | 2009-11-16 01:09:48 +0900 | [diff] [blame] | 2683 | debug_work_deactivate(work); |
Lai Jiangshan | 31eafff | 2012-09-18 10:40:00 -0700 | [diff] [blame] | 2684 | |
| 2685 | /* |
| 2686 | * A delayed work item cannot be grabbed directly |
| 2687 | * because it might have linked NO_COLOR work items |
| 2688 | * which, if left on the delayed_list, will confuse |
| 2689 | * cwq->nr_active management later on and cause |
| 2690 | * stall. Make sure the work item is activated |
| 2691 | * before grabbing. |
| 2692 | */ |
| 2693 | if (*work_data_bits(work) & WORK_STRUCT_DELAYED) |
| 2694 | cwq_activate_delayed_work(work); |
| 2695 | |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2696 | list_del_init(&work->entry); |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2697 | cwq_dec_nr_in_flight(get_work_cwq(work), |
Tejun Heo | 8a2e8e5d | 2010-08-25 10:33:56 +0200 | [diff] [blame] | 2698 | get_work_color(work), |
| 2699 | *work_data_bits(work) & WORK_STRUCT_DELAYED); |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2700 | ret = 1; |
| 2701 | } |
| 2702 | } |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 2703 | spin_unlock_irq(&gcwq->lock); |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2704 | |
| 2705 | return ret; |
| 2706 | } |
| 2707 | |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2708 | static bool __cancel_work_timer(struct work_struct *work, |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2709 | struct timer_list* timer) |
| 2710 | { |
| 2711 | int ret; |
| 2712 | |
| 2713 | do { |
| 2714 | ret = (timer && likely(del_timer(timer))); |
| 2715 | if (!ret) |
| 2716 | ret = try_to_grab_pending(work); |
| 2717 | wait_on_work(work); |
| 2718 | } while (unlikely(ret < 0)); |
| 2719 | |
Tejun Heo | 7a22ad7 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 2720 | clear_work_data(work); |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2721 | return ret; |
| 2722 | } |
| 2723 | |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2724 | /** |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2725 | * cancel_work_sync - cancel a work and wait for it to finish |
| 2726 | * @work: the work to cancel |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2727 | * |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2728 | * Cancel @work and wait for its execution to finish. This function |
| 2729 | * can be used even if the work re-queues itself or migrates to |
| 2730 | * another workqueue. On return from this function, @work is |
| 2731 | * guaranteed to be not pending or executing on any CPU. |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2732 | * |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2733 | * cancel_work_sync(&delayed_work->work) must not be used for |
| 2734 | * delayed_work's. Use cancel_delayed_work_sync() instead. |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2735 | * |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2736 | * The caller must ensure that the workqueue on which @work was last |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2737 | * queued can't be destroyed before this function returns. |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2738 | * |
| 2739 | * RETURNS: |
| 2740 | * %true if @work was pending, %false otherwise. |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2741 | */ |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2742 | bool cancel_work_sync(struct work_struct *work) |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2743 | { |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2744 | return __cancel_work_timer(work, NULL); |
Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 2745 | } |
Oleg Nesterov | 28e53bd | 2007-05-09 02:34:22 -0700 | [diff] [blame] | 2746 | EXPORT_SYMBOL_GPL(cancel_work_sync); |
Oleg Nesterov | b89deed | 2007-05-09 02:33:52 -0700 | [diff] [blame] | 2747 | |
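/*
 * Illustrative sketch, not part of workqueue.c: the usual driver-teardown
 * use of cancel_work_sync().  After the call returns, the work item is
 * neither pending nor running on any CPU, so resources its function uses
 * may be released.  struct example_device, example_disable_irq() and the
 * remove path below are hypothetical.
 */
static void example_remove(struct example_device *dev)
{
	/* stop new submissions first */
	example_disable_irq(dev);

	/* then make sure the handler is neither queued nor executing */
	cancel_work_sync(&dev->irq_work);

	kfree(dev->dma_buffer);
}
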
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2748 | /** |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2749 | * flush_delayed_work - wait for a dwork to finish executing the last queueing |
| 2750 | * @dwork: the delayed work to flush |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2751 | * |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2752 | * Delayed timer is cancelled and the pending work is queued for |
| 2753 | * immediate execution. Like flush_work(), this function only |
| 2754 | * considers the last queueing instance of @dwork. |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2755 | * |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2756 | * RETURNS: |
| 2757 | * %true if flush_work() waited for the work to finish execution, |
| 2758 | * %false if it was already idle. |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2759 | */ |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2760 | bool flush_delayed_work(struct delayed_work *dwork) |
| 2761 | { |
| 2762 | if (del_timer_sync(&dwork->timer)) |
| 2763 | __queue_work(raw_smp_processor_id(), |
| 2764 | get_work_cwq(&dwork->work)->wq, &dwork->work); |
| 2765 | return flush_work(&dwork->work); |
| 2766 | } |
| 2767 | EXPORT_SYMBOL(flush_delayed_work); |
| 2768 | |
| 2769 | /** |
Tejun Heo | 0938349 | 2010-09-16 10:48:29 +0200 | [diff] [blame] | 2770 | * flush_delayed_work_sync - wait for a dwork to finish |
| 2771 | * @dwork: the delayed work to flush |
| 2772 | * |
| 2773 | * Delayed timer is cancelled and the pending work is queued for |
| 2774 | * execution immediately. Other than timer handling, its behavior |
| 2775 | * is identical to flush_work_sync(). |
| 2776 | * |
| 2777 | * RETURNS: |
| 2778 | * %true if flush_work_sync() waited for the work to finish execution, |
| 2779 | * %false if it was already idle. |
| 2780 | */ |
| 2781 | bool flush_delayed_work_sync(struct delayed_work *dwork) |
| 2782 | { |
| 2783 | if (del_timer_sync(&dwork->timer)) |
| 2784 | __queue_work(raw_smp_processor_id(), |
| 2785 | get_work_cwq(&dwork->work)->wq, &dwork->work); |
| 2786 | return flush_work_sync(&dwork->work); |
| 2787 | } |
| 2788 | EXPORT_SYMBOL(flush_delayed_work_sync); |
| 2789 | |
| 2790 | /** |
Tejun Heo | 401a8d0 | 2010-09-16 10:36:00 +0200 | [diff] [blame] | 2791 | * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish |
| 2792 | * @dwork: the delayed work to cancel
| 2793 | * |
| 2794 | * This is cancel_work_sync() for delayed works. |
| 2795 | * |
| 2796 | * RETURNS: |
| 2797 | * %true if @dwork was pending, %false otherwise. |
| 2798 | */ |
| 2799 | bool cancel_delayed_work_sync(struct delayed_work *dwork) |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2800 | { |
Oleg Nesterov | 1f1f642 | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2801 | return __cancel_work_timer(&dwork->work, &dwork->timer); |
Oleg Nesterov | 6e84d64 | 2007-05-09 02:34:46 -0700 | [diff] [blame] | 2802 | } |
Oleg Nesterov | f5a421a | 2007-07-15 23:41:44 -0700 | [diff] [blame] | 2803 | EXPORT_SYMBOL(cancel_delayed_work_sync); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2804 | |
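/*
 * Illustrative sketch, not part of workqueue.c: a periodic, self-rearming
 * delayed work and its teardown with cancel_delayed_work_sync(), which
 * takes care of both the timer and an already-running instance.  The
 * names and the one second period are hypothetical.
 */
static struct delayed_work example_poll;

static void example_poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... */
	schedule_delayed_work(&example_poll, msecs_to_jiffies(1000));
}

static void example_poll_start(void)
{
	INIT_DELAYED_WORK(&example_poll, example_poll_fn);
	schedule_delayed_work(&example_poll, msecs_to_jiffies(1000));
}

static void example_poll_stop(void)
{
	/* cancels the timer and waits for a running instance to finish */
	cancel_delayed_work_sync(&example_poll);
}
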
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2805 | /** |
| 2806 | * schedule_work - put work task in global workqueue |
| 2807 | * @work: job to be done |
| 2808 | * |
Bart Van Assche | 5b0f437d | 2009-07-30 19:00:53 +0200 | [diff] [blame] | 2809 | * Returns zero if @work was already on the kernel-global workqueue and |
| 2810 | * non-zero otherwise. |
| 2811 | * |
| 2812 | * This puts a job in the kernel-global workqueue if it was not already |
| 2813 | * queued and leaves it in the same position on the kernel-global |
| 2814 | * workqueue otherwise. |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2815 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2816 | int schedule_work(struct work_struct *work) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2817 | { |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2818 | return queue_work(system_wq, work); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2819 | } |
Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2820 | EXPORT_SYMBOL(schedule_work); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2821 | |
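/*
 * Illustrative sketch, not part of workqueue.c: deferring work from an
 * interrupt handler onto the kernel-global workqueue with schedule_work().
 * Queueing is cheap and legal from atomic context; the heavy lifting then
 * runs in process context.  Assumes <linux/interrupt.h>; all names are
 * hypothetical.
 */
static void example_bh_fn(struct work_struct *work)
{
	/* process context: may sleep, take mutexes, allocate with GFP_KERNEL */
}

static DECLARE_WORK(example_bh, example_bh_fn);

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	/* atomic context: just mark the work pending and kick a worker */
	schedule_work(&example_bh);
	return IRQ_HANDLED;
}
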
Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 2822 | /** |
| 2823 | * schedule_work_on - put work task on a specific cpu |
| 2824 | * @cpu: cpu to put the work task on |
| 2825 | * @work: job to be done |
| 2826 | * |
| 2827 | * This puts a job on a specific cpu. |
| 2828 | */ |
| 2829 | int schedule_work_on(int cpu, struct work_struct *work) |
| 2830 | { |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2831 | return queue_work_on(cpu, system_wq, work); |
Zhang Rui | c1a220e | 2008-07-23 21:28:39 -0700 | [diff] [blame] | 2832 | } |
| 2833 | EXPORT_SYMBOL(schedule_work_on); |
| 2834 | |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2835 | /** |
| 2836 | * schedule_delayed_work - put work task in global workqueue after delay |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2837 | * @dwork: job to be done |
| 2838 | * @delay: number of jiffies to wait or 0 for immediate execution |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2839 | * |
| 2840 | * After waiting for a given time this puts a job in the kernel-global |
| 2841 | * workqueue. |
| 2842 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2843 | int schedule_delayed_work(struct delayed_work *dwork, |
Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 2844 | unsigned long delay) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2845 | { |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2846 | return queue_delayed_work(system_wq, dwork, delay); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2847 | } |
Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2848 | EXPORT_SYMBOL(schedule_delayed_work); |
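/*
 * Illustrative sketch (hypothetical names): a self-rearming poller.
 * to_delayed_work() recovers the delayed_work from the work_struct
 * passed to the handler:
 *
 *	static void mydrv_poll_fn(struct work_struct *work)
 *	{
 *		struct mydrv *md = container_of(to_delayed_work(work),
 *						struct mydrv, poll_dwork);
 *
 *		mydrv_sample(md);
 *		schedule_delayed_work(&md->poll_dwork,
 *				      msecs_to_jiffies(500));
 *	}
 *
 * The poller is started with the same call after INIT_DELAYED_WORK()
 * and stopped with cancel_delayed_work_sync().
 */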
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2849 | |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2850 | /** |
| 2851 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
| 2852 | * @cpu: cpu to use |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2853 | * @dwork: job to be done |
Rolf Eike Beer | 0fcb78c | 2006-07-30 03:03:42 -0700 | [diff] [blame] | 2854 | * @delay: number of jiffies to wait |
| 2855 | * |
| 2856 | * After waiting for a given time this puts a job in the kernel-global |
| 2857 | * workqueue on the specified CPU. |
| 2858 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2859 | int schedule_delayed_work_on(int cpu, |
David Howells | 52bad64 | 2006-11-22 14:54:01 +0000 | [diff] [blame] | 2860 | struct delayed_work *dwork, unsigned long delay) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2861 | { |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2862 | return queue_delayed_work_on(cpu, system_wq, dwork, delay); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2863 | } |
Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2864 | EXPORT_SYMBOL(schedule_delayed_work_on); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2865 | |
Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2866 | /** |
Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2867 | * schedule_on_each_cpu - execute a function synchronously on each online CPU |
Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2868 | * @func: the function to call |
Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2869 | * |
Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2870 | * schedule_on_each_cpu() executes @func on each online CPU using the |
| 2871 | * system workqueue and blocks until all CPUs have completed. |
Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2872 | * schedule_on_each_cpu() is very slow. |
Tejun Heo | 31ddd87 | 2010-10-19 11:14:49 +0200 | [diff] [blame] | 2873 | * |
| 2874 | * RETURNS: |
| 2875 | * 0 on success, -errno on failure. |
Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2876 | */ |
David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2877 | int schedule_on_each_cpu(work_func_t func) |
Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2878 | { |
| 2879 | int cpu; |
Namhyung Kim | 38f5156 | 2010-08-08 14:24:09 +0200 | [diff] [blame] | 2880 | struct work_struct __percpu *works; |
Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2881 | |
Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2882 | works = alloc_percpu(struct work_struct); |
| 2883 | if (!works) |
Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2884 | return -ENOMEM; |
Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2885 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 2886 | get_online_cpus(); |
Tejun Heo | 9398180 | 2009-11-17 14:06:20 -0800 | [diff] [blame] | 2887 | |
Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2888 | for_each_online_cpu(cpu) { |
Ingo Molnar | 9bfb183 | 2006-12-18 20:05:09 +0100 | [diff] [blame] | 2889 | struct work_struct *work = per_cpu_ptr(works, cpu); |
| 2890 | |
| 2891 | INIT_WORK(work, func); |
Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2892 | schedule_work_on(cpu, work); |
Andi Kleen | 65a6446 | 2009-10-14 06:22:47 +0200 | [diff] [blame] | 2893 | } |
Tejun Heo | 9398180 | 2009-11-17 14:06:20 -0800 | [diff] [blame] | 2894 | |
| 2895 | for_each_online_cpu(cpu) |
| 2896 | flush_work(per_cpu_ptr(works, cpu)); |
| 2897 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 2898 | put_online_cpus(); |
Andrew Morton | b613677 | 2006-06-25 05:47:49 -0700 | [diff] [blame] | 2899 | free_percpu(works); |
Christoph Lameter | 15316ba | 2006-01-08 01:00:43 -0800 | [diff] [blame] | 2900 | return 0; |
| 2901 | } |
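/*
 * Illustrative sketch (hypothetical names): draining per-cpu state on
 * every online CPU and waiting until all CPUs are done. Because the
 * work items are queued on the per-cpu system workqueue, each callback
 * runs on the CPU it was queued for:
 *
 *	static void mydrv_drain_fn(struct work_struct *unused)
 *	{
 *		mydrv_drain_local_cache();
 *	}
 *	...
 *	int err = schedule_on_each_cpu(mydrv_drain_fn);
 *	if (err)
 *		return err;
 */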
| 2902 | |
Alan Stern | eef6a7d | 2010-02-12 17:39:21 +0900 | [diff] [blame] | 2903 | /** |
| 2904 | * flush_scheduled_work - ensure that any scheduled work has run to completion. |
| 2905 | * |
| 2906 | * Forces execution of the kernel-global workqueue and blocks until its |
| 2907 | * completion. |
| 2908 | * |
| 2909 | * Think twice before calling this function! It's very easy to get into |
| 2910 | * trouble if you don't take great care. Either of the following situations |
| 2911 | * will lead to deadlock: |
| 2912 | * |
| 2913 | * One of the work items currently on the workqueue needs to acquire |
| 2914 | * a lock held by your code or its caller. |
| 2915 | * |
| 2916 | * Your code is running in the context of a work routine. |
| 2917 | * |
| 2918 | * They will be detected by lockdep when they occur, but the first might not |
| 2919 | * occur very often. It depends on what work items are on the workqueue and |
| 2920 | * what locks they need, which you have no control over. |
| 2921 | * |
| 2922 | * In most situations flushing the entire workqueue is overkill; you merely |
| 2923 | * need to know that a particular work item isn't queued and isn't running. |
| 2924 | * In such cases you should use cancel_delayed_work_sync() or |
| 2925 | * cancel_work_sync() instead. |
| 2926 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2927 | void flush_scheduled_work(void) |
| 2928 | { |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2929 | flush_workqueue(system_wq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2930 | } |
Dave Jones | ae90dd5 | 2006-06-30 01:40:45 -0400 | [diff] [blame] | 2931 | EXPORT_SYMBOL(flush_scheduled_work); |
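/*
 * Illustrative sketch (hypothetical names): per the warning above, a
 * driver that merely needs its own items gone on unload should cancel
 * them directly rather than flush the whole system workqueue:
 *
 *	static void mydrv_shutdown(struct mydrv *md)
 *	{
 *		cancel_delayed_work_sync(&md->poll_dwork);
 *		cancel_work_sync(&md->event_work);
 *	}
 *
 * This avoids the deadlock scenarios described for
 * flush_scheduled_work() entirely.
 */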
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2932 | |
| 2933 | /** |
James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2934 | * execute_in_process_context - reliably execute the routine with user context |
| 2935 | * @fn: the function to execute |
James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2936 | * @ew: guaranteed storage for the execute work structure (must |
| 2937 | * be available when the work executes) |
| 2938 | * |
| 2939 | * Executes the function immediately if process context is available, |
| 2940 | * otherwise schedules it for execution on the kernel-global workqueue. |
| 2941 | * |
| 2942 | * Returns: 0 - function was executed |
| 2943 | * 1 - function was scheduled for execution |
| 2944 | */ |
David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2945 | int execute_in_process_context(work_func_t fn, struct execute_work *ew) |
James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2946 | { |
| 2947 | if (!in_interrupt()) { |
David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2948 | fn(&ew->work); |
James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2949 | return 0; |
| 2950 | } |
| 2951 | |
David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 2952 | INIT_WORK(&ew->work, fn); |
James Bottomley | 1fa44ec | 2006-02-23 12:43:43 -0600 | [diff] [blame] | 2953 | schedule_work(&ew->work); |
| 2954 | |
| 2955 | return 1; |
| 2956 | } |
| 2957 | EXPORT_SYMBOL_GPL(execute_in_process_context); |
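/*
 * Illustrative sketch (hypothetical names): releasing a resource from
 * a path that may run in either process or interrupt context. The
 * execute_work structure is embedded in the object so it is still
 * valid if execution is deferred:
 *
 *	static void mydrv_release_fn(struct work_struct *work)
 *	{
 *		struct mydrv *md = container_of(work, struct mydrv,
 *						ew.work);
 *
 *		mydrv_free_resources(md);
 *	}
 *	...
 *	execute_in_process_context(mydrv_release_fn, &md->ew);
 */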
| 2958 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2959 | int keventd_up(void) |
| 2960 | { |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 2961 | return system_wq != NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2962 | } |
| 2963 | |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2964 | static int alloc_cwqs(struct workqueue_struct *wq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2965 | { |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2966 | /* |
Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2967 | * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS. |
| 2968 | * Make sure that the alignment isn't lower than that of |
| 2969 | * unsigned long long. |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2970 | */ |
Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 2971 | const size_t size = sizeof(struct cpu_workqueue_struct); |
| 2972 | const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, |
| 2973 | __alignof__(unsigned long long)); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2974 | |
Lai Jiangshan | e06ffa1 | 2012-03-09 18:03:20 +0800 | [diff] [blame] | 2975 | if (!(wq->flags & WQ_UNBOUND)) |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2976 | wq->cpu_wq.pcpu = __alloc_percpu(size, align); |
Tejun Heo | 931ac77 | 2010-07-20 11:07:48 +0200 | [diff] [blame] | 2977 | else { |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2978 | void *ptr; |
Frederic Weisbecker | e1d8aa9 | 2009-01-12 23:15:46 +0100 | [diff] [blame] | 2979 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2980 | /* |
| 2981 | * Allocate enough room to align cwq and put an extra |
| 2982 | * pointer at the end pointing back to the originally |
| 2983 | * allocated pointer which will be used for free. |
| 2984 | */ |
| 2985 | ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); |
| 2986 | if (ptr) { |
| 2987 | wq->cpu_wq.single = PTR_ALIGN(ptr, align); |
| 2988 | *(void **)(wq->cpu_wq.single + 1) = ptr; |
| 2989 | } |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2990 | } |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2991 | |
Tejun Heo | 0415b00 | 2011-03-24 18:50:09 +0100 | [diff] [blame] | 2992 | /* just in case, make sure it's actually aligned */ |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2993 | BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); |
| 2994 | return wq->cpu_wq.v ? 0 : -ENOMEM; |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 2995 | } |
| 2996 | |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 2997 | static void free_cwqs(struct workqueue_struct *wq) |
Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 2998 | { |
Lai Jiangshan | e06ffa1 | 2012-03-09 18:03:20 +0800 | [diff] [blame] | 2999 | if (!(wq->flags & WQ_UNBOUND)) |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3000 | free_percpu(wq->cpu_wq.pcpu); |
| 3001 | else if (wq->cpu_wq.single) { |
| 3002 | /* the pointer to free is stored right after the cwq */ |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3003 | kfree(*(void **)(wq->cpu_wq.single + 1)); |
Oleg Nesterov | 06ba38a | 2007-05-09 02:34:15 -0700 | [diff] [blame] | 3004 | } |
| 3005 | } |
| 3006 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3007 | static int wq_clamp_max_active(int max_active, unsigned int flags, |
| 3008 | const char *name) |
Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3009 | { |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3010 | int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; |
| 3011 | |
| 3012 | if (max_active < 1 || max_active > lim) |
Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3013 | printk(KERN_WARNING "workqueue: max_active %d requested for %s " |
| 3014 | "is out of range, clamping between %d and %d\n", |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3015 | max_active, name, 1, lim); |
Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3016 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3017 | return clamp_val(max_active, 1, lim); |
Tejun Heo | b71ab8c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3018 | } |
| 3019 | |
Tejun Heo | b196be8 | 2012-01-10 15:11:35 -0800 | [diff] [blame] | 3020 | struct workqueue_struct *__alloc_workqueue_key(const char *fmt, |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3021 | unsigned int flags, |
| 3022 | int max_active, |
| 3023 | struct lock_class_key *key, |
Tejun Heo | b196be8 | 2012-01-10 15:11:35 -0800 | [diff] [blame] | 3024 | const char *lock_name, ...) |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3025 | { |
Tejun Heo | b196be8 | 2012-01-10 15:11:35 -0800 | [diff] [blame] | 3026 | va_list args, args1; |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3027 | struct workqueue_struct *wq; |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3028 | unsigned int cpu; |
shumash | 7f490b2 | 2015-10-06 09:49:52 -0600 | [diff] [blame] | 3029 | size_t namelen; |
Tejun Heo | b196be8 | 2012-01-10 15:11:35 -0800 | [diff] [blame] | 3030 | |
Viresh Kumar | 154e311 | 2013-04-08 16:45:40 +0530 | [diff] [blame] | 3031 | /* see the comment above the definition of WQ_POWER_EFFICIENT */ |
| 3032 | if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) |
| 3033 | flags |= WQ_UNBOUND; |
| 3034 | |
Tejun Heo | b196be8 | 2012-01-10 15:11:35 -0800 | [diff] [blame] | 3035 | /* determine namelen, allocate wq and format name */ |
| 3036 | va_start(args, lock_name); |
| 3037 | va_copy(args1, args); |
| 3038 | namelen = vsnprintf(NULL, 0, fmt, args) + 1; |
| 3039 | |
| 3040 | wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL); |
| 3041 | if (!wq) |
| 3042 | goto err; |
| 3043 | |
| 3044 | vsnprintf(wq->name, namelen, fmt, args1); |
| 3045 | va_end(args); |
| 3046 | va_end(args1); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3047 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3048 | /* |
Tejun Heo | 6370a6a | 2010-10-11 15:12:27 +0200 | [diff] [blame] | 3049 | * Workqueues which may be used during memory reclaim should |
| 3050 | * have a rescuer to guarantee forward progress. |
| 3051 | */ |
| 3052 | if (flags & WQ_MEM_RECLAIM) |
| 3053 | flags |= WQ_RESCUER; |
| 3054 | |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3055 | max_active = max_active ?: WQ_DFL_ACTIVE; |
Tejun Heo | b196be8 | 2012-01-10 15:11:35 -0800 | [diff] [blame] | 3056 | max_active = wq_clamp_max_active(max_active, flags, wq->name); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3057 | |
Tejun Heo | b196be8 | 2012-01-10 15:11:35 -0800 | [diff] [blame] | 3058 | /* init wq */ |
Tejun Heo | 97e37d7 | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 3059 | wq->flags = flags; |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3060 | wq->saved_max_active = max_active; |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3061 | mutex_init(&wq->flush_mutex); |
| 3062 | atomic_set(&wq->nr_cwqs_to_flush, 0); |
| 3063 | INIT_LIST_HEAD(&wq->flusher_queue); |
| 3064 | INIT_LIST_HEAD(&wq->flusher_overflow); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3065 | |
Johannes Berg | eb13ba8 | 2008-01-16 09:51:58 +0100 | [diff] [blame] | 3066 | lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); |
Oleg Nesterov | cce1a16 | 2007-05-09 02:34:13 -0700 | [diff] [blame] | 3067 | INIT_LIST_HEAD(&wq->list); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3068 | |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3069 | if (alloc_cwqs(wq) < 0) |
| 3070 | goto err; |
| 3071 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3072 | for_each_cwq_cpu(cpu, wq) { |
Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3073 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3074 | struct global_cwq *gcwq = get_gcwq(cpu); |
Tejun Heo | dcb32ee | 2012-07-13 22:16:45 -0700 | [diff] [blame] | 3075 | int pool_idx = (bool)(flags & WQ_HIGHPRI); |
Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3076 | |
Tejun Heo | 0f90004 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3077 | BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); |
Tejun Heo | dcb32ee | 2012-07-13 22:16:45 -0700 | [diff] [blame] | 3078 | cwq->pool = &gcwq->pools[pool_idx]; |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3079 | cwq->wq = wq; |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3080 | cwq->flush_color = -1; |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3081 | cwq->max_active = max_active; |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3082 | INIT_LIST_HEAD(&cwq->delayed_works); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3083 | } |
| 3084 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3085 | if (flags & WQ_RESCUER) { |
| 3086 | struct worker *rescuer; |
| 3087 | |
Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 3088 | if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3089 | goto err; |
| 3090 | |
| 3091 | wq->rescuer = rescuer = alloc_worker(); |
| 3092 | if (!rescuer) |
| 3093 | goto err; |
| 3094 | |
Tejun Heo | b196be8 | 2012-01-10 15:11:35 -0800 | [diff] [blame] | 3095 | rescuer->task = kthread_create(rescuer_thread, wq, "%s", |
| 3096 | wq->name); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3097 | if (IS_ERR(rescuer->task)) |
| 3098 | goto err; |
| 3099 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3100 | rescuer->task->flags |= PF_THREAD_BOUND; |
| 3101 | wake_up_process(rescuer->task); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3102 | } |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3103 | |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3104 | /* |
| 3105 | * workqueue_lock protects global freeze state and workqueues |
| 3106 | * list. Grab it, set max_active accordingly and add the new |
| 3107 | * workqueue to workqueues list. |
| 3108 | */ |
Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3109 | spin_lock(&workqueue_lock); |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3110 | |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3111 | if (workqueue_freezing && wq->flags & WQ_FREEZABLE) |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3112 | for_each_cwq_cpu(cpu, wq) |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3113 | get_cwq(cpu, wq)->max_active = 0; |
| 3114 | |
Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3115 | list_add(&wq->list, &workqueues); |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3116 | |
Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3117 | spin_unlock(&workqueue_lock); |
| 3118 | |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3119 | return wq; |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 3120 | err: |
| 3121 | if (wq) { |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3122 | free_cwqs(wq); |
Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 3123 | free_mayday_mask(wq->mayday_mask); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3124 | kfree(wq->rescuer); |
Tejun Heo | 4690c4a | 2010-06-29 10:07:10 +0200 | [diff] [blame] | 3125 | kfree(wq); |
| 3126 | } |
| 3127 | return NULL; |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3128 | } |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3129 | EXPORT_SYMBOL_GPL(__alloc_workqueue_key); |
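/*
 * Illustrative sketch (hypothetical names): callers normally reach
 * __alloc_workqueue_key() through the alloc_workqueue() wrapper. A
 * driver whose work items may be needed for memory reclaim might do:
 *
 *	md->wq = alloc_workqueue("mydrv", WQ_MEM_RECLAIM, 1);
 *	if (!md->wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(md->wq, &md->io_work);
 *	...
 *	destroy_workqueue(md->wq);
 *
 * WQ_MEM_RECLAIM gives the queue a rescuer so it can make forward
 * progress under memory pressure, and max_active of 1 limits it to
 * one in-flight item per CPU.
 */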
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3130 | |
| 3131 | /** |
| 3132 | * destroy_workqueue - safely terminate a workqueue |
| 3133 | * @wq: target workqueue |
| 3134 | * |
| 3135 | * Safely destroy a workqueue. All work currently pending will be done first. |
| 3136 | */ |
| 3137 | void destroy_workqueue(struct workqueue_struct *wq) |
| 3138 | { |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3139 | unsigned int cpu; |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3140 | |
Tejun Heo | 9c5a2ba | 2011-04-05 18:01:44 +0200 | [diff] [blame] | 3141 | /* drain it before proceeding with destruction */ |
| 3142 | drain_workqueue(wq); |
Tejun Heo | c8efcc2 | 2010-12-20 19:32:04 +0100 | [diff] [blame] | 3143 | |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3144 | /* |
| 3145 | * The wq list is used to freeze wqs; remove @wq from the list |
| 3146 | * after flushing is complete in case freezing races us. |
| 3147 | */ |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 3148 | spin_lock(&workqueue_lock); |
Oleg Nesterov | b1f4ec1 | 2007-05-09 02:34:12 -0700 | [diff] [blame] | 3149 | list_del(&wq->list); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 3150 | spin_unlock(&workqueue_lock); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3151 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3152 | /* sanity check */ |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3153 | for_each_cwq_cpu(cpu, wq) { |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3154 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
| 3155 | int i; |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3156 | |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3157 | for (i = 0; i < WORK_NR_COLORS; i++) |
| 3158 | BUG_ON(cwq->nr_in_flight[i]); |
Tejun Heo | 1e19ffc | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3159 | BUG_ON(cwq->nr_active); |
| 3160 | BUG_ON(!list_empty(&cwq->delayed_works)); |
Tejun Heo | 73f53c4 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3161 | } |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3162 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3163 | if (wq->flags & WQ_RESCUER) { |
| 3164 | kthread_stop(wq->rescuer->task); |
Tejun Heo | f2e005a | 2010-07-20 15:59:09 +0200 | [diff] [blame] | 3165 | free_mayday_mask(wq->mayday_mask); |
Xiaotian Feng | 8d9df9f | 2010-08-16 09:54:28 +0200 | [diff] [blame] | 3166 | kfree(wq->rescuer); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3167 | } |
| 3168 | |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3169 | free_cwqs(wq); |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3170 | kfree(wq); |
| 3171 | } |
| 3172 | EXPORT_SYMBOL_GPL(destroy_workqueue); |
| 3173 | |
Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3174 | /** |
| 3175 | * workqueue_set_max_active - adjust max_active of a workqueue |
| 3176 | * @wq: target workqueue |
| 3177 | * @max_active: new max_active value. |
| 3178 | * |
| 3179 | * Set max_active of @wq to @max_active. |
| 3180 | * |
| 3181 | * CONTEXT: |
| 3182 | * Don't call from IRQ context. |
| 3183 | */ |
| 3184 | void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) |
| 3185 | { |
| 3186 | unsigned int cpu; |
| 3187 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3188 | max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); |
Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3189 | |
| 3190 | spin_lock(&workqueue_lock); |
| 3191 | |
| 3192 | wq->saved_max_active = max_active; |
| 3193 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3194 | for_each_cwq_cpu(cpu, wq) { |
Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3195 | struct global_cwq *gcwq = get_gcwq(cpu); |
| 3196 | |
| 3197 | spin_lock_irq(&gcwq->lock); |
| 3198 | |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3199 | if (!(wq->flags & WQ_FREEZABLE) || |
Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3200 | !(gcwq->flags & GCWQ_FREEZING)) |
| 3201 | get_cwq(gcwq->cpu, wq)->max_active = max_active; |
| 3202 | |
| 3203 | spin_unlock_irq(&gcwq->lock); |
| 3204 | } |
| 3205 | |
| 3206 | spin_unlock(&workqueue_lock); |
| 3207 | } |
| 3208 | EXPORT_SYMBOL_GPL(workqueue_set_max_active); |
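/*
 * Illustrative sketch (hypothetical names): throttling an existing
 * workqueue at runtime, for example while a device sits in a low
 * power state, and restoring the limit afterwards:
 *
 *	workqueue_set_max_active(md->wq, 1);
 *	...
 *	workqueue_set_max_active(md->wq, WQ_DFL_ACTIVE);
 *
 * Work items beyond the limit wait on the delayed list until a slot
 * frees up; nothing queued is dropped.
 */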
| 3209 | |
| 3210 | /** |
| 3211 | * workqueue_congested - test whether a workqueue is congested |
| 3212 | * @cpu: CPU in question |
| 3213 | * @wq: target workqueue |
| 3214 | * |
| 3215 | * Test whether @wq's cpu workqueue for @cpu is congested. There is |
| 3216 | * no synchronization around this function and the test result is |
| 3217 | * unreliable and only useful as advisory hints or for debugging. |
| 3218 | * |
| 3219 | * RETURNS: |
| 3220 | * %true if congested, %false otherwise. |
| 3221 | */ |
| 3222 | bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) |
| 3223 | { |
| 3224 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
| 3225 | |
| 3226 | return !list_empty(&cwq->delayed_works); |
| 3227 | } |
| 3228 | EXPORT_SYMBOL_GPL(workqueue_congested); |
| 3229 | |
| 3230 | /** |
| 3231 | * work_cpu - return the last known associated cpu for @work |
| 3232 | * @work: the work of interest |
| 3233 | * |
| 3234 | * RETURNS: |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3235 | * CPU number if @work was ever queued. WORK_CPU_NONE otherwise. |
Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3236 | */ |
| 3237 | unsigned int work_cpu(struct work_struct *work) |
| 3238 | { |
| 3239 | struct global_cwq *gcwq = get_work_gcwq(work); |
| 3240 | |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3241 | return gcwq ? gcwq->cpu : WORK_CPU_NONE; |
Tejun Heo | dcd989c | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3242 | } |
| 3243 | EXPORT_SYMBOL_GPL(work_cpu); |
| 3244 | |
| 3245 | /** |
| 3246 | * work_busy - test whether a work is currently pending or running |
| 3247 | * @work: the work to be tested |
| 3248 | * |
| 3249 | * Test whether @work is currently pending or running. There is no |
| 3250 | * synchronization around this function and the test result is |
| 3251 | * unreliable and only useful as advisory hints or for debugging. |
| 3252 | * Especially for reentrant wqs, the pending state might hide the |
| 3253 | * running state. |
| 3254 | * |
| 3255 | * RETURNS: |
| 3256 | * OR'd bitmask of WORK_BUSY_* bits. |
| 3257 | */ |
| 3258 | unsigned int work_busy(struct work_struct *work) |
| 3259 | { |
| 3260 | struct global_cwq *gcwq = get_work_gcwq(work); |
| 3261 | unsigned long flags; |
| 3262 | unsigned int ret = 0; |
| 3263 | |
| 3264 | if (!gcwq) |
| 3265 | return 0; |
| 3266 | |
| 3267 | spin_lock_irqsave(&gcwq->lock, flags); |
| 3268 | |
| 3269 | if (work_pending(work)) |
| 3270 | ret |= WORK_BUSY_PENDING; |
| 3271 | if (find_worker_executing_work(gcwq, work)) |
| 3272 | ret |= WORK_BUSY_RUNNING; |
| 3273 | |
| 3274 | spin_unlock_irqrestore(&gcwq->lock, flags); |
| 3275 | |
| 3276 | return ret; |
| 3277 | } |
| 3278 | EXPORT_SYMBOL_GPL(work_busy); |
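/*
 * Illustrative sketch (hypothetical names): as noted above the result
 * is only advisory, so work_busy() is best suited to diagnostics:
 *
 *	unsigned int busy = work_busy(&md->event_work);
 *
 *	pr_debug("event_work:%s%s\n",
 *		 busy & WORK_BUSY_PENDING ? " pending" : "",
 *		 busy & WORK_BUSY_RUNNING ? " running" : "");
 *
 * Code that needs a definite answer should use flush_work() or
 * cancel_work_sync() rather than acting on this snapshot.
 */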
| 3279 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3280 | /* |
| 3281 | * CPU hotplug. |
| 3282 | * |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3283 | * There are two challenges in supporting CPU hotplug. Firstly, there |
| 3284 | * are a lot of assumptions on strong associations among work, cwq and |
| 3285 | * gcwq which make migrating pending and scheduled works very |
| 3286 | * difficult to implement without impacting hot paths. Secondly, |
| 3287 | * gcwqs serve mix of short, long and very long running works making |
| 3288 | * blocked draining impractical. |
| 3289 | * |
| 3290 | * This is solved by allowing a gcwq to be detached from CPU, running |
| 3291 | * it with unbound (rogue) workers and allowing it to be reattached |
| 3292 | * later if the cpu comes back online. A separate thread is created |
| 3293 | * to govern a gcwq in such state and is called the trustee of the |
| 3294 | * gcwq. |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3295 | * |
| 3296 | * Trustee states and their descriptions. |
| 3297 | * |
| 3298 | * START Command state used on startup. On CPU_DOWN_PREPARE, a |
| 3299 | * new trustee is started with this state. |
| 3300 | * |
| 3301 | * IN_CHARGE Once started, trustee will enter this state after |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3302 | * assuming the manager role and making all existing |
| 3303 | * workers rogue. DOWN_PREPARE waits for trustee to |
| 3304 | * enter this state. After reaching IN_CHARGE, trustee |
| 3305 | * tries to execute the pending worklist until it's empty |
| 3306 | * and the state is set to BUTCHER, or the state is set |
| 3307 | * to RELEASE. |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3308 | * |
| 3309 | * BUTCHER Command state which is set by the cpu callback after |
| 3310 | * the cpu has gone down. Once this state is set, the trustee |
| 3311 | * knows that there will be no new works on the worklist |
| 3312 | * and once the worklist is empty it can proceed to |
| 3313 | * killing idle workers. |
| 3314 | * |
| 3315 | * RELEASE Command state which is set by the cpu callback if the |
| 3316 | * cpu down has been canceled or it has come online |
| 3317 | * again. After recognizing this state, trustee stops |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3318 | * trying to drain or butcher and clears ROGUE, rebinds |
| 3319 | * all remaining workers back to the cpu and releases |
| 3320 | * manager role. |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3321 | * |
| 3322 | * DONE Trustee will enter this state after BUTCHER or RELEASE |
| 3323 | * is complete. |
| 3324 | * |
| 3325 | * trustee CPU draining |
| 3326 | * took over down complete |
| 3327 | * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE |
| 3328 | * | | ^ |
| 3329 | * | CPU is back online v return workers | |
| 3330 | * ----------------> RELEASE -------------- |
| 3331 | */ |
| 3332 | |
| 3333 | /** |
| 3334 | * trustee_wait_event_timeout - timed event wait for trustee |
| 3335 | * @cond: condition to wait for |
| 3336 | * @timeout: timeout in jiffies |
| 3337 | * |
| 3338 | * wait_event_timeout() for trustee to use. Handles locking and |
| 3339 | * checks for RELEASE request. |
| 3340 | * |
| 3341 | * CONTEXT: |
| 3342 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed |
| 3343 | * multiple times. To be used by trustee. |
| 3344 | * |
| 3345 | * RETURNS: |
| 3346 | * Positive indicating left time if @cond is satisfied, 0 if timed |
| 3347 | * out, -1 if canceled. |
| 3348 | */ |
| 3349 | #define trustee_wait_event_timeout(cond, timeout) ({ \ |
| 3350 | long __ret = (timeout); \ |
| 3351 | while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \ |
| 3352 | __ret) { \ |
| 3353 | spin_unlock_irq(&gcwq->lock); \ |
| 3354 | __wait_event_timeout(gcwq->trustee_wait, (cond) || \ |
| 3355 | (gcwq->trustee_state == TRUSTEE_RELEASE), \ |
| 3356 | __ret); \ |
| 3357 | spin_lock_irq(&gcwq->lock); \ |
| 3358 | } \ |
| 3359 | gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \ |
| 3360 | }) |
| 3361 | |
| 3362 | /** |
| 3363 | * trustee_wait_event - event wait for trustee |
| 3364 | * @cond: condition to wait for |
| 3365 | * |
| 3366 | * wait_event() for trustee to use. Automatically handles locking and |
| 3367 | * checks for CANCEL request. |
| 3368 | * |
| 3369 | * CONTEXT: |
| 3370 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed |
| 3371 | * multiple times. To be used by trustee. |
| 3372 | * |
| 3373 | * RETURNS: |
| 3374 | * 0 if @cond is satisfied, -1 if canceled. |
| 3375 | */ |
| 3376 | #define trustee_wait_event(cond) ({ \ |
| 3377 | long __ret1; \ |
| 3378 | __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\ |
| 3379 | __ret1 < 0 ? -1 : 0; \ |
| 3380 | }) |
| 3381 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3382 | static bool gcwq_is_managing_workers(struct global_cwq *gcwq) |
| 3383 | { |
| 3384 | struct worker_pool *pool; |
| 3385 | |
| 3386 | for_each_worker_pool(pool, gcwq) |
| 3387 | if (pool->flags & POOL_MANAGING_WORKERS) |
| 3388 | return true; |
| 3389 | return false; |
| 3390 | } |
| 3391 | |
| 3392 | static bool gcwq_has_idle_workers(struct global_cwq *gcwq) |
| 3393 | { |
| 3394 | struct worker_pool *pool; |
| 3395 | |
| 3396 | for_each_worker_pool(pool, gcwq) |
| 3397 | if (!list_empty(&pool->idle_list)) |
| 3398 | return true; |
| 3399 | return false; |
| 3400 | } |
| 3401 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3402 | static int __cpuinit trustee_thread(void *__gcwq) |
| 3403 | { |
| 3404 | struct global_cwq *gcwq = __gcwq; |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3405 | struct worker_pool *pool; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3406 | struct worker *worker; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3407 | struct work_struct *work; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3408 | struct hlist_node *pos; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3409 | long rc; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3410 | int i; |
| 3411 | |
| 3412 | BUG_ON(gcwq->cpu != smp_processor_id()); |
| 3413 | |
| 3414 | spin_lock_irq(&gcwq->lock); |
| 3415 | /* |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3416 | * Claim the manager position and make all workers rogue. |
| 3417 | * Trustee must be bound to the target cpu and can't be |
| 3418 | * cancelled. |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3419 | */ |
| 3420 | BUG_ON(gcwq->cpu != smp_processor_id()); |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3421 | rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq)); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3422 | BUG_ON(rc < 0); |
| 3423 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3424 | for_each_worker_pool(pool, gcwq) { |
| 3425 | pool->flags |= POOL_MANAGING_WORKERS; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3426 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3427 | list_for_each_entry(worker, &pool->idle_list, entry) |
| 3428 | worker->flags |= WORKER_ROGUE; |
| 3429 | } |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3430 | |
| 3431 | for_each_busy_worker(worker, i, pos, gcwq) |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3432 | worker->flags |= WORKER_ROGUE; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3433 | |
| 3434 | /* |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3435 | * Call schedule() so that we cross rq->lock and thus can |
| 3436 | * guarantee sched callbacks see the rogue flag. This is |
| 3437 | * necessary as scheduler callbacks may be invoked from other |
| 3438 | * cpus. |
| 3439 | */ |
| 3440 | spin_unlock_irq(&gcwq->lock); |
| 3441 | schedule(); |
| 3442 | spin_lock_irq(&gcwq->lock); |
| 3443 | |
| 3444 | /* |
Tejun Heo | cb44476 | 2010-07-02 10:03:50 +0200 | [diff] [blame] | 3445 | * Sched callbacks are disabled now. Zap nr_running. After |
| 3446 | * this, nr_running stays zero and need_more_worker() and |
| 3447 | * keep_working() are always true as long as the worklist is |
| 3448 | * not empty. |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3449 | */ |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3450 | for_each_worker_pool(pool, gcwq) |
| 3451 | atomic_set(get_pool_nr_running(pool), 0); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3452 | |
| 3453 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3454 | for_each_worker_pool(pool, gcwq) |
| 3455 | del_timer_sync(&pool->idle_timer); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3456 | spin_lock_irq(&gcwq->lock); |
| 3457 | |
| 3458 | /* |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3459 | * We're now in charge. Notify and proceed to drain. We need |
| 3460 | * to keep the gcwq running during the whole CPU down |
| 3461 | * procedure as other cpu hotunplug callbacks may need to |
| 3462 | * flush currently running tasks. |
| 3463 | */ |
| 3464 | gcwq->trustee_state = TRUSTEE_IN_CHARGE; |
| 3465 | wake_up_all(&gcwq->trustee_wait); |
| 3466 | |
| 3467 | /* |
| 3468 | * The original cpu is in the process of dying and may go away |
| 3469 | * anytime now. When that happens, we and all workers would |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3470 | * be migrated to other cpus. Try draining any left work. We |
| 3471 | * want to get it over with ASAP - spam rescuers, wake up as |
| 3472 | * many idlers as necessary and create new ones till the |
| 3473 | * worklist is empty. Note that if the gcwq is frozen, there |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3474 | * may be frozen works in freezable cwqs. Don't declare |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3475 | * completion while frozen. |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3476 | */ |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3477 | while (true) { |
| 3478 | bool busy = false; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3479 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3480 | for_each_worker_pool(pool, gcwq) |
| 3481 | busy |= pool->nr_workers != pool->nr_idle; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3482 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3483 | if (!busy && !(gcwq->flags & GCWQ_FREEZING) && |
| 3484 | gcwq->trustee_state != TRUSTEE_IN_CHARGE) |
| 3485 | break; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3486 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3487 | for_each_worker_pool(pool, gcwq) { |
| 3488 | int nr_works = 0; |
| 3489 | |
| 3490 | list_for_each_entry(work, &pool->worklist, entry) { |
| 3491 | send_mayday(work); |
| 3492 | nr_works++; |
| 3493 | } |
| 3494 | |
| 3495 | list_for_each_entry(worker, &pool->idle_list, entry) { |
| 3496 | if (!nr_works--) |
| 3497 | break; |
| 3498 | wake_up_process(worker->task); |
| 3499 | } |
| 3500 | |
| 3501 | if (need_to_create_worker(pool)) { |
| 3502 | spin_unlock_irq(&gcwq->lock); |
| 3503 | worker = create_worker(pool, false); |
| 3504 | spin_lock_irq(&gcwq->lock); |
| 3505 | if (worker) { |
| 3506 | worker->flags |= WORKER_ROGUE; |
| 3507 | start_worker(worker); |
| 3508 | } |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3509 | } |
| 3510 | } |
| 3511 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3512 | /* give a breather */ |
| 3513 | if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) |
| 3514 | break; |
| 3515 | } |
| 3516 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3517 | /* |
| 3518 | * Either all works have been scheduled and cpu is down, or |
| 3519 | * cpu down has already been canceled. Wait for and butcher |
| 3520 | * all workers till we're canceled. |
| 3521 | */ |
| 3522 | do { |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3523 | rc = trustee_wait_event(gcwq_has_idle_workers(gcwq)); |
| 3524 | |
| 3525 | i = 0; |
| 3526 | for_each_worker_pool(pool, gcwq) { |
| 3527 | while (!list_empty(&pool->idle_list)) { |
| 3528 | worker = list_first_entry(&pool->idle_list, |
| 3529 | struct worker, entry); |
| 3530 | destroy_worker(worker); |
| 3531 | } |
| 3532 | i |= pool->nr_workers; |
| 3533 | } |
| 3534 | } while (i && rc >= 0); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3535 | |
| 3536 | /* |
| 3537 | * At this point, either draining has completed and no worker |
| 3538 | * is left, or cpu down has been canceled or the cpu is being |
| 3539 | * brought back up. There shouldn't be any idle one left. |
| 3540 | * Tell the remaining busy ones to rebind once it finishes the |
| 3541 | * currently scheduled works by scheduling the rebind_work. |
| 3542 | */ |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3543 | for_each_worker_pool(pool, gcwq) |
| 3544 | WARN_ON(!list_empty(&pool->idle_list)); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3545 | |
| 3546 | for_each_busy_worker(worker, i, pos, gcwq) { |
| 3547 | struct work_struct *rebind_work = &worker->rebind_work; |
Lai Jiangshan | 6adebb0 | 2012-09-02 00:28:19 +0800 | [diff] [blame] | 3548 | unsigned long worker_flags = worker->flags; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3549 | |
| 3550 | /* |
| 3551 | * Rebind_work may race with future cpu hotplug |
| 3552 | * operations. Use a separate flag to mark that |
Lai Jiangshan | 6adebb0 | 2012-09-02 00:28:19 +0800 | [diff] [blame] | 3553 | * rebinding is scheduled. The morphing should |
| 3554 | * be atomic. |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3555 | */ |
Lai Jiangshan | 6adebb0 | 2012-09-02 00:28:19 +0800 | [diff] [blame] | 3556 | worker_flags |= WORKER_REBIND; |
| 3557 | worker_flags &= ~WORKER_ROGUE; |
| 3558 | ACCESS_ONCE(worker->flags) = worker_flags; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3559 | |
| 3560 | /* queue rebind_work, wq doesn't matter, use the default one */ |
| 3561 | if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, |
| 3562 | work_data_bits(rebind_work))) |
| 3563 | continue; |
| 3564 | |
| 3565 | debug_work_activate(rebind_work); |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3566 | insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3567 | worker->scheduled.next, |
| 3568 | work_color_to_flags(WORK_NO_COLOR)); |
| 3569 | } |
| 3570 | |
| 3571 | /* relinquish manager role */ |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3572 | for_each_worker_pool(pool, gcwq) |
| 3573 | pool->flags &= ~POOL_MANAGING_WORKERS; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3574 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3575 | /* notify completion */ |
| 3576 | gcwq->trustee = NULL; |
| 3577 | gcwq->trustee_state = TRUSTEE_DONE; |
| 3578 | wake_up_all(&gcwq->trustee_wait); |
| 3579 | spin_unlock_irq(&gcwq->lock); |
| 3580 | return 0; |
| 3581 | } |
| 3582 | |
| 3583 | /** |
| 3584 | * wait_trustee_state - wait for trustee to enter the specified state |
| 3585 | * @gcwq: gcwq the trustee of interest belongs to |
| 3586 | * @state: target state to wait for |
| 3587 | * |
| 3588 | * Wait for the trustee to reach @state. DONE is already matched. |
| 3589 | * |
| 3590 | * CONTEXT: |
| 3591 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed |
| 3592 | * multiple times. To be used by cpu_callback. |
| 3593 | */ |
| 3594 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) |
Namhyung Kim | 06bd6eb | 2010-08-22 23:19:42 +0900 | [diff] [blame] | 3595 | __releases(&gcwq->lock) |
| 3596 | __acquires(&gcwq->lock) |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3597 | { |
| 3598 | if (!(gcwq->trustee_state == state || |
| 3599 | gcwq->trustee_state == TRUSTEE_DONE)) { |
| 3600 | spin_unlock_irq(&gcwq->lock); |
| 3601 | __wait_event(gcwq->trustee_wait, |
| 3602 | gcwq->trustee_state == state || |
| 3603 | gcwq->trustee_state == TRUSTEE_DONE); |
| 3604 | spin_lock_irq(&gcwq->lock); |
| 3605 | } |
| 3606 | } |
| 3607 | |
Oleg Nesterov | 3af24433 | 2007-05-09 02:34:09 -0700 | [diff] [blame] | 3608 | static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, |
| 3609 | unsigned long action, |
| 3610 | void *hcpu) |
| 3611 | { |
| 3612 | unsigned int cpu = (unsigned long)hcpu; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3613 | struct global_cwq *gcwq = get_gcwq(cpu); |
| 3614 | struct task_struct *new_trustee = NULL; |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3615 | struct worker *new_workers[NR_WORKER_POOLS] = { }; |
| 3616 | struct worker_pool *pool; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3617 | unsigned long flags; |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3618 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3619 | |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 3620 | action &= ~CPU_TASKS_FROZEN; |
| 3621 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3622 | switch (action) { |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3623 | case CPU_DOWN_PREPARE: |
| 3624 | new_trustee = kthread_create(trustee_thread, gcwq, |
| 3625 | "workqueue_trustee/%d\n", cpu); |
| 3626 | if (IS_ERR(new_trustee)) |
| 3627 | return notifier_from_errno(PTR_ERR(new_trustee)); |
| 3628 | kthread_bind(new_trustee, cpu); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3629 | /* fall through */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3630 | case CPU_UP_PREPARE: |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3631 | i = 0; |
| 3632 | for_each_worker_pool(pool, gcwq) { |
| 3633 | BUG_ON(pool->first_idle); |
| 3634 | new_workers[i] = create_worker(pool, false); |
| 3635 | if (!new_workers[i++]) |
| 3636 | goto err_destroy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3637 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3638 | } |
| 3639 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3640 | /* some are called w/ irq disabled, don't disturb irq status */ |
| 3641 | spin_lock_irqsave(&gcwq->lock, flags); |
| 3642 | |
Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 3643 | switch (action) { |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3644 | case CPU_DOWN_PREPARE: |
| 3645 | /* initialize trustee and tell it to acquire the gcwq */ |
| 3646 | BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE); |
| 3647 | gcwq->trustee = new_trustee; |
| 3648 | gcwq->trustee_state = TRUSTEE_START; |
| 3649 | wake_up_process(gcwq->trustee); |
| 3650 | wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3651 | /* fall through */ |
| 3652 | case CPU_UP_PREPARE: |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3653 | i = 0; |
| 3654 | for_each_worker_pool(pool, gcwq) { |
| 3655 | BUG_ON(pool->first_idle); |
| 3656 | pool->first_idle = new_workers[i++]; |
| 3657 | } |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3658 | break; |
| 3659 | |
| 3660 | case CPU_DYING: |
| 3661 | /* |
| 3662 | * Before this, the trustee and all workers except for |
| 3663 | * the ones which are still executing works from |
| 3664 | * before the last CPU down must be on the cpu. After |
| 3665 | * this, they'll all be diasporas. |
| 3666 | */ |
| 3667 | gcwq->flags |= GCWQ_DISASSOCIATED; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3668 | break; |
| 3669 | |
Oleg Nesterov | 3da1c84 | 2008-07-25 01:47:50 -0700 | [diff] [blame] | 3670 | case CPU_POST_DEAD: |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3671 | gcwq->trustee_state = TRUSTEE_BUTCHER; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3672 | /* fall through */ |
| 3673 | case CPU_UP_CANCELED: |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3674 | for_each_worker_pool(pool, gcwq) { |
| 3675 | destroy_worker(pool->first_idle); |
| 3676 | pool->first_idle = NULL; |
| 3677 | } |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3678 | break; |
| 3679 | |
| 3680 | case CPU_DOWN_FAILED: |
| 3681 | case CPU_ONLINE: |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3682 | gcwq->flags &= ~GCWQ_DISASSOCIATED; |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3683 | if (gcwq->trustee_state != TRUSTEE_DONE) { |
| 3684 | gcwq->trustee_state = TRUSTEE_RELEASE; |
| 3685 | wake_up_process(gcwq->trustee); |
| 3686 | wait_trustee_state(gcwq, TRUSTEE_DONE); |
| 3687 | } |
| 3688 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3689 | /* |
| 3690 | * Trustee is done and there might be no worker left. |
| 3691 | * Put the first_idle in and request a real manager to |
| 3692 | * take a look. |
| 3693 | */ |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3694 | for_each_worker_pool(pool, gcwq) { |
| 3695 | spin_unlock_irq(&gcwq->lock); |
| 3696 | kthread_bind(pool->first_idle->task, cpu); |
| 3697 | spin_lock_irq(&gcwq->lock); |
| 3698 | pool->flags |= POOL_MANAGE_WORKERS; |
| 3699 | start_worker(pool->first_idle); |
| 3700 | pool->first_idle = NULL; |
| 3701 | } |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3702 | break; |
Oleg Nesterov | 00dfcaf | 2008-04-29 01:00:27 -0700 | [diff] [blame] | 3703 | } |
| 3704 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3705 | spin_unlock_irqrestore(&gcwq->lock, flags); |
| 3706 | |
Tejun Heo | 1537663 | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3707 | return notifier_from_errno(0); |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3708 | |
| 3709 | err_destroy: |
| 3710 | if (new_trustee) |
| 3711 | kthread_stop(new_trustee); |
| 3712 | |
| 3713 | spin_lock_irqsave(&gcwq->lock, flags); |
| 3714 | for (i = 0; i < NR_WORKER_POOLS; i++) |
| 3715 | if (new_workers[i]) |
| 3716 | destroy_worker(new_workers[i]); |
| 3717 | spin_unlock_irqrestore(&gcwq->lock, flags); |
| 3718 | |
| 3719 | return NOTIFY_BAD; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3720 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3721 | |
Tejun Heo | d3b4254 | 2012-07-17 12:39:26 -0700 | [diff] [blame] | 3722 | /* |
| 3723 | * Workqueues should be brought up before normal priority CPU notifiers. |
| 3724 | * This will be registered high priority CPU notifier. |
| 3725 | */ |
| 3726 | static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb, |
| 3727 | unsigned long action, |
| 3728 | void *hcpu) |
| 3729 | { |
| 3730 | switch (action & ~CPU_TASKS_FROZEN) { |
| 3731 | case CPU_UP_PREPARE: |
| 3732 | case CPU_UP_CANCELED: |
| 3733 | case CPU_DOWN_FAILED: |
| 3734 | case CPU_ONLINE: |
| 3735 | return workqueue_cpu_callback(nfb, action, hcpu); |
| 3736 | } |
| 3737 | return NOTIFY_OK; |
| 3738 | } |
| 3739 | |
| 3740 | /* |
| 3741 | * Workqueues should be brought down after normal priority CPU notifiers. |
| 3742 | * This will be registered as low priority CPU notifier. |
| 3743 | */ |
| 3744 | static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb, |
| 3745 | unsigned long action, |
| 3746 | void *hcpu) |
| 3747 | { |
| 3748 | switch (action & ~CPU_TASKS_FROZEN) { |
| 3749 | case CPU_DOWN_PREPARE: |
| 3750 | case CPU_DYING: |
| 3751 | case CPU_POST_DEAD: |
| 3752 | return workqueue_cpu_callback(nfb, action, hcpu); |
| 3753 | } |
| 3754 | return NOTIFY_OK; |
| 3755 | } |
| 3756 | |
Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3757 | #ifdef CONFIG_SMP |
Rusty Russell | 8ccad40 | 2009-01-16 15:31:15 -0800 | [diff] [blame] | 3758 | |
Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3759 | struct work_for_cpu { |
Tejun Heo | fc7da7e | 2012-09-18 12:48:43 -0700 | [diff] [blame] | 3760 | struct work_struct work; |
Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3761 | long (*fn)(void *); |
| 3762 | void *arg; |
| 3763 | long ret; |
| 3764 | }; |
| 3765 | |
Tejun Heo | fc7da7e | 2012-09-18 12:48:43 -0700 | [diff] [blame] | 3766 | static void work_for_cpu_fn(struct work_struct *work) |
Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3767 | { |
Tejun Heo | fc7da7e | 2012-09-18 12:48:43 -0700 | [diff] [blame] | 3768 | struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); |
| 3769 | |
Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3770 | wfc->ret = wfc->fn(wfc->arg); |
| 3771 | } |
| 3772 | |
| 3773 | /** |
| 3774 | * work_on_cpu - run a function in user context on a particular cpu |
| 3775 | * @cpu: the cpu to run on |
| 3776 | * @fn: the function to run |
| 3777 | * @arg: the function arg |
| 3778 | * |
Rusty Russell | 31ad908 | 2009-01-16 15:31:15 -0800 | [diff] [blame] | 3779 | * This will return the value @fn returns. |
| 3780 | * It is up to the caller to ensure that the cpu doesn't go offline. |
Andrew Morton | 6b44003 | 2009-04-09 09:50:37 -0600 | [diff] [blame] | 3781 | * The caller must not hold any locks which would prevent @fn from completing. |
Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3782 | */ |
| 3783 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) |
| 3784 | { |
Tejun Heo | fc7da7e | 2012-09-18 12:48:43 -0700 | [diff] [blame] | 3785 | struct work_for_cpu wfc = { .fn = fn, .arg = arg }; |
Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3786 | |
Tejun Heo | fc7da7e | 2012-09-18 12:48:43 -0700 | [diff] [blame] | 3787 | INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); |
| 3788 | schedule_work_on(cpu, &wfc.work); |
| 3789 | flush_work(&wfc.work); |
Rusty Russell | 2d3854a | 2008-11-05 13:39:10 +1100 | [diff] [blame] | 3790 | return wfc.ret; |
| 3791 | } |
| 3792 | EXPORT_SYMBOL_GPL(work_on_cpu); |
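/*
 * Illustrative sketch (hypothetical names): running a function that
 * must execute on a specific CPU, for instance to read a CPU-local
 * register, while the caller may be running anywhere. The caller pins
 * the CPU online for the duration as the comment above requires:
 *
 *	static long mydrv_read_local_reg(void *arg)
 *	{
 *		struct mydrv *md = arg;
 *
 *		return mydrv_read_reg_on_this_cpu(md);
 *	}
 *	...
 *	get_online_cpus();
 *	ret = work_on_cpu(cpu, mydrv_read_local_reg, md);
 *	put_online_cpus();
 */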
| 3793 | #endif /* CONFIG_SMP */ |
| 3794 | |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3795 | #ifdef CONFIG_FREEZER |
Rusty Russell | e7577c5 | 2009-01-01 10:12:25 +1030 | [diff] [blame] | 3796 | |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3797 | /** |
| 3798 | * freeze_workqueues_begin - begin freezing workqueues |
| 3799 | * |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3800 | * Start freezing workqueues. After this function returns, all freezable |
| 3801 | * workqueues will queue new works to their frozen_works list instead of |
| 3802 | * gcwq->worklist. |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3803 | * |
| 3804 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3805 |  * Grabs and releases workqueue_lock and each gcwq->lock. |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3806 | */ |
| 3807 | void freeze_workqueues_begin(void) |
| 3808 | { |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3809 | unsigned int cpu; |
| 3810 | |
| 3811 | spin_lock(&workqueue_lock); |
| 3812 | |
| 3813 | BUG_ON(workqueue_freezing); |
| 3814 | workqueue_freezing = true; |
| 3815 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3816 | for_each_gcwq_cpu(cpu) { |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3817 | struct global_cwq *gcwq = get_gcwq(cpu); |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3818 | struct workqueue_struct *wq; |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3819 | |
| 3820 | spin_lock_irq(&gcwq->lock); |
| 3821 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3822 | BUG_ON(gcwq->flags & GCWQ_FREEZING); |
| 3823 | gcwq->flags |= GCWQ_FREEZING; |
| 3824 | |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3825 | list_for_each_entry(wq, &workqueues, list) { |
| 3826 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
| 3827 | |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3828 | if (cwq && wq->flags & WQ_FREEZABLE) |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3829 | cwq->max_active = 0; |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3830 | } |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3831 | |
| 3832 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3833 | } |
| 3834 | |
| 3835 | spin_unlock(&workqueue_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3836 | } |
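| | |
| | /* |
| |  * Illustrative sketch; foo_wq and foo_work are hypothetical.  A driver |
| |  * that must not run work across suspend can use a freezable workqueue; |
| |  * once freeze_workqueues_begin() has run, newly queued work sits on |
| |  * cwq->delayed_works until thaw_workqueues(): |
| |  * |
| |  *	struct workqueue_struct *foo_wq; |
| |  * |
| |  *	foo_wq = alloc_workqueue("foo", WQ_FREEZABLE, 0); |
| |  *	queue_work(foo_wq, &foo_work);		// held while frozen |
| |  */ |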
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3837 | |
| 3838 | /** |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3839 | * freeze_workqueues_busy - are freezable workqueues still busy? |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3840 | * |
| 3841 | * Check whether freezing is complete. This function must be called |
| 3842 | * between freeze_workqueues_begin() and thaw_workqueues(). |
| 3843 | * |
| 3844 | * CONTEXT: |
| 3845 | * Grabs and releases workqueue_lock. |
| 3846 | * |
| 3847 | * RETURNS: |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3848 | * %true if some freezable workqueues are still busy. %false if freezing |
| 3849 | * is complete. |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3850 | */ |
| 3851 | bool freeze_workqueues_busy(void) |
| 3852 | { |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3853 | unsigned int cpu; |
| 3854 | bool busy = false; |
| 3855 | |
| 3856 | spin_lock(&workqueue_lock); |
| 3857 | |
| 3858 | BUG_ON(!workqueue_freezing); |
| 3859 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3860 | for_each_gcwq_cpu(cpu) { |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3861 | struct workqueue_struct *wq; |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3862 | /* |
| 3863 | * nr_active is monotonically decreasing. It's safe |
| 3864 | * to peek without lock. |
| 3865 | */ |
| 3866 | list_for_each_entry(wq, &workqueues, list) { |
| 3867 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
| 3868 | |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3869 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3870 | continue; |
| 3871 | |
| 3872 | BUG_ON(cwq->nr_active < 0); |
| 3873 | if (cwq->nr_active) { |
| 3874 | busy = true; |
| 3875 | goto out_unlock; |
| 3876 | } |
| 3877 | } |
| 3878 | } |
| 3879 | out_unlock: |
| 3880 | spin_unlock(&workqueue_lock); |
| 3881 | return busy; |
| 3882 | } |
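| | |
| | /* |
| |  * Illustrative, simplified calling pattern; the real sequencing lives in |
| |  * the PM freezer (see kernel/power/process.c): |
| |  * |
| |  *	freeze_workqueues_begin(); |
| |  *	while (freeze_workqueues_busy()) |
| |  *		msleep(10); |
| |  *	// ... suspend or hibernate ... |
| |  *	thaw_workqueues(); |
| |  */ |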
| 3883 | |
| 3884 | /** |
| 3885 | * thaw_workqueues - thaw workqueues |
| 3886 | * |
| 3887 | * Thaw workqueues. Normal queueing is restored and all collected |
Tejun Heo | 7e11629 | 2010-06-29 10:07:13 +0200 | [diff] [blame] | 3888 | * frozen works are transferred to their respective gcwq worklists. |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3889 | * |
| 3890 | * CONTEXT: |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3891 |  * Grabs and releases workqueue_lock and each gcwq->lock. |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3892 | */ |
| 3893 | void thaw_workqueues(void) |
| 3894 | { |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3895 | unsigned int cpu; |
| 3896 | |
| 3897 | spin_lock(&workqueue_lock); |
| 3898 | |
| 3899 | if (!workqueue_freezing) |
| 3900 | goto out_unlock; |
| 3901 | |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3902 | for_each_gcwq_cpu(cpu) { |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3903 | struct global_cwq *gcwq = get_gcwq(cpu); |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3904 | struct worker_pool *pool; |
Tejun Heo | bdbc5dd | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3905 | struct workqueue_struct *wq; |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3906 | |
| 3907 | spin_lock_irq(&gcwq->lock); |
| 3908 | |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3909 | BUG_ON(!(gcwq->flags & GCWQ_FREEZING)); |
| 3910 | gcwq->flags &= ~GCWQ_FREEZING; |
| 3911 | |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3912 | list_for_each_entry(wq, &workqueues, list) { |
| 3913 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
| 3914 | |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 3915 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3916 | continue; |
| 3917 | |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3918 | /* restore max_active and repopulate worklist */ |
| 3919 | cwq->max_active = wq->saved_max_active; |
| 3920 | |
| 3921 | while (!list_empty(&cwq->delayed_works) && |
| 3922 | cwq->nr_active < cwq->max_active) |
| 3923 | cwq_activate_first_delayed(cwq); |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3924 | } |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3925 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3926 | for_each_worker_pool(pool, gcwq) |
| 3927 | wake_up_worker(pool); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3928 | |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3929 | spin_unlock_irq(&gcwq->lock); |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3930 | } |
| 3931 | |
| 3932 | workqueue_freezing = false; |
| 3933 | out_unlock: |
| 3934 | spin_unlock(&workqueue_lock); |
| 3935 | } |
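| | |
| | /* |
| |  * Illustrative note; foo_wq is hypothetical.  Because thawing restores |
| |  * cwq->max_active from wq->saved_max_active, a limit changed on a |
| |  * WQ_FREEZABLE workqueue while frozen only takes effect at thaw time: |
| |  * |
| |  *	freeze_workqueues_begin(); |
| |  *	workqueue_set_max_active(foo_wq, 1);	// recorded in saved_max_active |
| |  *	thaw_workqueues();			// cwq->max_active becomes 1 |
| |  */ |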
| 3936 | #endif /* CONFIG_FREEZER */ |
| 3937 | |
Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 3938 | static int __init init_workqueues(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3939 | { |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3940 | unsigned int cpu; |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3941 | int i; |
Tejun Heo | c34056a | 2010-06-29 10:07:11 +0200 | [diff] [blame] | 3942 | |
Tejun Heo | d3b4254 | 2012-07-17 12:39:26 -0700 | [diff] [blame] | 3943 | cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); |
| 3944 | cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3945 | |
| 3946 | /* initialize gcwqs */ |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3947 | for_each_gcwq_cpu(cpu) { |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3948 | struct global_cwq *gcwq = get_gcwq(cpu); |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3949 | struct worker_pool *pool; |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3950 | |
| 3951 | spin_lock_init(&gcwq->lock); |
| 3952 | gcwq->cpu = cpu; |
Tejun Heo | 477a3c3 | 2010-08-31 10:54:35 +0200 | [diff] [blame] | 3953 | gcwq->flags |= GCWQ_DISASSOCIATED; |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3954 | |
Tejun Heo | c8e55f3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3955 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) |
| 3956 | INIT_HLIST_HEAD(&gcwq->busy_hash[i]); |
| 3957 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3958 | for_each_worker_pool(pool, gcwq) { |
| 3959 | pool->gcwq = gcwq; |
| 3960 | INIT_LIST_HEAD(&pool->worklist); |
| 3961 | INIT_LIST_HEAD(&pool->idle_list); |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3962 | |
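| | 			/* |
| | 			 * idle_timer trims surplus idle workers once they have |
| | 			 * been idle for IDLE_WORKER_TIMEOUT; mayday_timer summons |
| | 			 * rescuers when creating a new worker for pending work is |
| | 			 * taking too long, e.g. under memory pressure. |
| | 			 */ |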
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3963 | init_timer_deferrable(&pool->idle_timer); |
| 3964 | pool->idle_timer.function = idle_worker_timeout; |
| 3965 | pool->idle_timer.data = (unsigned long)pool; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3966 | |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3967 | setup_timer(&pool->mayday_timer, gcwq_mayday_timeout, |
| 3968 | (unsigned long)pool); |
| 3969 | |
| 3970 | ida_init(&pool->worker_ida); |
| 3971 | } |
Tejun Heo | db7bccf | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3972 | |
| 3973 | gcwq->trustee_state = TRUSTEE_DONE; |
| 3974 | init_waitqueue_head(&gcwq->trustee_wait); |
Tejun Heo | 8b03ae3 | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 3975 | } |
| 3976 | |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3977 | /* create the initial worker */ |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3978 | for_each_online_gcwq_cpu(cpu) { |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3979 | struct global_cwq *gcwq = get_gcwq(cpu); |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3980 | struct worker_pool *pool; |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3981 | |
Tejun Heo | 477a3c3 | 2010-08-31 10:54:35 +0200 | [diff] [blame] | 3982 | if (cpu != WORK_CPU_UNBOUND) |
| 3983 | gcwq->flags &= ~GCWQ_DISASSOCIATED; |
Tejun Heo | 9c6bae0 | 2012-07-13 22:16:44 -0700 | [diff] [blame] | 3984 | |
| 3985 | for_each_worker_pool(pool, gcwq) { |
| 3986 | struct worker *worker; |
| 3987 | |
| 3988 | worker = create_worker(pool, true); |
| 3989 | BUG_ON(!worker); |
| 3990 | spin_lock_irq(&gcwq->lock); |
| 3991 | start_worker(worker); |
| 3992 | spin_unlock_irq(&gcwq->lock); |
| 3993 | } |
Tejun Heo | e22bee7 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3994 | } |
| 3995 | |
Tejun Heo | d320c03 | 2010-06-29 10:07:14 +0200 | [diff] [blame] | 3996 | system_wq = alloc_workqueue("events", 0, 0); |
| 3997 | system_long_wq = alloc_workqueue("events_long", 0, 0); |
| 3998 | system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); |
Tejun Heo | f342179 | 2010-07-02 10:03:51 +0200 | [diff] [blame] | 3999 | system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, |
| 4000 | WQ_UNBOUND_MAX_ACTIVE); |
Tejun Heo | 24d51ad | 2011-02-21 09:52:50 +0100 | [diff] [blame] | 4001 | system_freezable_wq = alloc_workqueue("events_freezable", |
| 4002 | WQ_FREEZABLE, 0); |
Alan Stern | 62d3c54 | 2012-03-02 10:51:00 +0100 | [diff] [blame] | 4003 | system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable", |
| 4004 | WQ_NON_REENTRANT | WQ_FREEZABLE, 0); |
Viresh Kumar | 1adb4b6 | 2013-04-24 17:12:54 +0530 | [diff] [blame] | 4005 | system_power_efficient_wq = alloc_workqueue("events_power_efficient", |
| 4006 | WQ_POWER_EFFICIENT, 0); |
| 4007 | system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", |
| 4008 | WQ_FREEZABLE | WQ_POWER_EFFICIENT, 0); |
Hitoshi Mitake | e5cba24 | 2010-11-26 12:06:44 +0100 | [diff] [blame] | 4009 | BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || |
Alan Stern | 62d3c54 | 2012-03-02 10:51:00 +0100 | [diff] [blame] | 4010 | !system_unbound_wq || !system_freezable_wq || |
Viresh Kumar | 1adb4b6 | 2013-04-24 17:12:54 +0530 | [diff] [blame] | 4011 | !system_nrt_freezable_wq || !system_power_efficient_wq || |
| 4012 | !system_freezable_power_efficient_wq); |
Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 4013 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4014 | } |
Suresh Siddha | 6ee0578 | 2010-07-30 14:57:37 -0700 | [diff] [blame] | 4015 | early_initcall(init_workqueues); |
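| | |
| | /* |
| |  * Illustrative usage of the system workqueues created above; foo_fn() and |
| |  * foo_work are hypothetical: |
| |  * |
| |  *	static void foo_fn(struct work_struct *work) |
| |  *	{ |
| |  *		pr_info("foo ran\n"); |
| |  *	} |
| |  *	static DECLARE_WORK(foo_work, foo_fn); |
| |  * |
| |  *	schedule_work(&foo_work);		// queues on system_wq |
| |  * |
| |  * Passing system_unbound_wq, system_freezable_wq, etc. to queue_work() |
| |  * selects the corresponding behaviour instead. |
| |  */ |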