/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is reserved as "no color", used for work items
	 * which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 8 bits off of the cwq pointer w/ debugobjects turned
	 * off (COLOR_SHIFT of 4 plus COLOR_BITS of 4).  This makes cwqs
	 * aligned to 256 bytes and allows 15 workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};
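
/*
 * Illustrative sketch (not part of this header's API): with
 * CONFIG_DEBUG_OBJECTS_WORK off, the low 8 flag bits leave the upper
 * bits of the data word free to carry a 256-byte-aligned cwq pointer:
 *
 *	unsigned long data = *work_data_bits(work);
 *
 *	if (data & WORK_STRUCT_CWQ)
 *		cwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	pending = data & WORK_STRUCT_PENDING;
 */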

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
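
/*
 * Typical handler pattern (an illustrative sketch; "my_dev",
 * "poll_work" and "my_poll_fn" are made-up names): a delayed work
 * function recovers its containing object from the work_struct it is
 * handed:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev,
 *						  poll_work);
 *	}
 */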

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),	\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

#define DECLARE_DEFERRED_WORK(n, f)				\
	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
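
/*
 * Example (an illustrative sketch; the names are made up): declaring
 * statically initialized work items next to their handlers:
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static void my_dwork_fn(struct work_struct *work);
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 */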

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
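
/*
 * Runtime initialization sketch (illustrative; "dev", "my_reset_fn"
 * and "my_poll_fn" are made-up names): work items embedded in
 * dynamically allocated objects are set up with INIT_WORK() and
 * friends rather than the DECLARE_*() initializers:
 *
 *	INIT_WORK(&dev->reset_work, my_reset_fn);
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *
 * On-stack items use the _ONSTACK variants and, with
 * CONFIG_DEBUG_OBJECTS_WORK, must be passed to destroy_work_on_stack()
 * before the frame is left.
 */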

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
 * it's freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_nrt_freezable_wq;
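
/*
 * Usage sketch (illustrative; "my_work", "my_slow_work" and "my_dwork"
 * are made-up names): most users queue onto the system workqueues
 * instead of allocating their own:
 *
 *	schedule_work(&my_work);		(queues on system_wq)
 *	queue_work(system_long_wq, &my_slow_work);
 *	queue_delayed_work(system_wq, &my_dwork,
 *			   msecs_to_jiffies(100));
 */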

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single
 * lock_class_key doesn't end up with different names, which isn't
 * allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(fmt))				\
		__lock_name = (fmt);				\
	else							\
		__lock_name = #fmt;				\
								\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      &__key, __lock_name, ##args);	\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)	\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      NULL, NULL, ##args)
#endif
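
/*
 * Allocation sketch (illustrative; "my_driver_wq" and "dev" are
 * made-up names): a driver creating a private workqueue whose work
 * items may run during memory reclaim:
 *
 *	dev->wq = alloc_workqueue("my_driver_wq", WQ_MEM_RECLAIM, 0);
 *	if (!dev->wq)
 *		return -ENOMEM;
 *	queue_work(dev->wq, &dev->reset_work);
 */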

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)		\
	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
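
/*
 * Teardown sketch (illustrative; "dev" and its members are made-up
 * names): before freeing an object whose work items may still be
 * queued or running, cancel them synchronously, then destroy any
 * private workqueue:
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *	cancel_work_sync(&dev->reset_work);
 *	destroy_workqueue(dev->wq);
 */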

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns %true and the work doesn't re-arm itself.  Run flush_workqueue()
 * or cancel_work_sync() to wait on it.
 */
static inline bool cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means
 * that if it returns %false, the timer function may still be running and the
 * queueing may still be in progress.
 */
static inline bool __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
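
/*
 * Re-arm pattern sketch (illustrative; the names are made up): a
 * handler that re-queues itself must be stopped with the _sync
 * variant, since plain cancel_delayed_work() can race with the
 * re-arm:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, poll_work);
 *
 *		my_poll_hw(dev);
 *		schedule_delayed_work(&dev->poll_work, dev->poll_interval);
 *	}
 *
 * Teardown then uses cancel_delayed_work_sync(&dev->poll_work).
 */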

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
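
/*
 * Usage sketch (illustrative; "my_fn", "my_do_something" and "my_arg"
 * are made-up names): run a function synchronously on a specific CPU
 * and collect its return value; on !SMP it simply runs inline:
 *
 *	static long my_fn(void *arg)
 *	{
 *		return my_do_something(arg);
 *	}
 *
 *	ret = work_on_cpu(cpu, my_fn, &my_arg);
 */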

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif