/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	int should_stop;
	void *data;
	struct completion exited;
};

#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
	return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);

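/*
 * Example (illustrative sketch, not part of this file): a typical thread
 * function polls kthread_should_stop() in its main loop and returns once
 * it reads true; that return value is handed back to kthread_stop().
 * The names example_threadfn() and example_do_work() are hypothetical.
 *
 *	static int example_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			example_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
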
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock, and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

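/*
 * Example (illustrative sketch, not part of this file): a freezable kthread
 * marks itself with set_freezable() and then uses
 * kthread_freezable_should_stop() as its loop condition, so it both honours
 * the freezer and notices kthread_stop().  example_threadfn() and
 * example_do_work() are hypothetical names.
 *
 *	static int example_threadfn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(NULL)) {
 *			example_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
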
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

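/*
 * Example (illustrative sketch, not part of this file): code that created a
 * kthread with a context pointer as @data can recover that pointer later
 * from the task_struct, much as the workqueue code does for its worker
 * threads.  struct example_ctx and example_task are hypothetical names.
 *
 *	struct example_ctx *ctx = kthread_data(example_task);
 */
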
static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.should_stop = 0;
	self.data = data;
	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;
	if (!self.should_stop)
		ret = threadfn(data);

	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node to get NUMA affinity for the kthread stack; otherwise give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

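/*
 * Example (illustrative sketch, not part of this file): create a named
 * kthread and start it explicitly.  Passing -1 as @node asks for no
 * particular NUMA placement; the kthread_run() macro from <linux/kthread.h>
 * wraps the same create-then-wake sequence.  example_threadfn() and
 * example_data are hypothetical names.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(example_threadfn, example_data, -1,
 *				     "example/%d", 0);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */
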
/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

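/*
 * Example (illustrative sketch, not part of this file): the usual per-cpu
 * pattern is create, bind while the thread is still stopped, then wake.
 * example_cpu_threadfn() is a hypothetical name and cpu is assumed to be a
 * possible CPU number.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(example_cpu_threadfn, NULL, "example/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */
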
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

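/*
 * Example (illustrative sketch, not part of this file): tearing a thread
 * down.  kthread_stop() sleeps until the thread has exited and returns the
 * value the thread function returned, or -EINTR if the thread was never
 * woken.  example_task is a hypothetical name.
 *
 *	int ret = kthread_stop(example_task);
 */
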
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
		work->done_seq = work->queue_seq;
		smp_mb();	/* mb worker-b1 paired with flush-b0 */
		if (atomic_read(&work->flushing))
			wake_up_all(&work->done);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work on @worker for asynchronous execution; if a kthread is
 * attached to @worker (see kthread_worker_fn()), it is woken to process
 * the work.  Returns %true if @work was successfully queued, %false if
 * it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &worker->work_list);
		work->queue_seq++;
		if (likely(worker->task))
			wake_up_process(worker->task);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

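/*
 * Example (illustrative sketch, not part of this file): a minimal
 * kthread_worker setup, assuming the init_kthread_worker() and
 * init_kthread_work() helpers from <linux/kthread.h>.  One kthread runs
 * kthread_worker_fn() on the worker, and work items are queued to it.
 * The example_* names (including the work function example_work_fn, a
 * void fn(struct kthread_work *)) are hypothetical.
 *
 *	static struct kthread_worker example_worker;
 *	static struct kthread_work example_work;
 *	static struct task_struct *example_worker_task;
 *
 *	init_kthread_worker(&example_worker);
 *	init_kthread_work(&example_work, example_work_fn);
 *	example_worker_task = kthread_run(kthread_worker_fn,
 *					  &example_worker, "example_worker");
 *	if (!IS_ERR(example_worker_task))
 *		queue_kthread_work(&example_worker, &example_work);
 */
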
/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	int seq = work->queue_seq;

	atomic_inc(&work->flushing);

	/*
	 * mb flush-b0 paired with worker-b1, to make sure either
	 * worker sees the above increment or we see done_seq update.
	 */
	smp_mb__after_atomic_inc();

	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
	wait_event(work->done, seq - work->done_seq <= 0);
	atomic_dec(&work->flushing);

	/*
	 * rmb flush-b1 paired with worker-b0, to make sure our caller
	 * sees every change made by work->func().
	 */
	smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
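
/*
 * Example (illustrative sketch, not part of this file): shutting a
 * kthread_worker down.  Flushing first guarantees that queued works have
 * finished before the worker thread itself is stopped.  The example_* names
 * are hypothetical and continue the sketch above.
 *
 *	flush_kthread_worker(&example_worker);
 *	kthread_stop(example_worker_task);
 */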