/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via keventd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/semaphore.h>

/*
 * We don't want to execute off keventd since it might
 * hold a semaphore our callers hold too:
 */
static struct workqueue_struct *helper_wq;

struct kthread_create_info
{
	/* Information passed to kthread() from keventd. */
	int (*threadfn)(void *data);
	void *data;
	struct completion started;

	/* Result passed back to kthread_create() from keventd. */
	struct task_struct *result;
	struct completion done;
};

struct kthread_stop_info
{
	struct task_struct *k;
	int err;
	struct completion done;
};

/* Thread stopping is done by setting this var: lock serializes
 * multiple kthread_stop calls. */
static DECLARE_MUTEX(kthread_stop_lock);
static struct kthread_stop_info kthread_stop_info;

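/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on this thread, the thread is woken
 * and this returns true.  The thread should then return from its
 * threadfn(); that return value is handed back to kthread_stop().
 */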
int kthread_should_stop(void)
{
	return (kthread_stop_info.k == current);
}
EXPORT_SYMBOL(kthread_should_stop);

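/*
 * Drop the fs and files we inherited from whoever spawned us and share
 * init's instead, so the new thread keeps no references to its creator's
 * environment.
 */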
static void kthread_exit_files(void)
{
	struct fs_struct *fs;
	struct task_struct *tsk = current;

	exit_fs(tsk);		/* current->fs->count--; */
	fs = init_task.fs;
	tsk->fs = fs;
	atomic_inc(&fs->count);
	exit_files(tsk);
	current->files = init_task.files;
	atomic_inc(&tsk->files->count);
}

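/*
 * Common trampoline for every kthread: detach from the creator, block all
 * signals, then sleep until either kthread_stop() or the first
 * wake_up_process() arrives.  threadfn() only runs if we were not asked
 * to stop before ever being woken.
 */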
static int kthread(void *_create)
{
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data);
	void *data;
	sigset_t blocked;
	int ret = -EINTR;

	kthread_exit_files();

	/* Copy data: it's on keventd's stack */
	threadfn = create->threadfn;
	data = create->data;

	/* Block and flush all signals (in case we're not from keventd). */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* By default we can run anywhere, unlike keventd. */
	set_cpus_allowed(current, CPU_MASK_ALL);

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_INTERRUPTIBLE);
	complete(&create->started);
	schedule();

	if (!kthread_should_stop())
		ret = threadfn(data);

	/* It might have exited on its own, w/o kthread_stop.  Check. */
	if (kthread_should_stop()) {
		kthread_stop_info.err = ret;
		complete(&kthread_stop_info.done);
	}
	return 0;
}

/* We are keventd: create a thread. */
static void keventd_create_kthread(void *_create)
{
	struct kthread_create_info *create = _create;
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
	} else {
		wait_for_completion(&create->started);
		create->result = find_task_by_pid(pid);
	}
	complete(&create->done);
}

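/**
 * kthread_create - create a kernel thread.
 * @threadfn: the function to run in the new thread.
 * @data: data pointer for @threadfn.
 * @namefmt: printf-style format for the thread name.
 *
 * Creation is handed off to the "kthread" helper workqueue so the new
 * thread gets a clean environment even when the caller is a userspace
 * helper such as modprobe.  The thread is created sleeping
 * (TASK_INTERRUPTIBLE); call kthread_bind() first if it must be pinned
 * to a CPU, then wake_up_process() to start it.
 *
 * Returns the task_struct of the new thread, or an ERR_PTR() on failure.
 */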
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;
	DECLARE_WORK(work, keventd_create_kthread, &create);

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	/*
	 * The workqueue needs to start up first:
	 */
	if (!helper_wq)
		work.func(work.data);
	else {
		queue_work(helper_wq, &work);
		wait_for_completion(&create.done);
	}
	if (!IS_ERR(create.result)) {
		va_list args;
		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
	}

	return create.result;
}
EXPORT_SYMBOL(kthread_create);

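/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu the thread should run on.
 *
 * The thread must still be sleeping in kthread(), i.e. it has not been
 * woken yet (hence the BUG_ON below).  The cpu is set directly instead of
 * going through set_cpus_allowed(), which also works for a cpu that is
 * not online yet.
 */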
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	BUG_ON(k->state != TASK_INTERRUPTIBLE);
	/* Must have done schedule() in kthread() before we set_task_cpu */
	wait_task_inactive(k);
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
}
EXPORT_SYMBOL(kthread_bind);

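/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: the thread to stop.
 *
 * Makes kthread_should_stop() return true for @k, wakes it, and waits for
 * it to exit.  Returns the value @k's threadfn() returned, or -EINTR if
 * the thread was stopped before it ever ran threadfn().  Callers are
 * serialized on kthread_stop_lock because kthread_stop_info is global.
 */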
int kthread_stop(struct task_struct *k)
{
	int ret;

	down(&kthread_stop_lock);

	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();

	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	wake_up_process(k);
	put_task_struct(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	up(&kthread_stop_lock);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

static __init int helper_init(void)
{
	helper_wq = create_singlethread_workqueue("kthread");
	BUG_ON(!helper_wq);

	return 0;
}
core_initcall(helper_init);
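/*
 * Minimal usage sketch (illustration only, not part of this file;
 * my_thread_fn, my_data and do_some_work are made-up names):
 *
 *	static int my_thread_fn(void *my_data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_some_work(my_data);
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			schedule_timeout(HZ);
 *		}
 *		return 0;	-- handed back through kthread_stop()
 *	}
 *
 *	struct task_struct *t = kthread_create(my_thread_fn, my_data, "mythread");
 *	if (!IS_ERR(t)) {
 *		kthread_bind(t, 0);	-- optional: pin to CPU 0 before waking
 *		wake_up_process(t);	-- the thread does not run until woken
 *		...
 *		kthread_stop(t);	-- returns my_thread_fn()'s return value
 *	}
 */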