/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

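/* Fill in the credentials (uid/gid/pid) of the current task for a new request */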
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}

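/*
 * Allocate a request for a synchronous operation.
 *
 * Waits while fc->blocked is set (only SIGKILL can interrupt the
 * wait) and returns -EINTR, -ENOTCONN or -ENOMEM on failure.
 */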
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

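/*
 * Hand out the next unique request ID.  Zero is reserved, so the
 * counter skips it on wraparound.  Called with fc->lock held.
 */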
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

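/*
 * Put a request on the pending list and wake up the reader.
 * Called with fc->lock held.
 */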
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

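/*
 * Move queued background requests onto the pending list, as long as
 * fewer than FUSE_MAX_BACKGROUND of them are active.
 */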
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < FUSE_MAX_BACKGROUND &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			clear_bdi_congested(&fc->bdi, READ);
			clear_bdi_congested(&fc->bdi, WRITE);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}

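/*
 * Wait for the request to finish, dropping fc->lock around the sleep.
 * Returns immediately if a signal is already pending.
 */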
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

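/*
 * Send a request and wait for the reply (or for the request to be
 * aborted or interrupted).  The result ends up in req->out.h.error.
 */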
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

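/*
 * Queue a background request.  When FUSE_MAX_BACKGROUND is reached
 * the connection is marked blocked, and at FUSE_CONGESTION_THRESHOLD
 * the bdi is marked congested.  Called with fc->lock held.
 */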
static void request_send_nowait_locked(struct fuse_conn *fc,
				       struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == FUSE_MAX_BACKGROUND)
		fc->blocked = 1;
	if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
		set_bdi_congested(&fc->bdi, READ);
		set_bdi_congested(&fc->bdi, WRITE);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

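/* Send a request for which no reply is expected (e.g. FORGET) */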
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

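/* Set up the state used while copying a request to/from userspace */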
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

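/* Is there a request (or a queued interrupt) ready to be read? */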
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

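/*
 * Copy the reply arguments from the userspace buffer.  A shorter than
 * expected last argument is only allowed if out->argvar is set.
 */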
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

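/* The device is always writable; it is readable whenever a request is pending */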
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

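/*
 * The device file was closed: mark the connection dead and fail all
 * pending and processing requests.
 */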
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		fuse_conn_put(fc);
	}

	return 0;
}

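/* Enable or disable SIGIO delivery for the device file */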
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = fuse_dev_read,
	.write = do_sync_write,
	.aio_write = fuse_dev_write,
	.poll = fuse_dev_poll,
	.release = fuse_dev_release,
	.fasync = fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

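/* Create the request slab cache and register the misc device */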
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}