/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 *
 */
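
/*
 * A minimal user-space sketch of how this path is reached (illustrative
 * only, not part of this file): opening a file with O_DIRECT routes the
 * application's reads and writes through nfs_direct_IO() below.  The
 * path, buffer size, and alignment here are assumptions for the example;
 * O_DIRECT callers conventionally align both buffer and length to the
 * page size.
 *
 *	int fd = open("/mnt/nfs/data", O_RDONLY | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	ssize_t n = read(fd, buf, 4096);
 */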

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#define NFSDBG_FACILITY		NFSDBG_VFS
#define MAX_DIRECTIO_SIZE	(4096UL << PAGE_SHIFT)
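/*
 * Worked example, assuming 4 KB pages (PAGE_SHIFT == 12): the limit
 * above is 4096 pages, i.e. 4096UL << 12 == 16 MB per call to
 * nfs_get_user_pages().
 */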

static kmem_cache_t *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */
	struct list_head	list;		/* nfs_read_data structs */
	wait_queue_head_t	wait;		/* wait for i/o completion */
	struct page		**pages;	/* pages in our buffer */
	unsigned int		npages;		/* count of pages */
	atomic_t		complete,	/* i/os we're waiting for */
				count,		/* bytes actually processed */
				error;		/* any reported error */
};
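
/*
 * Reference counting note, derived from the code below rather than any
 * separate documentation: nfs_direct_read_alloc() leaves a new dreq
 * holding two references (kref_init() plus an explicit kref_get()).
 * One is dropped by nfs_direct_read_wait() when the waiter is finished,
 * the other by nfs_direct_read_result() when the last outstanding READ
 * completes, so the structure outlives whichever happens first.
 */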

/**
 * nfs_get_user_pages - find and set up pages underlying user's buffer
 * @rw: direction (read or write)
 * @user_addr: starting address of this segment of user's buffer
 * @size: size of this segment
 * @pages: returned array of page struct pointers underlying user's buffer
 */
static inline int
nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
		struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	/* set an arbitrary limit to prevent type overflow */
	/* XXX: this can probably be as large as INT_MAX */
	if (size > MAX_DIRECTIO_SIZE) {
		*pages = NULL;
		return -EFBIG;
	}

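	/*
	 * Worked example for the span arithmetic below, with illustrative
	 * values: assuming 4 KB pages, user_addr == 0x1234 and size == 8192
	 * give ((0x1234 + 8192 + 4095) >> 12) - (0x1234 >> 12) == 4 - 1,
	 * i.e. three pages, because the buffer starts mid-page.
	 */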
	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;

	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
	}
	return result;
}
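
/*
 * Note that a positive return from nfs_get_user_pages() is the number of
 * pages get_user_pages() actually pinned, which can be smaller than the
 * number spanned by the buffer if part of it is unmapped.
 */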

/**
 * nfs_free_user_pages - tear down page struct array
 * @pages: array of page struct pointers underlying target buffer
 * @npages: number of pages in the array
 * @do_dirty: dirty the pages as we release them
 */
static void
nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;
	for (i = 0; i < npages; i++) {
		if (do_dirty)
			set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);
	}
	kfree(pages);
}

/**
 * nfs_direct_req_release - release nfs_direct_req structure for direct read
 * @kref: kref object embedded in an nfs_direct_req structure
 */
static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

/**
 * nfs_direct_read_alloc - allocate nfs_read_data structures for direct read
 * @nbytes: count of bytes for the read request
 * @rsize: local rsize setting
 *
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int reads = 0;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	atomic_set(&dreq->count, 0);
	atomic_set(&dreq->error, 0);

	list = &dreq->list;
	for (;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		reads++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	kref_get(&dreq->kref);
	atomic_set(&dreq->complete, reads);
	return dreq;
}

/**
 * nfs_direct_read_result - handle a read reply for a direct read request
 * @data: address of NFS READ operation control block
 * @status: status of this NFS READ operation
 *
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct nfs_read_data *data, int status)
{
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (likely(status >= 0))
		atomic_add(data->res.count, &dreq->count);
	else
		atomic_set(&dreq->error, status);

	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
		wake_up(&dreq->wait);
		kref_put(&dreq->kref, nfs_direct_req_release);
	}
}

/**
 * nfs_direct_read_schedule - dispatch NFS READ operations for a direct read
 * @dreq: address of nfs_direct_req struct for this request
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 *
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
		struct inode *inode, struct nfs_open_context *ctx,
		unsigned long user_addr, size_t count, loff_t file_offset)
{
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	unsigned int curpage, pgbase;
	unsigned int rsize = NFS_SERVER(inode)->rsize;

	curpage = 0;
	pgbase = user_addr & ~PAGE_MASK;
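	/*
	 * Illustrative walk of the cursor arithmetic in the loop below,
	 * assuming 4 KB pages and rsize == 8192: starting at pgbase 0x234,
	 * the first 8192-byte READ ends at pgbase 0x234 + 8192 == 0x2234,
	 * so curpage advances by 0x2234 >> 12 == 2 pages and pgbase wraps
	 * back to 0x234 for the next chunk.
	 */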
	do {
		struct nfs_read_data *data;
		unsigned int bytes;

		bytes = rsize;
		if (count < rsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = file_offset;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;
		data->complete = nfs_direct_read_result;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long) NFS_FILEID(inode),
				bytes,
				(unsigned long long) data->args.offset);

		file_offset += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}

/**
 * nfs_direct_read_wait - wait for I/O completion for direct reads
 * @dreq: request on which we are to wait
 * @intr: whether or not this wait can be interrupted
 *
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
{
	int result = 0;

	if (intr) {
		result = wait_event_interruptible(dreq->wait,
					(atomic_read(&dreq->complete) == 0));
	} else {
		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
	}

	if (!result)
		result = atomic_read(&dreq->error);
	if (!result)
		result = atomic_read(&dreq->count);

	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}

/**
 * nfs_direct_read_seg - Read in one iov segment.  Generate separate
 *			  read RPCs for each "rsize" bytes.
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 * @pages: array of addresses of page structs defining user's buffer
 * @nr_pages: number of pages in the array
 */
static ssize_t nfs_direct_read_seg(struct inode *inode,
		struct nfs_open_context *ctx, unsigned long user_addr,
		size_t count, loff_t file_offset, struct page **pages,
		unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->pages = pages;
	dreq->npages = nr_pages;

	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq, inode, ctx, user_addr, count,
				 file_offset);
	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

/**
 * nfs_direct_read - For each iov segment, map the user's buffer
 *		      then generate read RPCs.
 * @inode: target inode
 * @ctx: target file open context
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * We've already pushed out any non-direct writes so that this read
 * will see them when we read from the server.
 */
static ssize_t
nfs_direct_read(struct inode *inode, struct nfs_open_context *ctx,
		const struct iovec *iov, loff_t file_offset,
		unsigned long nr_segs)
{
	ssize_t tot_bytes = 0;
	unsigned long seg = 0;

	while ((seg < nr_segs) && (tot_bytes >= 0)) {
		ssize_t result;
		int page_count;
		struct page **pages;
		const struct iovec *vec = &iov[seg++];
		unsigned long user_addr = (unsigned long) vec->iov_base;
		size_t size = vec->iov_len;

		page_count = nfs_get_user_pages(READ, user_addr, size, &pages);
		if (page_count < 0) {
			nfs_free_user_pages(pages, 0, 0);
			if (tot_bytes > 0)
				break;
			return page_count;
		}

		result = nfs_direct_read_seg(inode, ctx, user_addr, size,
				file_offset, pages, page_count);

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			return result;
		}
		tot_bytes += result;
		file_offset += result;
		if (result < size)
			break;
	}

	return tot_bytes;
}

/**
 * nfs_direct_write_seg - Write out one iov segment.  Generate separate
 *			   write RPCs for each "wsize" bytes, then commit.
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 * @pages: array of addresses of page structs defining user's buffer
 * @nr_pages: size of pages array
 */
static ssize_t nfs_direct_write_seg(struct inode *inode,
		struct nfs_open_context *ctx, unsigned long user_addr,
		size_t count, loff_t file_offset, struct page **pages,
		int nr_pages)
{
	const unsigned int wsize = NFS_SERVER(inode)->wsize;
	size_t request;
	int curpage, need_commit;
	ssize_t result, tot_bytes;
	struct nfs_writeverf first_verf;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
	if (!wdata)
		return -ENOMEM;

	wdata->inode = inode;
	wdata->cred = ctx->cred;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.stable = NFS_UNSTABLE;
	if (IS_SYNC(inode) || NFS_PROTO(inode)->version == 2 || count <= wsize)
		wdata->args.stable = NFS_FILE_SYNC;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	nfs_begin_data_update(inode);
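	/*
	 * A note on the retry protocol below, summarizing standard NFSv3
	 * semantics for the reader: UNSTABLE writes reach only the server's
	 * cache and must be followed by a COMMIT.  Each reply carries a
	 * verifier; if the verifier changes between replies, or between the
	 * writes and the commit, the server may have rebooted and lost the
	 * cached data, so the whole segment is retried with NFS_FILE_SYNC
	 * via sync_retry.
	 */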
retry:
	need_commit = 0;
	tot_bytes = 0;
	curpage = 0;
	request = count;
	wdata->args.pgbase = user_addr & ~PAGE_MASK;
	wdata->args.offset = file_offset;
	do {
		wdata->args.count = request;
		if (wdata->args.count > wsize)
			wdata->args.count = wsize;
		wdata->args.pages = &pages[curpage];

		dprintk("NFS: direct write: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
			wdata->args.count, (long long) wdata->args.offset,
			user_addr + tot_bytes, wdata->args.pgbase, curpage);

		lock_kernel();
		result = NFS_PROTO(inode)->write(wdata);
		unlock_kernel();

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			goto out;
		}

		if (tot_bytes == 0)
			memcpy(&first_verf.verifier, &wdata->verf.verifier,
						sizeof(first_verf.verifier));
		if (wdata->verf.committed != NFS_FILE_SYNC) {
			need_commit = 1;
			if (memcmp(&first_verf.verifier, &wdata->verf.verifier,
					sizeof(first_verf.verifier)) != 0)
				goto sync_retry;
		}

		tot_bytes += result;

		/* in case of a short write: stop now, let the app recover */
		if (result < wdata->args.count)
			break;

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		curpage += wdata->args.pgbase >> PAGE_SHIFT;
		wdata->args.pgbase &= ~PAGE_MASK;
		request -= result;
	} while (request != 0);

	/*
	 * Commit data written so far, even in the event of an error
	 */
	if (need_commit) {
		wdata->args.count = tot_bytes;
		wdata->args.offset = file_offset;

		lock_kernel();
		result = NFS_PROTO(inode)->commit(wdata);
		unlock_kernel();

		if (result < 0 || memcmp(&first_verf.verifier,
					 &wdata->verf.verifier,
					 sizeof(first_verf.verifier)) != 0)
			goto sync_retry;
	}
	result = tot_bytes;

out:
	nfs_end_data_update(inode);
	nfs_writedata_free(wdata);
	return result;

sync_retry:
	wdata->args.stable = NFS_FILE_SYNC;
	goto retry;
}

/**
 * nfs_direct_write - For each iov segment, map the user's buffer
 *		       then generate write and commit RPCs.
 * @inode: target inode
 * @ctx: target file open context
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * Upon return, generic_file_direct_IO invalidates any cached pages
 * that non-direct readers might access, so they will pick up these
 * writes immediately.
 */
static ssize_t nfs_direct_write(struct inode *inode,
		struct nfs_open_context *ctx, const struct iovec *iov,
		loff_t file_offset, unsigned long nr_segs)
{
	ssize_t tot_bytes = 0;
	unsigned long seg = 0;

	while ((seg < nr_segs) && (tot_bytes >= 0)) {
		ssize_t result;
		int page_count;
		struct page **pages;
		const struct iovec *vec = &iov[seg++];
		unsigned long user_addr = (unsigned long) vec->iov_base;
		size_t size = vec->iov_len;

		page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
		if (page_count < 0) {
			nfs_free_user_pages(pages, 0, 0);
			if (tot_bytes > 0)
				break;
			return page_count;
		}

		result = nfs_direct_write_seg(inode, ctx, user_addr, size,
				file_offset, pages, page_count);
		nfs_free_user_pages(pages, page_count, 0);

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			return result;
		}
		tot_bytes += result;
		file_offset += result;
		if (result < size)
			break;
	}
	return tot_bytes;
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 */
ssize_t
nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t file_offset, unsigned long nr_segs)
{
	ssize_t result = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx;
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;

	/*
	 * No support for async yet
	 */
	if (!is_sync_kiocb(iocb))
		return result;

	ctx = (struct nfs_open_context *) file->private_data;
	switch (rw) {
	case READ:
		dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n",
				dentry->d_name.name, file_offset, nr_segs);

		result = nfs_direct_read(inode, ctx, iov,
					 file_offset, nr_segs);
		break;
	case WRITE:
		dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n",
				dentry->d_name.name, file_offset, nr_segs);

		result = nfs_direct_write(inode, ctx, iov,
					  file_offset, nr_segs);
		break;
	default:
		break;
	}
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  So our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t
nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	loff_t *ppos = &iocb->ki_pos;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = count,
	};

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if (!is_sync_kiocb(iocb))
		goto out;
	if ((ssize_t) count < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, iov.iov_base, iov.iov_len))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_read(inode, ctx, &iov, pos, 1);
	if (retval > 0)
		*ppos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t
nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = (char __user *) buf,
	};

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = -EINVAL;
	if (!is_sync_kiocb(iocb))
		goto out;

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;
	iov.iov_len = count;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, SLAB_RECLAIM_ACCOUNT,
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}