/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * xfs_iozero
 *
 *	xfs_iozero clears the specified range of the buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode */
	loff_t			pos,	/* offset in file */
	size_t			count)	/* size of data to zero */
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

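/*
 * Usage sketch: xfs_zero_last_block() below uses xfs_iozero() to clear
 * the tail of the block containing the old EOF before a file is
 * extended, roughly:
 *
 *	zero_len = mp->m_sb.sb_blocksize - zero_offset;
 *	error = xfs_iozero(ip, isize, zero_len);
 *
 * pagecache_write_begin() reads in any partially-valid block first, so
 * only the requested range ends up zeroed.
 */
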
STATIC int
xfs_file_fsync(
	struct file		*file,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_trans	*tp;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	xfs_ioend_wait(ip);

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/*
	 * First check if the VFS inode is marked dirty.  All the dirtying
	 * of non-transactional updates now goes through mark_inode_dirty*,
	 * which allows us to distinguish between pure timestamp updates
	 * and i_size updates which need to be caught for fdatasync.
	 * After that also check for the dirty state in the XFS inode, which
	 * might get cleared when the inode gets written out via the AIL
	 * or xfs_iflush_cluster.
	 */
	if (((inode->i_state & I_DIRTY_DATASYNC) ||
	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
	    ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.  So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_set_sync(tp);
		error = _xfs_trans_commit(tp, 0, &log_flushed);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
		 * Timestamps/size haven't changed since last inode flush or
		 * inode transaction commit.  That means either nothing got
		 * written or a transaction committed which caught the updates.
		 * If the latter happened and the transaction hasn't hit the
		 * disk yet, the inode will still be pinned.  If it is,
		 * force the log.
		 */
		if (xfs_ipincount(ip)) {
			error = _xfs_log_force_lsn(ip->i_mount,
					ip->i_itemp->ili_last_lsn,
					XFS_LOG_SYNC, &log_flushed);
		}
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	}

	if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If the log write didn't issue an ordered tag we need
		 * to flush the disk cache for the data device now.
		 */
		if (!log_flushed)
			xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);

		/*
		 * If this inode is on the RT dev we need to flush that
		 * cache as well.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
	}

	return -error;
}

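/*
 * How the datasync argument maps to the syscalls (assuming the generic
 * VFS behaviour of this era): fsync(2) arrives here with datasync == 0
 * and must also flush pure timestamp updates (I_DIRTY_SYNC), while
 * fdatasync(2) passes datasync == 1 and may skip them:
 *
 *	fsync(fd);	 datasync = 0: I_DIRTY_SYNC or I_DIRTY_DATASYNC
 *	fdatasync(fd);	 datasync = 1: I_DIRTY_DATASYNC only
 */
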
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
		}
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

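/*
 * Example of the direct I/O alignment rule above: bt_smask is the
 * sector mask of the backing device (sector size - 1, e.g. 511 for a
 * 512-byte sector device), so both the file offset and the total I/O
 * size must be sector aligned.  Assuming fd was opened with O_DIRECT:
 *
 *	pread(fd, buf, 4096, 512);	sector aligned, passes the check
 *	pread(fd, buf, 4096, 100);	fails with EINVAL, unless offset
 *					100 is exactly EOF (returns 0)
 *
 * The alignment of the user buffer itself is checked further down, in
 * the generic direct I/O code.
 */
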
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC void
xfs_aio_write_isize_update(
	struct inode	*inode,
	loff_t		*ppos,
	ssize_t		bytes_written)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		isize = i_size_read(inode);

	if (bytes_written > 0)
		XFS_STATS_ADD(xs_write_bytes, bytes_written);

	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
					*ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

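/*
 * The i_size update above is a double-checked pattern: the unlocked
 * test keeps the common non-extending case from taking XFS_ILOCK_EXCL,
 * and the test is repeated under the lock because a concurrent writer
 * may have extended ip->i_size in the meantime.
 */
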
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);

	xfs_aio_write_isize_update(inode, ppos, ret);

	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

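/*
 * i_new_size, set before the write and cleared after it, records the
 * size the file will have if the write completes, so that I/O
 * completion and writeback that run while the write is in flight can
 * see the pending EOF rather than the current one.  The di_size
 * trimming on the way out undoes any on-disk size update that ran
 * ahead of the in-memory size, e.g. after a short write.
 */
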
/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

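/*
 * Worked example, assuming a 4096-byte block size: extending a file of
 * size 6144 bytes with a write at offset 20000 gives
 *
 *	zero_offset = XFS_B_FSB_OFFSET(mp, 6144) = 6144 & 4095 = 2048
 *	zero_len    = 4096 - 2048 = 2048
 *
 * so bytes 6144..8191 (the tail of block 1) are zeroed.  The
 * 'isize + zero_len > offset' cap only applies when the new write
 * starts inside that same block.
 */
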
/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes in the
 * range are left alone as holes, and unwritten extents are skipped,
 * since both already read back as zeroes.
 */

int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

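/*
 * Worked example, again assuming 4096-byte blocks: extending a file of
 * size 10000 with a write at offset 50000 gives
 *
 *	last_fsb       = XFS_B_TO_FSBT(mp, 9999)  = 2	(rounds down)
 *	start_zero_fsb = XFS_B_TO_FSB(mp, 10000)  = 3	(rounds up)
 *	end_zero_fsb   = XFS_B_TO_FSBT(mp, 49999) = 12
 *
 * The tail of block 2 was zeroed by xfs_zero_last_block(); the loop
 * then walks blocks 3..12 and zeroes only those that are allocated
 * and written.
 */
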
STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		new_size;
	int			iolock;
	size_t			ocount = 0, count;
	int			need_i_mutex;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	count = ocount;
	if (count == 0)
		return 0;

	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

relock:
	if (ioflags & IO_ISDIRECT) {
		iolock = XFS_IOLOCK_SHARED;
		need_i_mutex = 0;
	} else {
		iolock = XFS_IOLOCK_EXCL;
		need_i_mutex = 1;
		mutex_lock(&inode->i_mutex);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);

start:
	ret = generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (ret) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
			return XFS_ERROR(-EINVAL);
		}

		if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
			iolock = XFS_IOLOCK_EXCL;
			need_i_mutex = 1;
			mutex_lock(&inode->i_mutex);
			xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
			goto start;
		}
	}

	new_size = pos + count;
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;

	if (likely(!(ioflags & IO_INVIS)))
		file_update_time(file);

	/*
	 * If the offset is beyond the current size of the file we need to
	 * zero any on-disk space between the old EOF and the start of this
	 * write that would otherwise expose stale data, including a partial
	 * page where the previous size lands.  xfs_zero_eof() handles both
	 * cases.
	 */

	if (pos > ip->i_size) {
		ret = -xfs_zero_eof(ip, pos, ip->i_size);
		if (ret) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			goto out_unlock_internal;
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */
	ret = file_remove_suid(file);
	if (unlikely(ret))
		goto out_unlock_internal;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (mapping->nrpages) {
			WARN_ON(need_i_mutex == 0);
			ret = -xfs_flushinval_pages(ip,
					(pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret)
				goto out_unlock_internal;
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			need_i_mutex = 0;
		}

		trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&nr_segs, pos, &iocb->ki_pos, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(ip, iolock);
			goto relock;
		}
	} else {
		int enospc = 0;

write_retry:
		trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, nr_segs,
				pos, &iocb->ki_pos, count, ret);
		/*
		 * if we just got an ENOSPC, flush the inode now we
		 * aren't holding any page locks and retry *once*
		 */
		if (ret == -ENOSPC && !enospc) {
			ret = xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
			if (ret)
				goto out_unlock_internal;
			enospc = 1;
			goto write_retry;
		}
	}

	current->backing_dev_info = NULL;

	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

	if (ret <= 0)
		goto out_unlock_internal;

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error, error2;

		xfs_iunlock(ip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);

		error = filemap_write_and_wait_range(mapping, pos, end);
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(ip, iolock);

		error2 = -xfs_file_fsync(file,
					 (file->f_flags & __O_SYNC) ? 0 : 1);
		if (error)
			ret = error;
		else if (error2)
			ret = error2;
	}

 out_unlock_internal:
	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		/*
		 * If this was a direct or synchronous I/O that failed (such
		 * as ENOSPC) then part of the I/O may have been written to
		 * disk before the error occurred.  In this case the on-disk
		 * file size may have been adjusted beyond the in-memory file
		 * size and now needs to be truncated back.
		 */
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, iolock);
 out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
	return ret;
}

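/*
 * A note on the lock choreography above: direct writes start with the
 * shared iolock and only fall back to i_mutex plus the exclusive
 * iolock when cached pages must be invalidated or the file is being
 * extended; once the page cache is flushed the lock is demoted again
 * so that concurrent direct writers are not serialised.  A short
 * direct write that hit a hole restarts at 'relock' as a buffered
 * write to complete the remainder of the request.
 */
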
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

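/*
 * Example of the estimate above: a 2000-byte directory gets
 * bufsize = min(32768, 2000) = 2000, while any directory of 32k or
 * more is clamped to 32768 bytes, the getdents buffer size glibc used
 * at the time.
 */
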
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

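/*
 * block_page_mkwrite() runs roughly the same write_begin path as a
 * buffered write, against xfs_get_blocks(), so the delayed-allocation
 * reservation - and with it the ENOSPC check - happens at fault time
 * rather than at writeback, when it would be too late to fail the
 * write cleanly.
 */
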
const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};