/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
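/*
 * The resulting lock order is i_mutex -> i_iolock -> i_lock; the i_mutex is
 * only required (and hence only taken) when the iolock is being acquired
 * exclusively, which is why these helpers key off XFS_IOLOCK_EXCL.
 */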
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of the buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

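	/*
	 * status is zero on success or the negative errno returned by
	 * pagecache_write_begin(); negating it here gives the positive
	 * error convention our callers (e.g. xfs_zero_last_block) expect.
	 */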
	return (-status);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_trans	*tp;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	xfs_ioend_wait(ip);

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/*
	 * First check if the VFS inode is marked dirty.  All the dirtying
	 * of non-transactional updates now goes through mark_inode_dirty*,
	 * which allows us to distinguish between pure timestamp updates
	 * and i_size updates which need to be caught for fdatasync.
	 * After that also check for the dirty state in the XFS inode, which
	 * might get cleared when the inode gets written out via the AIL
	 * or xfs_iflush_cluster.
	 */
	if (((inode->i_state & I_DIRTY_DATASYNC) ||
	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
	    ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.  So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_set_sync(tp);
		error = _xfs_trans_commit(tp, 0, &log_flushed);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
		 * Timestamps/size haven't changed since last inode flush or
		 * inode transaction commit.  That means either nothing got
		 * written or a transaction committed which caught the updates.
		 * If the latter happened and the transaction hasn't hit the
		 * disk yet, the inode will still be pinned.  If it is,
		 * force the log.
		 */
		if (xfs_ipincount(ip)) {
			error = _xfs_log_force_lsn(ip->i_mount,
					ip->i_itemp->ili_last_lsn,
					XFS_LOG_SYNC, &log_flushed);
		}
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	}

	if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If the log write didn't issue an ordered tag we need
		 * to flush the disk cache for the data device now.
		 */
		if (!log_flushed)
			xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);

		/*
		 * If this inode is on the RT dev we need to flush that
		 * cache as well.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
	}

	return -error;
}

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

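	/*
	 * For direct IO, take the iolock exclusive so that any cached pages
	 * over the range can be flushed and invalidated, then demote it to
	 * shared for the read itself so direct reads can proceed
	 * concurrently.
	 */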
	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	} else
		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC void
xfs_aio_write_isize_update(
	struct inode	*inode,
	loff_t		*ppos,
	ssize_t		bytes_written)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		isize = i_size_read(inode);

	if (bytes_written > 0)
		XFS_STATS_ADD(xs_write_bytes, bytes_written);

	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
					*ppos > isize))
		*ppos = isize;

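	/*
	 * Check without the ilock first so the common case of no size change
	 * stays lock-free; the check is repeated under XFS_ILOCK_EXCL to
	 * close the race with a concurrent size update.
	 */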
	if (*ppos > ip->i_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

/*
 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
 * part of the I/O may have been written to disk before the error occurred.  In
 * this case the on-disk file size may have been adjusted beyond the in-memory
 * file size and now needs to be truncated back.
 */
STATIC void
xfs_aio_write_newsize_update(
	struct xfs_inode	*ip)
{
	if (ip->i_new_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself. This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode. The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

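	/*
	 * Record the size this write may extend the file to in i_new_size so
	 * that writeback and I/O completion know how far the on-disk size may
	 * legitimately be updated; xfs_aio_write_newsize_update() clears it
	 * again once the write has returned.
	 */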
	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);

	xfs_aio_write_isize_update(inode, ppos, ret);
	xfs_aio_write_newsize_update(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes in the
 * range are left alone as holes.
 */

int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * The range is a hole or an unwritten extent, so
			 * reads of it already return zeroes.  There is
			 * nothing to zero here; skip to the next extent.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Returns with iolock held according to @iolock.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			error = 0;

	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
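		/*
		 * All locks have been dropped; clear *iolock so the caller
		 * knows not to unlock again on this error path.
		 */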
		*iolock = 0;
		return error;
	}

	new_size = *pos + *count;
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;

	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.
	 */
	if (*pos > ip->i_size)
		error = -xfs_zero_eof(ip, *pos, ip->i_size);

	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. xfs_ioend_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

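	/*
	 * Clear *iolock up front so that an early error return signals to
	 * the caller that no locks are held.
	 */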
	*iolock = 0;
	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
		*iolock = XFS_IOLOCK_EXCL;
	else
		*iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	if (mapping->nrpages) {
		WARN_ON(*iolock != XFS_IOLOCK_EXCL);
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
							FI_REMAPF_LOCKED);
		if (ret)
			return ret;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		xfs_ioend_wait(ip);
	else if (*iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		*iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	size_t			count = ocount;

	*iolock = XFS_IOLOCK_EXCL;
	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);
	/*
	 * If we just got an ENOSPC, flush the inode now that we aren't
	 * holding any page locks and retry *once*.
	 */
	if (ret == -ENOSPC && !enospc) {
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (ret)
			return ret;
		enospc = 1;
		goto write_retry;
	}
	current->backing_dev_info = NULL;
	return ret;
}

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			iolock;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);

	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

	if (ret <= 0)
		goto out_unlock;

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error, error2;

		xfs_rw_iunlock(ip, iolock);
		error = filemap_write_and_wait_range(mapping, pos, end);
		xfs_rw_ilock(ip, iolock);

		error2 = -xfs_file_fsync(file,
					 (file->f_flags & __O_SYNC) ? 0 : 1);
		if (error)
			ret = error;
		else if (error2)
			ret = error2;
	}

out_unlock:
	xfs_aio_write_newsize_update(ip);
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * An mmap()d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!)
 * and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};