/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

|  | 33 | #include "xfs.h" | 
|  | 34 | #include "xfs_macros.h" | 
|  | 35 | #include "xfs_types.h" | 
|  | 36 | #include "xfs_inum.h" | 
|  | 37 | #include "xfs_log.h" | 
|  | 38 | #include "xfs_trans.h" | 
|  | 39 | #include "xfs_sb.h" | 
|  | 40 | #include "xfs_ag.h" | 
|  | 41 | #include "xfs_dir.h" | 
|  | 42 | #include "xfs_dir2.h" | 
|  | 43 | #include "xfs_dmapi.h" | 
|  | 44 | #include "xfs_mount.h" | 
|  | 45 | #include "xfs_alloc_btree.h" | 
|  | 46 | #include "xfs_bmap_btree.h" | 
|  | 47 | #include "xfs_ialloc_btree.h" | 
|  | 48 | #include "xfs_itable.h" | 
|  | 49 | #include "xfs_btree.h" | 
|  | 50 | #include "xfs_ialloc.h" | 
|  | 51 | #include "xfs_alloc.h" | 
|  | 52 | #include "xfs_attr_sf.h" | 
|  | 53 | #include "xfs_dir_sf.h" | 
|  | 54 | #include "xfs_dir2_sf.h" | 
|  | 55 | #include "xfs_dinode.h" | 
|  | 56 | #include "xfs_inode_item.h" | 
|  | 57 | #include "xfs_inode.h" | 
|  | 58 | #include "xfs_bmap.h" | 
|  | 59 | #include "xfs_da_btree.h" | 
|  | 60 | #include "xfs_attr.h" | 
|  | 61 | #include "xfs_rw.h" | 
|  | 62 | #include "xfs_refcache.h" | 
|  | 63 | #include "xfs_error.h" | 
|  | 64 | #include "xfs_bit.h" | 
|  | 65 | #include "xfs_rtalloc.h" | 
|  | 66 | #include "xfs_quota.h" | 
|  | 67 | #include "xfs_utils.h" | 
|  | 68 | #include "xfs_trans_space.h" | 
|  | 69 | #include "xfs_dir_leaf.h" | 
|  | 70 | #include "xfs_mac.h" | 
|  | 71 | #include "xfs_log_priv.h" | 
|  | 72 |  | 
|  | 73 |  | 
/*
 * The maximum pathlen is 1024 bytes. Since the minimum file system
 * blocksize is 512 bytes, we can get a max of 2 extents back from
 * bmapi.
 */
#define SYMLINK_MAPS 2

/*
 * For xfs, we check that the file isn't too big to be opened by this kernel.
 * No other open action is required for regular files.  Devices are handled
 * through the specfs file system, pipes through fifofs.  Device and
 * fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively,
 * when a new vnode is first looked up or created.
 */
STATIC int
xfs_open(
	bhv_desc_t	*bdp,
	cred_t		*credp)
{
	int		mode;
	vnode_t		*vp;
	xfs_inode_t	*ip;

	vp = BHV_TO_VNODE(bdp);
	ip = XFS_BHVTOI(bdp);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return XFS_ERROR(EIO);

	/*
	 * If it's a directory with any blocks, read-ahead block 0
	 * as we're almost certain to have the next operation be a read there.
	 */
	if (vp->v_type == VDIR && ip->i_d.di_nextents > 0) {
		mode = xfs_ilock_map_shared(ip);
		if (ip->i_d.di_nextents > 0)
			(void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
		xfs_iunlock(ip, mode);
	}
	return 0;
}


/*
 * xfs_getattr
 */
STATIC int
xfs_getattr(
	bhv_desc_t	*bdp,
	vattr_t		*vap,
	int		flags,
	cred_t		*credp)
{
	xfs_inode_t	*ip;
	xfs_mount_t	*mp;
	vnode_t		*vp;

	vp  = BHV_TO_VNODE(bdp);
	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

	ip = XFS_BHVTOI(bdp);
	mp = ip->i_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	if (!(flags & ATTR_LAZY))
		xfs_ilock(ip, XFS_ILOCK_SHARED);

	vap->va_size = ip->i_d.di_size;
	if (vap->va_mask == XFS_AT_SIZE)
		goto all_done;

	vap->va_nblocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
	vap->va_nodeid = ip->i_ino;
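	/* With the ino64 mount option, inode numbers are reported offset into the 64-bit range. */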
#if XFS_BIG_INUMS
	vap->va_nodeid += mp->m_inoadd;
#endif
	vap->va_nlink = ip->i_d.di_nlink;

	/*
	 * Quick exit for non-stat callers
	 */
	if ((vap->va_mask &
	    ~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID|
	      XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0)
		goto all_done;

	/*
	 * Copy from in-core inode.
	 */
	vap->va_type = vp->v_type;
	vap->va_mode = ip->i_d.di_mode & MODEMASK;
	vap->va_uid = ip->i_d.di_uid;
	vap->va_gid = ip->i_d.di_gid;
	vap->va_projid = ip->i_d.di_projid;

	/*
	 * Check vnode type block/char vs. everything else.
	 * Do it with bitmask because that's faster than looking
	 * for multiple values individually.
	 */
	if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
		vap->va_rdev = 0;

		if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {

#if 0
			/* Large block sizes confuse various
			 * user space programs, so letting the
			 * stripe size through is not a good
			 * idea for now.
			 */
			vap->va_blocksize = mp->m_swidth ?
				/*
				 * If the underlying volume is a stripe, then
				 * return the stripe width in bytes as the
				 * recommended I/O size.
				 */
				(mp->m_swidth << mp->m_sb.sb_blocklog) :
				/*
				 * Return the largest of the preferred buffer
				 * sizes since doing small I/Os into larger
				 * buffers causes buffers to be decommissioned.
				 * The value returned is in bytes.
				 */
				(1 << (int)MAX(mp->m_readio_log,
					       mp->m_writeio_log));

#else
			vap->va_blocksize =
				/*
				 * Return the largest of the preferred buffer
				 * sizes since doing small I/Os into larger
				 * buffers causes buffers to be decommissioned.
				 * The value returned is in bytes.
				 */
				1 << (int)MAX(mp->m_readio_log,
					      mp->m_writeio_log);
#endif
		} else {

			/*
			 * If the file blocks are being allocated from a
			 * realtime partition, then return the inode's
			 * realtime extent size or the realtime volume's
			 * extent size.
			 */
			vap->va_blocksize = ip->i_d.di_extsize ?
				(ip->i_d.di_extsize << mp->m_sb.sb_blocklog) :
				(mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog);
		}
	} else {
		vap->va_rdev = ip->i_df.if_u2.if_rdev;
		vap->va_blocksize = BLKDEV_IOSIZE;
	}

	vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec;
	vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
	vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
	vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
	vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
	vap->va_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;

	/*
	 * Exit for stat callers.  See if any of the rest of the fields
	 * to be filled in are needed.
	 */
	if ((vap->va_mask &
	     (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
	      XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
		goto all_done;

	/*
	 * Convert di_flags to xflags.
	 */
	vap->va_xflags = xfs_ip2xflags(ip);

	/*
	 * Exit for inode revalidate.  See if any of the rest of
	 * the fields to be filled in are needed.
	 */
	if ((vap->va_mask &
	     (XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
	      XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
		goto all_done;

	vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog;
	vap->va_nextents =
		(ip->i_df.if_flags & XFS_IFEXTENTS) ?
			ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) :
			ip->i_d.di_nextents;
	if (ip->i_afp)
		vap->va_anextents =
			(ip->i_afp->if_flags & XFS_IFEXTENTS) ?
				ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) :
				ip->i_d.di_anextents;
	else
		vap->va_anextents = 0;
	vap->va_gen = ip->i_d.di_gen;

 all_done:
	if (!(flags & ATTR_LAZY))
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return 0;
}


/*
 * xfs_setattr
 */
int
xfs_setattr(
	bhv_desc_t		*bdp,
	vattr_t			*vap,
	int			flags,
	cred_t			*credp)
{
	xfs_inode_t		*ip;
	xfs_trans_t		*tp;
	xfs_mount_t		*mp;
	int			mask;
	int			code;
	uint			lock_flags;
	uint			commit_flags=0;
	uid_t			uid=0, iuid=0;
	gid_t			gid=0, igid=0;
	int			timeflags = 0;
	vnode_t			*vp;
	xfs_prid_t		projid=0, iprojid=0;
	int			mandlock_before, mandlock_after;
	struct xfs_dquot	*udqp, *gdqp, *olddquot1, *olddquot2;
	int			file_owner;
	int			need_iolock = 1;

	vp = BHV_TO_VNODE(bdp);
	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
		return XFS_ERROR(EROFS);

	/*
	 * Cannot set certain attributes.
	 */
	mask = vap->va_mask;
	if (mask & XFS_AT_NOSET) {
		return XFS_ERROR(EINVAL);
	}

	ip = XFS_BHVTOI(bdp);
	mp = ip->i_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * Timestamps do not need to be logged and hence do not
	 * need to be done within a transaction.
	 */
	if (mask & XFS_AT_UPDTIMES) {
		ASSERT((mask & ~XFS_AT_UPDTIMES) == 0);
		timeflags = ((mask & XFS_AT_UPDATIME) ? XFS_ICHGTIME_ACC : 0) |
			    ((mask & XFS_AT_UPDCTIME) ? XFS_ICHGTIME_CHG : 0) |
			    ((mask & XFS_AT_UPDMTIME) ? XFS_ICHGTIME_MOD : 0);
		xfs_ichgtime(ip, timeflags);
		return 0;
	}

	olddquot1 = olddquot2 = NULL;
	udqp = gdqp = NULL;

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on
	 * disk, before we start any other transactions. Trying to do this
	 * later is messy. We don't care to take a readlock to look at the
	 * ids in the inode here, because we can't hold it across the
	 * trans_reserve. If the IDs do change before we take the ilock,
	 * we're covered because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) &&
	    (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID))) {
		uint	qflags = 0;

		if ((mask & XFS_AT_UID) && XFS_IS_UQUOTA_ON(mp)) {
			uid = vap->va_uid;
			qflags |= XFS_QMOPT_UQUOTA;
		} else {
			uid = ip->i_d.di_uid;
		}
		if ((mask & XFS_AT_GID) && XFS_IS_GQUOTA_ON(mp)) {
			gid = vap->va_gid;
			qflags |= XFS_QMOPT_GQUOTA;
		} else {
			gid = ip->i_d.di_gid;
		}
		if ((mask & XFS_AT_PROJID) && XFS_IS_PQUOTA_ON(mp)) {
			projid = vap->va_projid;
			qflags |= XFS_QMOPT_PQUOTA;
		} else {
			projid = ip->i_d.di_projid;
		}
		/*
		 * We take a reference when we initialize udqp and gdqp,
		 * so it is important that we never blindly double trip on
		 * the same variable. See xfs_create() for an example.
		 */
		ASSERT(udqp == NULL);
		ASSERT(gdqp == NULL);
		code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, projid, qflags,
					 &udqp, &gdqp);
		if (code)
			return (code);
	}

	/*
	 * For the other attributes, we acquire the inode lock and
	 * first do an error checking pass.
	 */
	tp = NULL;
	lock_flags = XFS_ILOCK_EXCL;
	ASSERT(flags & ATTR_NOLOCK ? flags & ATTR_DMI : 1);
	if (flags & ATTR_NOLOCK)
		need_iolock = 0;
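	/*
	 * Size changes allocate their transaction later, after the part of
	 * the truncate that must run without the inode lock; everything
	 * else can reserve log space for a plain inode change up front.
	 */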
	if (!(mask & XFS_AT_SIZE)) {
		if ((mask != (XFS_AT_CTIME|XFS_AT_ATIME|XFS_AT_MTIME)) ||
		    (mp->m_flags & XFS_MOUNT_WSYNC)) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
			commit_flags = 0;
			if ((code = xfs_trans_reserve(tp, 0,
						     XFS_ICHANGE_LOG_RES(mp), 0,
						     0, 0))) {
				lock_flags = 0;
				goto error_return;
			}
		}
	} else {
		if (DM_EVENT_ENABLED (vp->v_vfsp, ip, DM_EVENT_TRUNCATE) &&
		    !(flags & ATTR_DMI)) {
			int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR;
			code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, vp,
				vap->va_size, 0, dmflags, NULL);
			if (code) {
				lock_flags = 0;
				goto error_return;
			}
		}
		if (need_iolock)
			lock_flags |= XFS_IOLOCK_EXCL;
	}

	xfs_ilock(ip, lock_flags);

	/* boolean: are we the file owner? */
	file_owner = (current_fsuid(credp) == ip->i_d.di_uid);

	/*
	 * Change various properties of a file.
	 * Only the owner or users with CAP_FOWNER
	 * capability may do these things.
	 */
	if (mask &
	    (XFS_AT_MODE|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_UID|
	     XFS_AT_GID|XFS_AT_PROJID)) {
		/*
		 * CAP_FOWNER overrides the following restrictions:
		 *
		 * The user ID of the calling process must be equal
		 * to the file owner ID, except in cases where the
		 * CAP_FSETID capability is applicable.
		 */
		if (!file_owner && !capable(CAP_FOWNER)) {
			code = XFS_ERROR(EPERM);
			goto error_return;
		}

		/*
		 * CAP_FSETID overrides the following restrictions:
		 *
		 * The effective user ID of the calling process shall match
		 * the file owner when setting the set-user-ID and
		 * set-group-ID bits on that file.
		 *
		 * The effective group ID or one of the supplementary group
		 * IDs of the calling process shall match the group owner of
		 * the file when setting the set-group-ID bit on that file.
		 */
		if (mask & XFS_AT_MODE) {
			mode_t m = 0;

			if ((vap->va_mode & S_ISUID) && !file_owner)
				m |= S_ISUID;
			if ((vap->va_mode & S_ISGID) &&
			    !in_group_p((gid_t)ip->i_d.di_gid))
				m |= S_ISGID;
#if 0
			/* Linux allows this, Irix doesn't. */
			if ((vap->va_mode & S_ISVTX) && vp->v_type != VDIR)
				m |= S_ISVTX;
#endif
			if (m && !capable(CAP_FSETID))
				vap->va_mode &= ~m;
		}
	}

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 * If the system was configured with the "restricted_chown"
	 * option, the owner is not permitted to give away the file,
	 * and can change the group id only to a group of which he
	 * or she is a member.
	 */
	if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) {
		/*
		 * These IDs could have changed since we last looked at them.
		 * But, we're assured that if the ownership did change
		 * while we didn't have the inode locked, the inode's
		 * dquot(s) would have changed also.
		 */
		iuid = ip->i_d.di_uid;
		iprojid = ip->i_d.di_projid;
		igid = ip->i_d.di_gid;
		gid = (mask & XFS_AT_GID) ? vap->va_gid : igid;
		uid = (mask & XFS_AT_UID) ? vap->va_uid : iuid;
		projid = (mask & XFS_AT_PROJID) ? (xfs_prid_t)vap->va_projid :
			 iprojid;

		/*
		 * CAP_CHOWN overrides the following restrictions:
		 *
		 * If _POSIX_CHOWN_RESTRICTED is defined, this capability
		 * shall override the restriction that a process cannot
		 * change the user ID of a file it owns and the restriction
		 * that the group ID supplied to the chown() function
		 * shall be equal to either the group ID or one of the
		 * supplementary group IDs of the calling process.
		 */
		if (restricted_chown &&
		    (iuid != uid || (igid != gid &&
				     !in_group_p((gid_t)gid))) &&
		    !capable(CAP_CHOWN)) {
			code = XFS_ERROR(EPERM);
			goto error_return;
		}
		/*
		 * Do a quota reservation only if uid/projid/gid is actually
		 * going to change.
		 */
		if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
		    (XFS_IS_PQUOTA_ON(mp) && iprojid != projid) ||
		    (XFS_IS_GQUOTA_ON(mp) && igid != gid)) {
			ASSERT(tp);
			code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
						capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (code)	/* out of quota */
				goto error_return;
		}
	}

	/*
	 * Truncate file.  Must have write permission and not be a directory.
	 */
	if (mask & XFS_AT_SIZE) {
		/* Short circuit the truncate case for zero length files */
		if ((vap->va_size == 0) &&
		    (ip->i_d.di_size == 0) && (ip->i_d.di_nextents == 0)) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			lock_flags &= ~XFS_ILOCK_EXCL;
			if (mask & XFS_AT_CTIME)
				xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
			code = 0;
			goto error_return;
		}

		if (vp->v_type == VDIR) {
			code = XFS_ERROR(EISDIR);
			goto error_return;
		} else if (vp->v_type != VREG) {
			code = XFS_ERROR(EINVAL);
			goto error_return;
		}
		/*
		 * Make sure that the dquots are attached to the inode.
		 */
		if ((code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED)))
			goto error_return;
	}

	/*
	 * Change file access or modified times.
	 */
	if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) {
		if (!file_owner) {
			if ((flags & ATTR_UTIME) &&
			    !capable(CAP_FOWNER)) {
				code = XFS_ERROR(EPERM);
				goto error_return;
			}
		}
	}

	/*
	 * Change extent size or realtime flag.
	 */
	if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
		/*
		 * Can't change extent size if any extents are allocated.
		 */
		if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
		    (mask & XFS_AT_EXTSIZE) &&
		    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
		     vap->va_extsize) ) {
			code = XFS_ERROR(EINVAL);	/* EFBIG? */
			goto error_return;
		}

		/*
		 * Can't set extent size unless the file is marked, or
		 * about to be marked as a realtime file.
		 *
		 * This check will be removed when fixed size extents
		 * with buffered data writes is implemented.
		 *
		 */
		if ((mask & XFS_AT_EXTSIZE)			&&
		    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
		     vap->va_extsize) &&
		    (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
		       ((mask & XFS_AT_XFLAGS) &&
			(vap->va_xflags & XFS_XFLAG_REALTIME))))) {
			code = XFS_ERROR(EINVAL);
			goto error_return;
		}

		/*
		 * Can't change realtime flag if any extents are allocated.
		 */
		if (ip->i_d.di_nextents && (mask & XFS_AT_XFLAGS) &&
		    (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) !=
		    (vap->va_xflags & XFS_XFLAG_REALTIME)) {
			code = XFS_ERROR(EINVAL);	/* EFBIG? */
			goto error_return;
		}
		/*
		 * Extent size must be a multiple of the appropriate block
		 * size, if set at all.
		 */
		if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) {
			xfs_extlen_t	size;

			if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
			    ((mask & XFS_AT_XFLAGS) &&
			     (vap->va_xflags & XFS_XFLAG_REALTIME))) {
				size = mp->m_sb.sb_rextsize <<
				       mp->m_sb.sb_blocklog;
			} else {
				size = mp->m_sb.sb_blocksize;
			}
			if (vap->va_extsize % size) {
				code = XFS_ERROR(EINVAL);
				goto error_return;
			}
		}
		/*
		 * If realtime flag is set then must have realtime data.
		 */
		if ((mask & XFS_AT_XFLAGS) &&
		    (vap->va_xflags & XFS_XFLAG_REALTIME)) {
			if ((mp->m_sb.sb_rblocks == 0) ||
			    (mp->m_sb.sb_rextsize == 0) ||
			    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
				code = XFS_ERROR(EINVAL);
				goto error_return;
			}
		}

		/*
		 * Can't modify an immutable/append-only file unless
		 * we have appropriate permission.
		 */
		if ((mask & XFS_AT_XFLAGS) &&
		    (ip->i_d.di_flags &
			(XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
		     (vap->va_xflags &
			(XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
		    !capable(CAP_LINUX_IMMUTABLE)) {
			code = XFS_ERROR(EPERM);
			goto error_return;
		}
	}

	/*
	 * Now we can make the changes.  Before we join the inode
	 * to the transaction, if XFS_AT_SIZE is set then take care of
	 * the part of the truncation that must be done without the
	 * inode lock.  This needs to be done before joining the inode
	 * to the transaction, because the inode cannot be unlocked
	 * once it is a part of the transaction.
	 */
	if (mask & XFS_AT_SIZE) {
		code = 0;
		if (vap->va_size > ip->i_d.di_size)
			code = xfs_igrow_start(ip, vap->va_size, credp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (!code)
			code = xfs_itruncate_data(ip, vap->va_size);
		if (code) {
			ASSERT(tp == NULL);
			lock_flags &= ~XFS_ILOCK_EXCL;
			ASSERT(lock_flags == XFS_IOLOCK_EXCL);
			goto error_return;
		}
		tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
		if ((code = xfs_trans_reserve(tp, 0,
					     XFS_ITRUNCATE_LOG_RES(mp), 0,
					     XFS_TRANS_PERM_LOG_RES,
					     XFS_ITRUNCATE_LOG_COUNT))) {
			xfs_trans_cancel(tp, 0);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return code;
		}
		commit_flags = XFS_TRANS_RELEASE_LOG_RES;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

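	/*
	 * Join the inode to the transaction; xfs_trans_ihold() keeps our
	 * reference and the inode lock across the commit so we can unlock
	 * it ourselves below.
	 */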
	if (tp) {
		xfs_trans_ijoin(tp, ip, lock_flags);
		xfs_trans_ihold(tp, ip);
	}

	/* determine whether mandatory locking mode changes */
	mandlock_before = MANDLOCK(vp, ip->i_d.di_mode);

	/*
	 * Truncate file.  Must have write permission and not be a directory.
	 */
	if (mask & XFS_AT_SIZE) {
		if (vap->va_size > ip->i_d.di_size) {
			xfs_igrow_finish(tp, ip, vap->va_size,
			    !(flags & ATTR_DMI));
		} else if ((vap->va_size <= ip->i_d.di_size) ||
			   ((vap->va_size == 0) && ip->i_d.di_nextents)) {
			/*
			 * signal a sync transaction unless
			 * we're truncating an already unlinked
			 * file on a wsync filesystem
			 */
			code = xfs_itruncate_finish(&tp, ip,
					    (xfs_fsize_t)vap->va_size,
					    XFS_DATA_FORK,
					    ((ip->i_d.di_nlink != 0 ||
					      !(mp->m_flags & XFS_MOUNT_WSYNC))
					     ? 1 : 0));
			if (code) {
				goto abort_return;
			}
		}
		/*
		 * Have to do this even if the file's size doesn't change.
		 */
		timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
	}

	/*
	 * Change file access modes.
	 */
	if (mask & XFS_AT_MODE) {
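		/* Keep the file type bits; replace only the permission bits. */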
		ip->i_d.di_mode &= S_IFMT;
		ip->i_d.di_mode |= vap->va_mode & ~S_IFMT;

		xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
		timeflags |= XFS_ICHGTIME_CHG;
	}

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 * If the system was configured with the "restricted_chown"
	 * option, the owner is not permitted to give away the file,
	 * and can change the group id only to a group of which he
	 * or she is a member.
	 */
	if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) {
		/*
		 * CAP_FSETID overrides the following restrictions:
		 *
		 * The set-user-ID and set-group-ID bits of a file will be
		 * cleared upon successful return from chown()
		 */
		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
		    !capable(CAP_FSETID)) {
			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
		}

		/*
		 * Change the ownerships and register quota modifications
		 * in the transaction.
		 */
		if (iuid != uid) {
			if (XFS_IS_UQUOTA_ON(mp)) {
				ASSERT(mask & XFS_AT_UID);
				ASSERT(udqp);
				olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
							&ip->i_udquot, udqp);
			}
			ip->i_d.di_uid = uid;
		}
		if (igid != gid) {
			if (XFS_IS_GQUOTA_ON(mp)) {
				ASSERT(!XFS_IS_PQUOTA_ON(mp));
				ASSERT(mask & XFS_AT_GID);
				ASSERT(gdqp);
				olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
							&ip->i_gdquot, gdqp);
			}
			ip->i_d.di_gid = gid;
		}
		if (iprojid != projid) {
			if (XFS_IS_PQUOTA_ON(mp)) {
				ASSERT(!XFS_IS_GQUOTA_ON(mp));
				ASSERT(mask & XFS_AT_PROJID);
				ASSERT(gdqp);
				olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
							&ip->i_gdquot, gdqp);
			}
			ip->i_d.di_projid = projid;
			/*
			 * We may have to rev the inode as well as
			 * the superblock version number since projids didn't
			 * exist before DINODE_VERSION_2 and SB_VERSION_NLINK.
			 */
			if (ip->i_d.di_version == XFS_DINODE_VERSION_1)
				xfs_bump_ino_vers2(tp, ip);
		}

		xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
		timeflags |= XFS_ICHGTIME_CHG;
	}


	/*
	 * Change file access or modified times.
	 */
	if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) {
		if (mask & XFS_AT_ATIME) {
			ip->i_d.di_atime.t_sec = vap->va_atime.tv_sec;
			ip->i_d.di_atime.t_nsec = vap->va_atime.tv_nsec;
			ip->i_update_core = 1;
			timeflags &= ~XFS_ICHGTIME_ACC;
		}
		if (mask & XFS_AT_MTIME) {
			ip->i_d.di_mtime.t_sec = vap->va_mtime.tv_sec;
			ip->i_d.di_mtime.t_nsec = vap->va_mtime.tv_nsec;
			timeflags &= ~XFS_ICHGTIME_MOD;
			timeflags |= XFS_ICHGTIME_CHG;
		}
		if (tp && (flags & ATTR_UTIME))
			xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
	}

	/*
	 * Change XFS-added attributes.
	 */
	if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
		if (mask & XFS_AT_EXTSIZE) {
			/*
			 * Converting bytes to fs blocks.
			 */
			ip->i_d.di_extsize = vap->va_extsize >>
				mp->m_sb.sb_blocklog;
		}
		if (mask & XFS_AT_XFLAGS) {
			uint	di_flags;

			/* can't set PREALLOC this way, just preserve it */
			di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
			if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
				di_flags |= XFS_DIFLAG_IMMUTABLE;
			if (vap->va_xflags & XFS_XFLAG_APPEND)
				di_flags |= XFS_DIFLAG_APPEND;
			if (vap->va_xflags & XFS_XFLAG_SYNC)
				di_flags |= XFS_DIFLAG_SYNC;
			if (vap->va_xflags & XFS_XFLAG_NOATIME)
				di_flags |= XFS_DIFLAG_NOATIME;
			if (vap->va_xflags & XFS_XFLAG_NODUMP)
				di_flags |= XFS_DIFLAG_NODUMP;
			if (vap->va_xflags & XFS_XFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
				if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
					di_flags |= XFS_DIFLAG_NOSYMLINKS;
			} else {
				if (vap->va_xflags & XFS_XFLAG_REALTIME) {
					di_flags |= XFS_DIFLAG_REALTIME;
					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
				} else {
					ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
				}
			}
			ip->i_d.di_flags = di_flags;
		}
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		timeflags |= XFS_ICHGTIME_CHG;
	}

	/*
	 * Change file inode change time only if XFS_AT_CTIME set
	 * AND we have been called by a DMI function.
	 */

	if ( (flags & ATTR_DMI) && (mask & XFS_AT_CTIME) ) {
		ip->i_d.di_ctime.t_sec = vap->va_ctime.tv_sec;
		ip->i_d.di_ctime.t_nsec = vap->va_ctime.tv_nsec;
		ip->i_update_core = 1;
		timeflags &= ~XFS_ICHGTIME_CHG;
	}

	/*
	 * Send out timestamp changes that need to be set to the
	 * current time.  Not done when called by a DMI function.
	 */
	if (timeflags && !(flags & ATTR_DMI))
		xfs_ichgtime(ip, timeflags);

	XFS_STATS_INC(xs_ig_attrchg);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 * This is slightly sub-optimal in that truncates require
	 * two sync transactions instead of one for wsync filesystems.
	 * One for the truncate and one for the timestamps since we
	 * don't want to change the timestamps unless we're sure the
	 * truncate worked.  Truncates are less than 1% of the laddis
	 * mix so this probably isn't worth the trouble to optimize.
	 */
	code = 0;
	if (tp) {
		if (mp->m_flags & XFS_MOUNT_WSYNC)
			xfs_trans_set_sync(tp);

		code = xfs_trans_commit(tp, commit_flags, NULL);
	}

	/*
	 * If the (regular) file's mandatory locking mode changed, then
	 * notify the vnode.  We do this under the inode lock to prevent
	 * racing calls to vop_vnode_change.
	 */
	mandlock_after = MANDLOCK(vp, ip->i_d.di_mode);
	if (mandlock_before != mandlock_after) {
		VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_ENF_LOCKING,
				 mandlock_after);
	}

	xfs_iunlock(ip, lock_flags);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	XFS_QM_DQRELE(mp, olddquot1);
	XFS_QM_DQRELE(mp, olddquot2);
	XFS_QM_DQRELE(mp, udqp);
	XFS_QM_DQRELE(mp, gdqp);

	if (code) {
		return code;
	}

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_ATTRIBUTE) &&
	    !(flags & ATTR_DMI)) {
		(void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, vp, DM_RIGHT_NULL,
					NULL, DM_RIGHT_NULL, NULL, NULL,
					0, 0, AT_DELAY_FLAG(flags));
	}
	return 0;

 abort_return:
	commit_flags |= XFS_TRANS_ABORT;
	/* FALLTHROUGH */
 error_return:
	XFS_QM_DQRELE(mp, udqp);
	XFS_QM_DQRELE(mp, gdqp);
	if (tp) {
		xfs_trans_cancel(tp, commit_flags);
	}
	if (lock_flags != 0) {
		xfs_iunlock(ip, lock_flags);
	}
	return code;
}


/*
 * xfs_access
 * Null conversion from vnode mode bits to inode mode bits, as in efs.
 */
STATIC int
xfs_access(
	bhv_desc_t	*bdp,
	int		mode,
	cred_t		*credp)
{
	xfs_inode_t	*ip;
	int		error;

	vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__,
					   (inst_t *)__return_address);

	ip = XFS_BHVTOI(bdp);
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_iaccess(ip, mode, credp);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}


/*
 * xfs_readlink
 *
 */
STATIC int
xfs_readlink(
	bhv_desc_t	*bdp,
	uio_t		*uiop,
	int		ioflags,
	cred_t		*credp)
{
	xfs_inode_t	*ip;
	int		count;
	xfs_off_t	offset;
	int		pathlen;
	vnode_t		*vp;
	int		error = 0;
	xfs_mount_t	*mp;
	int		nmaps;
	xfs_bmbt_irec_t	mval[SYMLINK_MAPS];
	xfs_daddr_t	d;
	int		byte_cnt;
	int		n;
	xfs_buf_t	*bp;

	vp = BHV_TO_VNODE(bdp);
	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

	ip = XFS_BHVTOI(bdp);
	mp = ip->i_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	xfs_ilock(ip, XFS_ILOCK_SHARED);

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);

	offset = uiop->uio_offset;
	count = uiop->uio_resid;

	if (offset < 0) {
		error = XFS_ERROR(EINVAL);
		goto error_return;
	}
	if (count <= 0) {
		error = 0;
		goto error_return;
	}

	if (!(ioflags & IO_INVIS)) {
		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
	}

	/*
	 * See if the symlink is stored inline.
	 */
	pathlen = (int)ip->i_d.di_size;

	if (ip->i_df.if_flags & XFS_IFINLINE) {
		error = uio_read(ip->i_df.if_u1.if_data, pathlen, uiop);
	}
	else {
		/*
		 * Symlink not inline.  Call bmap to get it in.
		 */
		nmaps = SYMLINK_MAPS;

		error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen),
				  0, NULL, 0, mval, &nmaps, NULL);

		if (error) {
			goto error_return;
		}

		for (n = 0; n < nmaps; n++) {
			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
			bp = xfs_buf_read(mp->m_ddev_targp, d,
				      BTOBB(byte_cnt), 0);
			error = XFS_BUF_GETERROR(bp);
			if (error) {
				xfs_ioerror_alert("xfs_readlink",
					  ip->i_mount, bp, XFS_BUF_ADDR(bp));
				xfs_buf_relse(bp);
				goto error_return;
			}
			if (pathlen < byte_cnt)
				byte_cnt = pathlen;
			pathlen -= byte_cnt;

			error = uio_read(XFS_BUF_PTR(bp), byte_cnt, uiop);
			xfs_buf_relse (bp);
		}

	}


 error_return:

	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return error;
}


/*
 * xfs_fsync
 *
 * This is called to sync the inode and its data out to disk.
 * We need to hold the I/O lock while flushing the data, and
 * the inode lock while flushing the inode.  The inode lock CANNOT
 * be held while flushing the data, so acquire after we're done
 * with that.
 */
STATIC int
xfs_fsync(
	bhv_desc_t	*bdp,
	int		flag,
	cred_t		*credp,
	xfs_off_t	start,
	xfs_off_t	stop)
{
	xfs_inode_t	*ip;
	xfs_trans_t	*tp;
	int		error;

	vn_trace_entry(BHV_TO_VNODE(bdp),
			__FUNCTION__, (inst_t *)__return_address);

	ip = XFS_BHVTOI(bdp);

	ASSERT(start >= 0 && stop >= -1);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return XFS_ERROR(EIO);

	/*
	 * We always need to make sure that the required inode state
	 * is safe on disk.  The vnode might be clean but we still might
	 * need to force the log because of committed transactions that
	 * haven't hit the disk yet.  Likewise, there could be unflushed
	 * non-transactional changes to the inode core that have to go
	 * to disk.
	 *
	 * The following code depends on one assumption:  that
	 * any transaction that changes an inode logs the core
	 * because it has to change some field in the inode core
	 * (typically nextents or nblocks).  That assumption
	 * implies that any transactions against an inode will
	 * catch any non-transactional updates.  If inode-altering
	 * transactions exist that violate this assumption, the
	 * code breaks.  Right now, it figures that if the involved
	 * update_* field is clear and the inode is unpinned, the
	 * inode is clean.  Either it's been flushed or it's been
	 * committed and the commit has hit the disk unpinning the inode.
	 * (Note that xfs_inode_item_format() called at commit clears
	 * the update_* fields.)
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/* If we are flushing data then we care about update_size
	 * being set, otherwise we care about update_core
	 */
	if ((flag & FSYNC_DATA) ?
			(ip->i_update_size == 0) :
			(ip->i_update_core == 0)) {
		/*
		 * Timestamps/size haven't changed since last inode
		 * flush or inode transaction commit.  That means
		 * either nothing got written or a transaction
		 * committed which caught the updates.  If the
		 * latter happened and the transaction hasn't
		 * hit the disk yet, the inode will still
		 * be pinned.  If it is, force the log.
		 */

		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		if (xfs_ipincount(ip)) {
			xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
				      XFS_LOG_FORCE |
				      ((flag & FSYNC_WAIT)
				       ? XFS_LOG_SYNC : 0));
		}
		error = 0;
	} else {
		/*
		 * Kick off a transaction to log the inode
		 * core to get the updates.  Make it
		 * sync if FSYNC_WAIT is passed in (which
		 * is done by everybody but specfs).  The
		 * sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
		if ((error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(ip->i_mount),
				0, 0, 0)))  {
			xfs_trans_cancel(tp, 0);
			return error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed
		 * ourselves out of the way during trans_reserve
		 * which would flush the inode.  But there's no
		 * guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer
		 * could be pinned anyway if it's part of an
		 * inode in another recent transaction.  So we
		 * play it safe and fire off the transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		if (flag & FSYNC_WAIT)
			xfs_trans_set_sync(tp);
		error = xfs_trans_commit(tp, 0, NULL);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

/*
 * This is called by xfs_inactive to free any blocks beyond eof,
 * when the link count isn't zero.
 */
STATIC int
xfs_inactive_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_d.di_size));
	last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	map_len = last_fsb - end_fsb;
	if (map_len <= 0)
		return (0);

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0,
			  NULL, 0, &imap, &nimaps, NULL);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
			return (error);

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		/*
		 * Do the xfs_itruncate_start() call before
		 * reserving any log space because
		 * itruncate_start will call into the buffer
		 * cache and we can't do that within a transaction.
		 */
		xfs_ilock(ip, XFS_IOLOCK_EXCL);
		xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
				    ip->i_d.di_size);

		error = xfs_trans_reserve(tp, 0,
					  XFS_ITRUNCATE_LOG_RES(mp),
					  0, XFS_TRANS_PERM_LOG_RES,
					  XFS_ITRUNCATE_LOG_COUNT);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return (error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip,
				XFS_IOLOCK_EXCL |
				XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);

		error = xfs_itruncate_finish(&tp, ip,
					     ip->i_d.di_size,
					     XFS_DATA_FORK,
					     0);
		/*
		 * If we get an error at this point we
		 * simply don't bother truncating the file.
		 */
		if (error) {
			xfs_trans_cancel(tp,
					 (XFS_TRANS_RELEASE_LOG_RES |
					  XFS_TRANS_ABORT));
		} else {
			error = xfs_trans_commit(tp,
						XFS_TRANS_RELEASE_LOG_RES,
						NULL);
		}
		xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	}
	return (error);
}

/*
 * Free a symlink that has blocks associated with it.
 */
STATIC int
xfs_inactive_symlink_rmt(
	xfs_inode_t	*ip,
	xfs_trans_t	**tpp)
{
	xfs_buf_t	*bp;
	int		committed;
	int		done;
	int		error;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	int		i;
	xfs_mount_t	*mp;
	xfs_bmbt_irec_t	mval[SYMLINK_MAPS];
	int		nmaps;
	xfs_trans_t	*ntp;
	int		size;
	xfs_trans_t	*tp;

	tp = *tpp;
	mp = ip->i_mount;
	ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip));
	/*
	 * We're freeing a symlink that has some
	 * blocks allocated to it.  Free the
	 * blocks here.  We know that we've got
	 * either 1 or 2 extents and that we can
	 * free them all in one bunmapi call.
	 */
	ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, 0);
		*tpp = NULL;
		return error;
	}
	/*
	 * Lock the inode, fix the size, and join it to the transaction.
	 * Hold it so in the normal path, we still have it locked for
	 * the second transaction.  In the error paths we need it
	 * held so the cancel won't rele it, see below.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	size = (int)ip->i_d.di_size;
	ip->i_d.di_size = 0;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Find the block(s) so we can inval and unmap them.
	 */
	done = 0;
	XFS_BMAP_INIT(&free_list, &first_block);
	nmaps = sizeof(mval) / sizeof(mval[0]);
	if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
			XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
			&free_list)))
		goto error0;
	/*
	 * Invalidate the block(s).
	 */
	for (i = 0; i < nmaps; i++) {
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Unmap the dead block(s) to the free_list.
	 */
	if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
			&first_block, &free_list, &done)))
		goto error1;
	ASSERT(done);
	/*
	 * Commit the first transaction.  This logs the EFI and the inode.
	 */
	if ((error = xfs_bmap_finish(&tp, &free_list, first_block, &committed)))
		goto error1;
	/*
	 * The transaction must have been committed, since there were
	 * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
	 * The new tp has the extent freeing and EFDs.
	 */
	ASSERT(committed);
	/*
	 * The first xact was committed, so add the inode to the new one.
	 * Mark it dirty so it will be logged and moved forward in the log as
	 * part of every commit.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Get a new, empty transaction to return to our caller.
	 */
	ntp = xfs_trans_dup(tp);
	/*
	 * Commit the transaction containing extent freeing and EFD's.
	 * If we get an error on the commit here or on the reserve below,
	 * we need to unlock the inode since the new transaction doesn't
	 * have the inode attached.
	 */
	error = xfs_trans_commit(tp, 0, NULL);
	tp = ntp;
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error0;
	}
	/*
	 * Remove the memory for extent descriptions (just bookkeeping).
	 */
	if (ip->i_df.if_bytes)
		xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
	ASSERT(ip->i_df.if_bytes == 0);
	/*
	 * Put an itruncate log reservation in the new transaction
	 * for our caller.
	 */
	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error0;
	}
	/*
	 * Return with the inode locked but not joined to the transaction.
	 */
	*tpp = tp;
	return 0;

 error1:
	xfs_bmap_cancel(&free_list);
 error0:
	/*
	 * Have to come here with the inode locked and either
	 * (held and in the transaction) or (not in the transaction).
	 * If the inode isn't held then cancel would iput it, but
	 * that's wrong since this is inactive and the vnode ref
	 * count is 0 already.
	 * Cancel won't do anything to the inode if held, but it still
	 * needs to be locked until the cancel is done, if it was
	 * joined to the transaction.
	 */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	*tpp = NULL;
	return error;

}

|  | 1471 | STATIC int | 
|  | 1472 | xfs_inactive_symlink_local( | 
|  | 1473 | xfs_inode_t	*ip, | 
|  | 1474 | xfs_trans_t	**tpp) | 
|  | 1475 | { | 
|  | 1476 | int		error; | 
|  | 1477 |  | 
|  | 1478 | ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip)); | 
|  | 1479 | /* | 
|  | 1480 | * We're freeing a symlink which fit into | 
|  | 1481 | * the inode.  Just free the memory used | 
|  | 1482 | * to hold the old symlink. | 
|  | 1483 | */ | 
|  | 1484 | error = xfs_trans_reserve(*tpp, 0, | 
|  | 1485 | XFS_ITRUNCATE_LOG_RES(ip->i_mount), | 
|  | 1486 | 0, XFS_TRANS_PERM_LOG_RES, | 
|  | 1487 | XFS_ITRUNCATE_LOG_COUNT); | 
|  | 1488 |  | 
|  | 1489 | if (error) { | 
|  | 1490 | xfs_trans_cancel(*tpp, 0); | 
|  | 1491 | *tpp = NULL; | 
|  | 1492 | return (error); | 
|  | 1493 | } | 
|  | 1494 | xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 
|  | 1495 |  | 
|  | 1496 | /* | 
|  | 1497 | * Zero length symlinks _can_ exist. | 
|  | 1498 | */ | 
|  | 1499 | if (ip->i_df.if_bytes > 0) { | 
|  | 1500 | xfs_idata_realloc(ip, | 
|  | 1501 | -(ip->i_df.if_bytes), | 
|  | 1502 | XFS_DATA_FORK); | 
|  | 1503 | ASSERT(ip->i_df.if_bytes == 0); | 
|  | 1504 | } | 
|  | 1505 | return (0); | 
|  | 1506 | } | 
|  | 1507 |  | 
|  | 1508 | /* | 
|  | 1509 | * Tear down the attribute fork of an inode that is being inactivated. | 
|  | 1510 | */ | 
|  | 1511 | STATIC int | 
|  | 1512 | xfs_inactive_attrs( | 
|  | 1513 | xfs_inode_t	*ip, | 
|  | 1514 | xfs_trans_t	**tpp) | 
|  | 1515 | { | 
|  | 1516 | xfs_trans_t	*tp; | 
|  | 1517 | int		error; | 
|  | 1518 | xfs_mount_t	*mp; | 
|  | 1519 |  | 
|  | 1520 | ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); | 
|  | 1521 | tp = *tpp; | 
|  | 1522 | mp = ip->i_mount; | 
|  | 1523 | ASSERT(ip->i_d.di_forkoff != 0); | 
|  | 1524 | xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 1525 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 1526 |  | 
|  | 1527 | error = xfs_attr_inactive(ip); | 
|  | 1528 | if (error) { | 
|  | 1529 | *tpp = NULL; | 
|  | 1530 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 
|  | 1531 | return (error); /* goto out*/ | 
|  | 1532 | } | 
|  | 1533 |  | 
|  | 1534 | tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); | 
|  | 1535 | error = xfs_trans_reserve(tp, 0, | 
|  | 1536 | XFS_IFREE_LOG_RES(mp), | 
|  | 1537 | 0, XFS_TRANS_PERM_LOG_RES, | 
|  | 1538 | XFS_INACTIVE_LOG_COUNT); | 
|  | 1539 | if (error) { | 
|  | 1540 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); | 
|  | 1541 | xfs_trans_cancel(tp, 0); | 
|  | 1542 | *tpp = NULL; | 
|  | 1543 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 
|  | 1544 | return (error); | 
|  | 1545 | } | 
|  | 1546 |  | 
|  | 1547 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 1548 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 
|  | 1549 | xfs_trans_ihold(tp, ip); | 
|  | 1550 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | 
|  | 1551 |  | 
|  | 1552 | ASSERT(ip->i_d.di_anextents == 0); | 
|  | 1553 |  | 
|  | 1554 | *tpp = tp; | 
|  | 1555 | return (0); | 
|  | 1556 | } | 
|  | 1557 |  | 
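|  |  | /* | 
|  |  | * xfs_release - for a regular file that still has a link count, free any | 
|  |  | * blocks speculatively allocated beyond EOF (xfs_inactive_free_eofblocks), | 
|  |  | * unless the mount is read-only, the inode is in the NFS reference cache, | 
|  |  | * or the PREALLOC/APPEND inode flags are set. | 
|  |  | */ | 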
|  | 1558 | STATIC int | 
|  | 1559 | xfs_release( | 
|  | 1560 | bhv_desc_t	*bdp) | 
|  | 1561 | { | 
|  | 1562 | xfs_inode_t	*ip; | 
|  | 1563 | vnode_t		*vp; | 
|  | 1564 | xfs_mount_t	*mp; | 
|  | 1565 | int		error; | 
|  | 1566 |  | 
|  | 1567 | vp = BHV_TO_VNODE(bdp); | 
|  | 1568 | ip = XFS_BHVTOI(bdp); | 
|  | 1569 |  | 
|  | 1570 | if ((vp->v_type != VREG) || (ip->i_d.di_mode == 0)) { | 
|  | 1571 | return 0; | 
|  | 1572 | } | 
|  | 1573 |  | 
|  | 1574 | /* If this is a read-only mount, don't do this (would generate I/O) */ | 
|  | 1575 | if (vp->v_vfsp->vfs_flag & VFS_RDONLY) | 
|  | 1576 | return 0; | 
|  | 1577 |  | 
|  | 1578 | #ifdef HAVE_REFCACHE | 
|  | 1579 | /* If we are in the NFS reference cache then don't do this now */ | 
|  | 1580 | if (ip->i_refcache) | 
|  | 1581 | return 0; | 
|  | 1582 | #endif | 
|  | 1583 |  | 
|  | 1584 | mp = ip->i_mount; | 
|  | 1585 |  | 
|  | 1586 | if (ip->i_d.di_nlink != 0) { | 
|  | 1587 | if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && | 
|  | 1588 | ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && | 
|  | 1589 | (ip->i_df.if_flags & XFS_IFEXTENTS))  && | 
|  | 1590 | (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)))) { | 
|  | 1591 | if ((error = xfs_inactive_free_eofblocks(mp, ip))) | 
|  | 1592 | return (error); | 
|  | 1593 | /* Update linux inode block count after free above */ | 
|  | 1594 | LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, | 
|  | 1595 | ip->i_d.di_nblocks + ip->i_delayed_blks); | 
|  | 1596 | } | 
|  | 1597 | } | 
|  | 1598 |  | 
|  | 1599 | return 0; | 
|  | 1600 | } | 
|  | 1601 |  | 
|  | 1602 | /* | 
|  | 1603 | * xfs_inactive | 
|  | 1604 | * | 
|  | 1605 | * This is called when the vnode reference count for the vnode | 
|  | 1606 | * goes to zero.  If the file has been unlinked, then it must | 
|  | 1607 | * now be truncated.  Also, we clear all of the read-ahead state | 
|  | 1608 | * kept for the inode here since the file is now closed. | 
|  | 1609 | */ | 
|  | 1610 | STATIC int | 
|  | 1611 | xfs_inactive( | 
|  | 1612 | bhv_desc_t	*bdp, | 
|  | 1613 | cred_t		*credp) | 
|  | 1614 | { | 
|  | 1615 | xfs_inode_t	*ip; | 
|  | 1616 | vnode_t		*vp; | 
|  | 1617 | xfs_bmap_free_t	free_list; | 
|  | 1618 | xfs_fsblock_t	first_block; | 
|  | 1619 | int		committed; | 
|  | 1620 | xfs_trans_t	*tp; | 
|  | 1621 | xfs_mount_t	*mp; | 
|  | 1622 | int		error; | 
|  | 1623 | int		truncate; | 
|  | 1624 |  | 
|  | 1625 | vp = BHV_TO_VNODE(bdp); | 
|  | 1626 | vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 1627 |  | 
|  | 1628 | ip = XFS_BHVTOI(bdp); | 
|  | 1629 |  | 
|  | 1630 | /* | 
|  | 1631 | * If the inode is already free, then there can be nothing | 
|  | 1632 | * to clean up here. | 
|  | 1633 | */ | 
|  | 1634 | if (ip->i_d.di_mode == 0 || VN_BAD(vp)) { | 
|  | 1635 | ASSERT(ip->i_df.if_real_bytes == 0); | 
|  | 1636 | ASSERT(ip->i_df.if_broot_bytes == 0); | 
|  | 1637 | return VN_INACTIVE_CACHE; | 
|  | 1638 | } | 
|  | 1639 |  | 
|  | 1640 | /* | 
|  | 1641 | * Only do a truncate if it's a regular file with | 
|  | 1642 | * some actual space in it.  It's OK to look at the | 
|  | 1643 | * inode's fields without the lock because we're the | 
|  | 1644 | * only one with a reference to the inode. | 
|  | 1645 | */ | 
|  | 1646 | truncate = ((ip->i_d.di_nlink == 0) && | 
|  | 1647 | ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) && | 
|  | 1648 | ((ip->i_d.di_mode & S_IFMT) == S_IFREG)); | 
|  | 1649 |  | 
|  | 1650 | mp = ip->i_mount; | 
|  | 1651 |  | 
|  | 1652 | if (ip->i_d.di_nlink == 0 && | 
|  | 1653 | DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_DESTROY)) { | 
|  | 1654 | (void) XFS_SEND_DESTROY(mp, vp, DM_RIGHT_NULL); | 
|  | 1655 | } | 
|  | 1656 |  | 
|  | 1657 | error = 0; | 
|  | 1658 |  | 
|  | 1659 | /* If this is a read-only mount, don't do this (would generate I/O) */ | 
|  | 1660 | if (vp->v_vfsp->vfs_flag & VFS_RDONLY) | 
|  | 1661 | goto out; | 
|  | 1662 |  | 
|  | 1663 | if (ip->i_d.di_nlink != 0) { | 
|  | 1664 | if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && | 
|  | 1665 | ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && | 
|  | 1666 | (ip->i_df.if_flags & XFS_IFEXTENTS))  && | 
|  | 1667 | (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) || | 
|  | 1668 | (ip->i_delayed_blks != 0))) { | 
|  | 1669 | if ((error = xfs_inactive_free_eofblocks(mp, ip))) | 
|  | 1670 | return (VN_INACTIVE_CACHE); | 
|  | 1671 | /* Update linux inode block count after free above */ | 
|  | 1672 | LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, | 
|  | 1673 | ip->i_d.di_nblocks + ip->i_delayed_blks); | 
|  | 1674 | } | 
|  | 1675 | goto out; | 
|  | 1676 | } | 
|  | 1677 |  | 
|  | 1678 | ASSERT(ip->i_d.di_nlink == 0); | 
|  | 1679 |  | 
|  | 1680 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 
|  | 1681 | return (VN_INACTIVE_CACHE); | 
|  | 1682 |  | 
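|  |  | /* | 
|  |  | * Allocate the transaction up front; the log reservation is taken in the | 
|  |  | * branches below because the truncate case must call xfs_itruncate_start() | 
|  |  | * before reserving any log space. | 
|  |  | */ | 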
|  | 1683 | tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); | 
|  | 1684 | if (truncate) { | 
|  | 1685 | /* | 
|  | 1686 | * Do the xfs_itruncate_start() call before | 
|  | 1687 | * reserving any log space because itruncate_start | 
|  | 1688 | * will call into the buffer cache and we can't | 
|  | 1689 | * do that within a transaction. | 
|  | 1690 | */ | 
|  | 1691 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | 
|  | 1692 |  | 
|  | 1693 | xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0); | 
|  | 1694 |  | 
|  | 1695 | error = xfs_trans_reserve(tp, 0, | 
|  | 1696 | XFS_ITRUNCATE_LOG_RES(mp), | 
|  | 1697 | 0, XFS_TRANS_PERM_LOG_RES, | 
|  | 1698 | XFS_ITRUNCATE_LOG_COUNT); | 
|  | 1699 | if (error) { | 
|  | 1700 | /* Don't call itruncate_cleanup */ | 
|  | 1701 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); | 
|  | 1702 | xfs_trans_cancel(tp, 0); | 
|  | 1703 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 
|  | 1704 | return (VN_INACTIVE_CACHE); | 
|  | 1705 | } | 
|  | 1706 |  | 
|  | 1707 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 1708 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 
|  | 1709 | xfs_trans_ihold(tp, ip); | 
|  | 1710 |  | 
|  | 1711 | /* | 
|  | 1712 | * Normally we have to run xfs_itruncate_finish synchronously. | 
|  | 1713 | * But if the filesystem is wsync and we're in the inactive | 
|  | 1714 | * path, then we know that nlink == 0, and that the | 
|  | 1715 | * transaction that made nlink == 0 is permanently committed | 
|  | 1716 | * since xfs_remove runs as a synchronous transaction. | 
|  | 1717 | */ | 
|  | 1718 | error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, | 
|  | 1719 | (!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0)); | 
|  | 1720 |  | 
|  | 1721 | if (error) { | 
|  | 1722 | xfs_trans_cancel(tp, | 
|  | 1723 | XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 
|  | 1724 | xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 
|  | 1725 | return (VN_INACTIVE_CACHE); | 
|  | 1726 | } | 
|  | 1727 | } else if ((ip->i_d.di_mode & S_IFMT) == S_IFLNK) { | 
|  | 1728 |  | 
|  | 1729 | /* | 
|  | 1730 | * If we get an error while cleaning up a | 
|  | 1731 | * symlink we bail out. | 
|  | 1732 | */ | 
|  | 1733 | error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ? | 
|  | 1734 | xfs_inactive_symlink_rmt(ip, &tp) : | 
|  | 1735 | xfs_inactive_symlink_local(ip, &tp); | 
|  | 1736 |  | 
|  | 1737 | if (error) { | 
|  | 1738 | ASSERT(tp == NULL); | 
|  | 1739 | return (VN_INACTIVE_CACHE); | 
|  | 1740 | } | 
|  | 1741 |  | 
|  | 1742 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 
|  | 1743 | xfs_trans_ihold(tp, ip); | 
|  | 1744 | } else { | 
|  | 1745 | error = xfs_trans_reserve(tp, 0, | 
|  | 1746 | XFS_IFREE_LOG_RES(mp), | 
|  | 1747 | 0, XFS_TRANS_PERM_LOG_RES, | 
|  | 1748 | XFS_INACTIVE_LOG_COUNT); | 
|  | 1749 | if (error) { | 
|  | 1750 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); | 
|  | 1751 | xfs_trans_cancel(tp, 0); | 
|  | 1752 | return (VN_INACTIVE_CACHE); | 
|  | 1753 | } | 
|  | 1754 |  | 
|  | 1755 | xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 
|  | 1756 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 
|  | 1757 | xfs_trans_ihold(tp, ip); | 
|  | 1758 | } | 
|  | 1759 |  | 
|  | 1760 | /* | 
|  | 1761 | * If there are attributes associated with the file | 
|  | 1762 | * then blow them away now.  The code calls a routine | 
|  | 1763 | * that recursively deconstructs the attribute fork. | 
|  | 1764 | * We need to just commit the current transaction | 
|  | 1765 | * because we can't use it for xfs_attr_inactive(). | 
|  | 1766 | */ | 
|  | 1767 | if (ip->i_d.di_anextents > 0) { | 
|  | 1768 | error = xfs_inactive_attrs(ip, &tp); | 
|  | 1769 | /* | 
|  | 1770 | * If we got an error, the transaction is already | 
|  | 1771 | * cancelled, and the inode is unlocked. Just get out. | 
|  | 1772 | */ | 
|  | 1773 | if (error) | 
|  | 1774 | return (VN_INACTIVE_CACHE); | 
|  | 1775 | } else if (ip->i_afp) { | 
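|  |  | /* | 
|  |  | * The attribute fork exists in memory but has no extents allocated on | 
|  |  | * disk, so just free the in-core structures. | 
|  |  | */ | 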
|  | 1776 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | 
|  | 1777 | } | 
|  | 1778 |  | 
|  | 1779 | /* | 
|  | 1780 | * Free the inode. | 
|  | 1781 | */ | 
|  | 1782 | XFS_BMAP_INIT(&free_list, &first_block); | 
|  | 1783 | error = xfs_ifree(tp, ip, &free_list); | 
|  | 1784 | if (error) { | 
|  | 1785 | /* | 
|  | 1786 | * If we fail to free the inode, shut down.  The cancel | 
|  | 1787 | * might do that, we need to make sure.  Otherwise the | 
|  | 1788 | * inode might be lost for a long time or forever. | 
|  | 1789 | */ | 
|  | 1790 | if (!XFS_FORCED_SHUTDOWN(mp)) { | 
|  | 1791 | cmn_err(CE_NOTE, | 
|  | 1792 | "xfs_inactive:	xfs_ifree() returned an error = %d on %s", | 
|  | 1793 | error, mp->m_fsname); | 
|  | 1794 | xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); | 
|  | 1795 | } | 
|  | 1796 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); | 
|  | 1797 | } else { | 
|  | 1798 | /* | 
|  | 1799 | * Credit the quota account(s). The inode is gone. | 
|  | 1800 | */ | 
|  | 1801 | XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1); | 
|  | 1802 |  | 
|  | 1803 | /* | 
|  | 1804 | * Just ignore errors at this point.  There is | 
|  | 1805 | * nothing we can do except to try to keep going. | 
|  | 1806 | */ | 
|  | 1807 | (void) xfs_bmap_finish(&tp,  &free_list, first_block, | 
|  | 1808 | &committed); | 
|  | 1809 | (void) xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 1810 | } | 
|  | 1811 | /* | 
|  | 1812 | * Release the dquots held by inode, if any. | 
|  | 1813 | */ | 
|  | 1814 | XFS_QM_DQDETACH(mp, ip); | 
|  | 1815 |  | 
|  | 1816 | xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 
|  | 1817 |  | 
|  | 1818 | out: | 
|  | 1819 | return VN_INACTIVE_CACHE; | 
|  | 1820 | } | 
|  | 1821 |  | 
|  | 1822 |  | 
|  | 1823 | /* | 
|  | 1824 | * xfs_lookup - look up the named entry in a directory and return its vnode. | 
|  | 1825 | */ | 
|  | 1826 | STATIC int | 
|  | 1827 | xfs_lookup( | 
|  | 1828 | bhv_desc_t		*dir_bdp, | 
|  | 1829 | vname_t			*dentry, | 
|  | 1830 | vnode_t			**vpp, | 
|  | 1831 | int			flags, | 
|  | 1832 | vnode_t			*rdir, | 
|  | 1833 | cred_t			*credp) | 
|  | 1834 | { | 
|  | 1835 | xfs_inode_t		*dp, *ip; | 
|  | 1836 | xfs_ino_t		e_inum; | 
|  | 1837 | int			error; | 
|  | 1838 | uint			lock_mode; | 
|  | 1839 | vnode_t			*dir_vp; | 
|  | 1840 |  | 
|  | 1841 | dir_vp = BHV_TO_VNODE(dir_bdp); | 
|  | 1842 | vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 1843 |  | 
|  | 1844 | dp = XFS_BHVTOI(dir_bdp); | 
|  | 1845 |  | 
|  | 1846 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 
|  | 1847 | return XFS_ERROR(EIO); | 
|  | 1848 |  | 
|  | 1849 | lock_mode = xfs_ilock_map_shared(dp); | 
|  | 1850 | error = xfs_dir_lookup_int(dir_bdp, lock_mode, dentry, &e_inum, &ip); | 
|  | 1851 | if (!error) { | 
|  | 1852 | *vpp = XFS_ITOV(ip); | 
|  | 1853 | ITRACE(ip); | 
|  | 1854 | } | 
|  | 1855 | xfs_iunlock_map_shared(dp, lock_mode); | 
|  | 1856 | return error; | 
|  | 1857 | } | 
|  | 1858 |  | 
|  | 1859 |  | 
|  | 1860 | /* | 
|  | 1861 | * xfs_create (create a new file). | 
|  | 1862 | */ | 
|  | 1863 | STATIC int | 
|  | 1864 | xfs_create( | 
|  | 1865 | bhv_desc_t		*dir_bdp, | 
|  | 1866 | vname_t			*dentry, | 
|  | 1867 | vattr_t			*vap, | 
|  | 1868 | vnode_t			**vpp, | 
|  | 1869 | cred_t			*credp) | 
|  | 1870 | { | 
|  | 1871 | char			*name = VNAME(dentry); | 
|  | 1872 | vnode_t			*dir_vp; | 
|  | 1873 | xfs_inode_t		*dp, *ip; | 
|  | 1874 | vnode_t		        *vp=NULL; | 
|  | 1875 | xfs_trans_t		*tp; | 
|  | 1876 | xfs_mount_t	        *mp; | 
|  | 1877 | xfs_dev_t		rdev; | 
|  | 1878 | int                     error; | 
|  | 1879 | xfs_bmap_free_t		free_list; | 
|  | 1880 | xfs_fsblock_t		first_block; | 
|  | 1881 | boolean_t		dp_joined_to_trans; | 
|  | 1882 | int			dm_event_sent = 0; | 
|  | 1883 | uint			cancel_flags; | 
|  | 1884 | int			committed; | 
|  | 1885 | xfs_prid_t		prid; | 
|  | 1886 | struct xfs_dquot	*udqp, *gdqp; | 
|  | 1887 | uint			resblks; | 
|  | 1888 | int			dm_di_mode; | 
|  | 1889 | int			namelen; | 
|  | 1890 |  | 
|  | 1891 | ASSERT(!*vpp); | 
|  | 1892 | dir_vp = BHV_TO_VNODE(dir_bdp); | 
|  | 1893 | vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 1894 |  | 
|  | 1895 | dp = XFS_BHVTOI(dir_bdp); | 
|  | 1896 | mp = dp->i_mount; | 
|  | 1897 |  | 
|  | 1898 | dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); | 
|  | 1899 | namelen = VNAMELEN(dentry); | 
|  | 1900 |  | 
|  | 1901 | if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { | 
|  | 1902 | error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, | 
|  | 1903 | dir_vp, DM_RIGHT_NULL, NULL, | 
|  | 1904 | DM_RIGHT_NULL, name, NULL, | 
|  | 1905 | dm_di_mode, 0, 0); | 
|  | 1906 |  | 
|  | 1907 | if (error) | 
|  | 1908 | return error; | 
|  | 1909 | dm_event_sent = 1; | 
|  | 1910 | } | 
|  | 1911 |  | 
|  | 1912 | if (XFS_FORCED_SHUTDOWN(mp)) | 
|  | 1913 | return XFS_ERROR(EIO); | 
|  | 1914 |  | 
|  | 1915 | /* Return through std_return after this point. */ | 
|  | 1916 |  | 
|  | 1917 | udqp = gdqp = NULL; | 
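|  |  | /* | 
|  |  | * Select the project ID for the new inode: inherit it from the parent | 
|  |  | * directory if PROJINHERIT is set there, otherwise use the value supplied | 
|  |  | * by the caller, otherwise the default project ID. | 
|  |  | */ | 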
| Nathan Scott | 365ca83 | 2005-06-21 15:39:12 +1000 | [diff] [blame] | 1918 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) | 
|  | 1919 | prid = dp->i_d.di_projid; | 
|  | 1920 | else if (vap->va_mask & XFS_AT_PROJID) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1921 | prid = (xfs_prid_t)vap->va_projid; | 
|  | 1922 | else | 
|  | 1923 | prid = (xfs_prid_t)dfltprid; | 
|  | 1924 |  | 
|  | 1925 | /* | 
|  | 1926 | * Make sure that we have allocated dquot(s) on disk. | 
|  | 1927 | */ | 
|  | 1928 | error = XFS_QM_DQVOPALLOC(mp, dp, | 
| Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1929 | current_fsuid(credp), current_fsgid(credp), prid, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1930 | XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp); | 
|  | 1931 | if (error) | 
|  | 1932 | goto std_return; | 
|  | 1933 |  | 
|  | 1934 | ip = NULL; | 
|  | 1935 | dp_joined_to_trans = B_FALSE; | 
|  | 1936 |  | 
|  | 1937 | tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE); | 
|  | 1938 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 
|  | 1939 | resblks = XFS_CREATE_SPACE_RES(mp, namelen); | 
|  | 1940 | /* | 
|  | 1941 | * Initially assume that the file does not exist and | 
|  | 1942 | * reserve the resources for that case.  If that is not | 
|  | 1943 | * the case we'll drop the one we have and get a more | 
|  | 1944 | * appropriate transaction later. | 
|  | 1945 | */ | 
|  | 1946 | error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0, | 
|  | 1947 | XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT); | 
|  | 1948 | if (error == ENOSPC) { | 
|  | 1949 | resblks = 0; | 
|  | 1950 | error = xfs_trans_reserve(tp, 0, XFS_CREATE_LOG_RES(mp), 0, | 
|  | 1951 | XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT); | 
|  | 1952 | } | 
|  | 1953 | if (error) { | 
|  | 1954 | cancel_flags = 0; | 
|  | 1955 | dp = NULL; | 
|  | 1956 | goto error_return; | 
|  | 1957 | } | 
|  | 1958 |  | 
|  | 1959 | xfs_ilock(dp, XFS_ILOCK_EXCL); | 
|  | 1960 |  | 
|  | 1961 | XFS_BMAP_INIT(&free_list, &first_block); | 
|  | 1962 |  | 
|  | 1963 | ASSERT(ip == NULL); | 
|  | 1964 |  | 
|  | 1965 | /* | 
|  | 1966 | * Reserve disk quota and the inode. | 
|  | 1967 | */ | 
|  | 1968 | error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); | 
|  | 1969 | if (error) | 
|  | 1970 | goto error_return; | 
|  | 1971 |  | 
|  | 1972 | if (resblks == 0 && | 
|  | 1973 | (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen))) | 
|  | 1974 | goto error_return; | 
|  | 1975 | rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0; | 
|  | 1976 | error = xfs_dir_ialloc(&tp, dp, | 
|  | 1977 | MAKEIMODE(vap->va_type,vap->va_mode), 1, | 
|  | 1978 | rdev, credp, prid, resblks > 0, | 
|  | 1979 | &ip, &committed); | 
|  | 1980 | if (error) { | 
|  | 1981 | if (error == ENOSPC) | 
|  | 1982 | goto error_return; | 
|  | 1983 | goto abort_return; | 
|  | 1984 | } | 
|  | 1985 | ITRACE(ip); | 
|  | 1986 |  | 
|  | 1987 | /* | 
|  | 1988 | * At this point, we've gotten a newly allocated inode. | 
|  | 1989 | * It is locked (and joined to the transaction). | 
|  | 1990 | */ | 
|  | 1991 |  | 
|  | 1992 | ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); | 
|  | 1993 |  | 
|  | 1994 | /* | 
|  | 1995 | * Now we join the directory inode to the transaction. | 
|  | 1996 | * We do not do it earlier because xfs_dir_ialloc | 
|  | 1997 | * might commit the previous transaction (and release | 
|  | 1998 | * all the locks). | 
|  | 1999 | */ | 
|  | 2000 |  | 
|  | 2001 | VN_HOLD(dir_vp); | 
|  | 2002 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 
|  | 2003 | dp_joined_to_trans = B_TRUE; | 
|  | 2004 |  | 
|  | 2005 | error = XFS_DIR_CREATENAME(mp, tp, dp, name, namelen, ip->i_ino, | 
|  | 2006 | &first_block, &free_list, | 
|  | 2007 | resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0); | 
|  | 2008 | if (error) { | 
|  | 2009 | ASSERT(error != ENOSPC); | 
|  | 2010 | goto abort_return; | 
|  | 2011 | } | 
|  | 2012 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 
|  | 2013 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); | 
|  | 2014 |  | 
|  | 2015 | /* | 
|  | 2016 | * If this is a synchronous mount, make sure that the | 
|  | 2017 | * create transaction goes to disk before returning to | 
|  | 2018 | * the user. | 
|  | 2019 | */ | 
|  | 2020 | if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { | 
|  | 2021 | xfs_trans_set_sync(tp); | 
|  | 2022 | } | 
|  | 2023 |  | 
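|  |  | /* | 
|  |  | * Bump the in-memory generation count on the parent directory so that | 
|  |  | * other processes accessing it will recognize that it has changed. | 
|  |  | */ | 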
|  | 2024 | dp->i_gen++; | 
|  | 2025 |  | 
|  | 2026 | /* | 
|  | 2027 | * Attach the dquot(s) to the inodes and modify them incore. | 
|  | 2028 | * The ids of the inode couldn't have changed since the new | 
|  | 2029 | * inode has been locked ever since it was created. | 
|  | 2030 | */ | 
|  | 2031 | XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp); | 
|  | 2032 |  | 
|  | 2033 | /* | 
|  | 2034 | * xfs_trans_commit normally decrements the vnode ref count | 
|  | 2035 | * when it unlocks the inode. Since we want to return the | 
|  | 2036 | * vnode to the caller, we bump the vnode ref count now. | 
|  | 2037 | */ | 
|  | 2038 | IHOLD(ip); | 
|  | 2039 | vp = XFS_ITOV(ip); | 
|  | 2040 |  | 
|  | 2041 | error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); | 
|  | 2042 | if (error) { | 
|  | 2043 | xfs_bmap_cancel(&free_list); | 
|  | 2044 | goto abort_rele; | 
|  | 2045 | } | 
|  | 2046 |  | 
|  | 2047 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 2048 | if (error) { | 
|  | 2049 | IRELE(ip); | 
|  | 2050 | tp = NULL; | 
|  | 2051 | goto error_return; | 
|  | 2052 | } | 
|  | 2053 |  | 
|  | 2054 | XFS_QM_DQRELE(mp, udqp); | 
|  | 2055 | XFS_QM_DQRELE(mp, gdqp); | 
|  | 2056 |  | 
|  | 2057 | /* | 
|  | 2058 | * Propagate the fact that the vnode changed after the | 
|  | 2059 | * xfs_inode locks have been released. | 
|  | 2060 | */ | 
|  | 2061 | VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3); | 
|  | 2062 |  | 
|  | 2063 | *vpp = vp; | 
|  | 2064 |  | 
|  | 2065 | /* Fallthrough to std_return with error = 0  */ | 
|  | 2066 |  | 
|  | 2067 | std_return: | 
|  | 2068 | if ( (*vpp || (error != 0 && dm_event_sent != 0)) && | 
|  | 2069 | DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp), | 
|  | 2070 | DM_EVENT_POSTCREATE)) { | 
|  | 2071 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, | 
|  | 2072 | dir_vp, DM_RIGHT_NULL, | 
|  | 2073 | *vpp ? vp:NULL, | 
|  | 2074 | DM_RIGHT_NULL, name, NULL, | 
|  | 2075 | dm_di_mode, error, 0); | 
|  | 2076 | } | 
|  | 2077 | return error; | 
|  | 2078 |  | 
|  | 2079 | abort_return: | 
|  | 2080 | cancel_flags |= XFS_TRANS_ABORT; | 
|  | 2081 | /* FALLTHROUGH */ | 
|  | 2082 | error_return: | 
|  | 2083 |  | 
|  | 2084 | if (tp != NULL) | 
|  | 2085 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 2086 |  | 
|  | 2087 | if (!dp_joined_to_trans && (dp != NULL)) | 
|  | 2088 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 
|  | 2089 | XFS_QM_DQRELE(mp, udqp); | 
|  | 2090 | XFS_QM_DQRELE(mp, gdqp); | 
|  | 2091 |  | 
|  | 2092 | goto std_return; | 
|  | 2093 |  | 
|  | 2094 | abort_rele: | 
|  | 2095 | /* | 
|  | 2096 | * Wait until after the current transaction is aborted to | 
|  | 2097 | * release the inode.  This prevents recursive transactions | 
|  | 2098 | * and deadlocks from xfs_inactive. | 
|  | 2099 | */ | 
|  | 2100 | cancel_flags |= XFS_TRANS_ABORT; | 
|  | 2101 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 2102 | IRELE(ip); | 
|  | 2103 |  | 
|  | 2104 | XFS_QM_DQRELE(mp, udqp); | 
|  | 2105 | XFS_QM_DQRELE(mp, gdqp); | 
|  | 2106 |  | 
|  | 2107 | goto std_return; | 
|  | 2108 | } | 
|  | 2109 |  | 
|  | 2110 | #ifdef DEBUG | 
|  | 2111 | /* | 
|  | 2112 | * Some counters to see if (and how often) we are hitting some deadlock | 
|  | 2113 | * prevention code paths. | 
|  | 2114 | */ | 
|  | 2115 |  | 
|  | 2116 | int xfs_rm_locks; | 
|  | 2117 | int xfs_rm_lock_delays; | 
|  | 2118 | int xfs_rm_attempts; | 
|  | 2119 | #endif | 
|  | 2120 |  | 
|  | 2121 | /* | 
|  | 2122 | * The following routine will lock the inodes associated with the | 
|  | 2123 | * directory and the named entry in the directory. The locks are | 
|  | 2124 | * acquired in increasing inode number. | 
|  | 2125 | * | 
|  | 2126 | * If the entry is "..", then only the directory is locked. The | 
|  | 2127 | * vnode ref count will still include that from the .. entry in | 
|  | 2128 | * this case. | 
|  | 2129 | * | 
|  | 2130 | * There is a deadlock we need to worry about. If the locked directory is | 
|  | 2131 | * in the AIL, it might be blocking up the log. The next inode we lock | 
|  | 2132 | * could be already locked by another thread waiting for log space (e.g | 
|  | 2133 | * a permanent log reservation with a long running transaction (see | 
|  | 2134 | * xfs_itruncate_finish)). To solve this, we must check if the directory | 
|  | 2135 | * is in the ail and use lock_nowait. If we can't lock, we need to | 
|  | 2136 | * drop the inode lock on the directory and try again. xfs_iunlock will | 
|  | 2137 | * potentially push the tail if we were holding up the log. | 
|  | 2138 | */ | 
|  | 2139 | STATIC int | 
|  | 2140 | xfs_lock_dir_and_entry( | 
|  | 2141 | xfs_inode_t	*dp, | 
|  | 2142 | vname_t		*dentry, | 
|  | 2143 | xfs_inode_t	*ip)	/* inode of entry 'name' */ | 
|  | 2144 | { | 
|  | 2145 | int		attempts; | 
|  | 2146 | xfs_ino_t	e_inum; | 
|  | 2147 | xfs_inode_t	*ips[2]; | 
|  | 2148 | xfs_log_item_t	*lp; | 
|  | 2149 |  | 
|  | 2150 | #ifdef DEBUG | 
|  | 2151 | xfs_rm_locks++; | 
|  | 2152 | #endif | 
|  | 2153 | attempts = 0; | 
|  | 2154 |  | 
|  | 2155 | again: | 
|  | 2156 | xfs_ilock(dp, XFS_ILOCK_EXCL); | 
|  | 2157 |  | 
|  | 2158 | e_inum = ip->i_ino; | 
|  | 2159 |  | 
|  | 2160 | ITRACE(ip); | 
|  | 2161 |  | 
|  | 2162 | /* | 
|  | 2163 | * We want to lock in increasing inum. Since we've already | 
|  | 2164 | * acquired the lock on the directory, we may need to release | 
|  | 2165 | * it if the inum of the entry turns out to be less. | 
|  | 2166 | */ | 
|  | 2167 | if (e_inum > dp->i_ino) { | 
|  | 2168 | /* | 
|  | 2169 | * We are already in the right order, so just | 
|  | 2170 | * lock on the inode of the entry. | 
|  | 2171 | * We need to use nowait if dp is in the AIL. | 
|  | 2172 | */ | 
|  | 2173 |  | 
|  | 2174 | lp = (xfs_log_item_t *)dp->i_itemp; | 
|  | 2175 | if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { | 
|  | 2176 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { | 
|  | 2177 | attempts++; | 
|  | 2178 | #ifdef DEBUG | 
|  | 2179 | xfs_rm_attempts++; | 
|  | 2180 | #endif | 
|  | 2181 |  | 
|  | 2182 | /* | 
|  | 2183 | * Unlock dp and try again. | 
|  | 2184 | * xfs_iunlock will try to push the tail | 
|  | 2185 | * if the inode is in the AIL. | 
|  | 2186 | */ | 
|  | 2187 |  | 
|  | 2188 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 
|  | 2189 |  | 
|  | 2190 | if ((attempts % 5) == 0) { | 
|  | 2191 | delay(1); /* Don't just spin the CPU */ | 
|  | 2192 | #ifdef DEBUG | 
|  | 2193 | xfs_rm_lock_delays++; | 
|  | 2194 | #endif | 
|  | 2195 | } | 
|  | 2196 | goto again; | 
|  | 2197 | } | 
|  | 2198 | } else { | 
|  | 2199 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 2200 | } | 
|  | 2201 | } else if (e_inum < dp->i_ino) { | 
|  | 2202 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 
|  | 2203 |  | 
|  | 2204 | ips[0] = ip; | 
|  | 2205 | ips[1] = dp; | 
|  | 2206 | xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); | 
|  | 2207 | } | 
|  | 2208 | /* | 
|  | 2209 | * else e_inum == dp->i_ino.  This can happen if we're asked to lock /x/..; | 
|  | 2210 | * the entry is "..", which is also the parent directory. | 
|  | 2211 | */ | 
|  | 2212 |  | 
|  | 2213 | return 0; | 
|  | 2214 | } | 
|  | 2215 |  | 
|  | 2216 | #ifdef DEBUG | 
|  | 2217 | int xfs_locked_n; | 
|  | 2218 | int xfs_small_retries; | 
|  | 2219 | int xfs_middle_retries; | 
|  | 2220 | int xfs_lots_retries; | 
|  | 2221 | int xfs_lock_delays; | 
|  | 2222 | #endif | 
|  | 2223 |  | 
|  | 2224 | /* | 
|  | 2225 | * The following routine will lock n inodes in exclusive mode. | 
|  | 2226 | * We assume the caller calls us with the inodes in i_ino order. | 
|  | 2227 | * | 
|  | 2228 | * We need to detect deadlock where an inode that we lock | 
|  | 2229 | * is in the AIL and we start waiting for another inode that is locked | 
|  | 2230 | * by a thread in a long running transaction (such as truncate). This can | 
|  | 2231 | * result in deadlock since the long running trans might need to wait | 
|  | 2232 | * for the inode we just locked in order to push the tail and free space | 
|  | 2233 | * in the log. | 
|  | 2234 | */ | 
|  | 2235 | void | 
|  | 2236 | xfs_lock_inodes( | 
|  | 2237 | xfs_inode_t	**ips, | 
|  | 2238 | int		inodes, | 
|  | 2239 | int		first_locked, | 
|  | 2240 | uint		lock_mode) | 
|  | 2241 | { | 
|  | 2242 | int		attempts = 0, i, j, try_lock; | 
|  | 2243 | xfs_log_item_t	*lp; | 
|  | 2244 |  | 
|  | 2245 | ASSERT(ips && (inodes >= 2)); /* we need at least two */ | 
|  | 2246 |  | 
|  | 2247 | if (first_locked) { | 
|  | 2248 | try_lock = 1; | 
|  | 2249 | i = 1; | 
|  | 2250 | } else { | 
|  | 2251 | try_lock = 0; | 
|  | 2252 | i = 0; | 
|  | 2253 | } | 
|  | 2254 |  | 
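|  |  | /* | 
|  |  | * Lock the inodes in order.  Once any already-locked inode is found to be | 
|  |  | * in the AIL we switch to trylocks; if a trylock fails, drop every lock | 
|  |  | * taken so far and start the loop over. | 
|  |  | */ | 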
|  | 2255 | again: | 
|  | 2256 | for (; i < inodes; i++) { | 
|  | 2257 | ASSERT(ips[i]); | 
|  | 2258 |  | 
|  | 2259 | if (i && (ips[i] == ips[i-1]))	/* Already locked */ | 
|  | 2260 | continue; | 
|  | 2261 |  | 
|  | 2262 | /* | 
|  | 2263 | * If try_lock is not set yet, make sure all locked inodes | 
|  | 2264 | * are not in the AIL. | 
|  | 2265 | * If any are, set try_lock to be used later. | 
|  | 2266 | */ | 
|  | 2267 |  | 
|  | 2268 | if (!try_lock) { | 
|  | 2269 | for (j = (i - 1); j >= 0 && !try_lock; j--) { | 
|  | 2270 | lp = (xfs_log_item_t *)ips[j]->i_itemp; | 
|  | 2271 | if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { | 
|  | 2272 | try_lock++; | 
|  | 2273 | } | 
|  | 2274 | } | 
|  | 2275 | } | 
|  | 2276 |  | 
|  | 2277 | /* | 
|  | 2278 | * If any of the previous locks we have locked is in the AIL, | 
|  | 2279 | * we must TRY to get the second and subsequent locks. If | 
|  | 2280 | * we can't get any, we must release all we have | 
|  | 2281 | * and try again. | 
|  | 2282 | */ | 
|  | 2283 |  | 
|  | 2284 | if (try_lock) { | 
|  | 2285 | /* try_lock must be 0 if i is 0. */ | 
|  | 2286 | /* | 
|  | 2287 | * try_lock means we have an inode locked | 
|  | 2288 | * that is in the AIL. | 
|  | 2289 | */ | 
|  | 2290 | ASSERT(i != 0); | 
|  | 2291 | if (!xfs_ilock_nowait(ips[i], lock_mode)) { | 
|  | 2292 | attempts++; | 
|  | 2293 |  | 
|  | 2294 | /* | 
|  | 2295 | * Unlock all previous guys and try again. | 
|  | 2296 | * xfs_iunlock will try to push the tail | 
|  | 2297 | * if the inode is in the AIL. | 
|  | 2298 | */ | 
|  | 2299 |  | 
|  | 2300 | for(j = i - 1; j >= 0; j--) { | 
|  | 2301 |  | 
|  | 2302 | /* | 
|  | 2303 | * Check to see if we've already | 
|  | 2304 | * unlocked this one. | 
|  | 2305 | * Not the first one going back, | 
|  | 2306 | * and the inode ptr is the same. | 
|  | 2307 | */ | 
|  | 2308 | if ((j != (i - 1)) && ips[j] == | 
|  | 2309 | ips[j+1]) | 
|  | 2310 | continue; | 
|  | 2311 |  | 
|  | 2312 | xfs_iunlock(ips[j], lock_mode); | 
|  | 2313 | } | 
|  | 2314 |  | 
|  | 2315 | if ((attempts % 5) == 0) { | 
|  | 2316 | delay(1); /* Don't just spin the CPU */ | 
|  | 2317 | #ifdef DEBUG | 
|  | 2318 | xfs_lock_delays++; | 
|  | 2319 | #endif | 
|  | 2320 | } | 
|  | 2321 | i = 0; | 
|  | 2322 | try_lock = 0; | 
|  | 2323 | goto again; | 
|  | 2324 | } | 
|  | 2325 | } else { | 
|  | 2326 | xfs_ilock(ips[i], lock_mode); | 
|  | 2327 | } | 
|  | 2328 | } | 
|  | 2329 |  | 
|  | 2330 | #ifdef DEBUG | 
|  | 2331 | if (attempts) { | 
|  | 2332 | if (attempts < 5) xfs_small_retries++; | 
|  | 2333 | else if (attempts < 100) xfs_middle_retries++; | 
|  | 2334 | else xfs_lots_retries++; | 
|  | 2335 | } else { | 
|  | 2336 | xfs_locked_n++; | 
|  | 2337 | } | 
|  | 2338 | #endif | 
|  | 2339 | } | 
|  | 2340 |  | 
|  | 2341 | #ifdef	DEBUG | 
|  | 2342 | #define	REMOVE_DEBUG_TRACE(x)	{remove_which_error_return = (x);} | 
|  | 2343 | int remove_which_error_return = 0; | 
|  | 2344 | #else /* ! DEBUG */ | 
|  | 2345 | #define	REMOVE_DEBUG_TRACE(x) | 
|  | 2346 | #endif	/* ! DEBUG */ | 
|  | 2347 |  | 
|  | 2348 |  | 
|  | 2349 | /* | 
|  | 2350 | * xfs_remove - remove the named entry from a directory and drop a | 
|  | 2351 | * link on the inode it refers to. | 
|  | 2352 | */ | 
|  | 2353 | STATIC int | 
|  | 2354 | xfs_remove( | 
|  | 2355 | bhv_desc_t		*dir_bdp, | 
|  | 2356 | vname_t			*dentry, | 
|  | 2357 | cred_t			*credp) | 
|  | 2358 | { | 
|  | 2359 | vnode_t			*dir_vp; | 
|  | 2360 | char			*name = VNAME(dentry); | 
|  | 2361 | xfs_inode_t             *dp, *ip; | 
|  | 2362 | xfs_trans_t             *tp = NULL; | 
|  | 2363 | xfs_mount_t		*mp; | 
|  | 2364 | int                     error = 0; | 
|  | 2365 | xfs_bmap_free_t         free_list; | 
|  | 2366 | xfs_fsblock_t           first_block; | 
|  | 2367 | int			cancel_flags; | 
|  | 2368 | int			committed; | 
|  | 2369 | int			dm_di_mode = 0; | 
|  | 2370 | int			link_zero; | 
|  | 2371 | uint			resblks; | 
|  | 2372 | int			namelen; | 
|  | 2373 |  | 
|  | 2374 | dir_vp = BHV_TO_VNODE(dir_bdp); | 
|  | 2375 | vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 2376 |  | 
|  | 2377 | dp = XFS_BHVTOI(dir_bdp); | 
|  | 2378 | mp = dp->i_mount; | 
|  | 2379 |  | 
|  | 2380 | if (XFS_FORCED_SHUTDOWN(mp)) | 
|  | 2381 | return XFS_ERROR(EIO); | 
|  | 2382 |  | 
|  | 2383 | namelen = VNAMELEN(dentry); | 
|  | 2384 |  | 
|  | 2385 | if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) { | 
|  | 2386 | error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_vp, | 
|  | 2387 | DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, | 
|  | 2388 | name, NULL, 0, 0, 0); | 
|  | 2389 | if (error) | 
|  | 2390 | return error; | 
|  | 2391 | } | 
|  | 2392 |  | 
|  | 2393 | /* From this point on, return through std_return */ | 
|  | 2394 | ip = NULL; | 
|  | 2395 |  | 
|  | 2396 | /* | 
|  | 2397 | * We need to get a reference to ip before we get our log | 
|  | 2398 | * reservation. The reason for this is that we cannot call | 
|  | 2399 | * xfs_iget for an inode for which we do not have a reference | 
|  | 2400 | * once we've acquired a log reservation. This is because the | 
|  | 2401 | * inode we are trying to get might be in xfs_inactive going | 
|  | 2402 | * for a log reservation. Since we'll have to wait for the | 
|  | 2403 | * inactive code to complete before returning from xfs_iget, | 
|  | 2404 | * we need to make sure that we don't have log space reserved | 
|  | 2405 | * when we call xfs_iget.  Instead we get an unlocked reference | 
|  | 2406 | * to the inode before getting our log reservation. | 
|  | 2407 | */ | 
|  | 2408 | error = xfs_get_dir_entry(dentry, &ip); | 
|  | 2409 | if (error) { | 
|  | 2410 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 2411 | goto std_return; | 
|  | 2412 | } | 
|  | 2413 |  | 
|  | 2414 | dm_di_mode = ip->i_d.di_mode; | 
|  | 2415 |  | 
|  | 2416 | vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); | 
|  | 2417 |  | 
|  | 2418 | ITRACE(ip); | 
|  | 2419 |  | 
|  | 2420 | error = XFS_QM_DQATTACH(mp, dp, 0); | 
|  | 2421 | if (!error && dp != ip) | 
|  | 2422 | error = XFS_QM_DQATTACH(mp, ip, 0); | 
|  | 2423 | if (error) { | 
|  | 2424 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 2425 | IRELE(ip); | 
|  | 2426 | goto std_return; | 
|  | 2427 | } | 
|  | 2428 |  | 
|  | 2429 | tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE); | 
|  | 2430 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 
|  | 2431 | /* | 
|  | 2432 | * We try to get the real space reservation first, | 
|  | 2433 | * allowing for directory btree deletion(s) implying | 
|  | 2434 | * possible bmap insert(s).  If we can't get the space | 
|  | 2435 | * reservation then we use 0 instead; in that case the directory | 
|  | 2436 | * code avoids the bmap btree insert(s) by trimming the LAST | 
|  | 2437 | * block from the directory if such an insert would otherwise | 
|  | 2438 | * be needed. | 
|  | 2439 | */ | 
|  | 2440 | resblks = XFS_REMOVE_SPACE_RES(mp); | 
|  | 2441 | error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0, | 
|  | 2442 | XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT); | 
|  | 2443 | if (error == ENOSPC) { | 
|  | 2444 | resblks = 0; | 
|  | 2445 | error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0, | 
|  | 2446 | XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT); | 
|  | 2447 | } | 
|  | 2448 | if (error) { | 
|  | 2449 | ASSERT(error != ENOSPC); | 
|  | 2450 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 2451 | xfs_trans_cancel(tp, 0); | 
|  | 2452 | IRELE(ip); | 
|  | 2453 | return error; | 
|  | 2454 | } | 
|  | 2455 |  | 
|  | 2456 | error = xfs_lock_dir_and_entry(dp, dentry, ip); | 
|  | 2457 | if (error) { | 
|  | 2458 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 2459 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 2460 | IRELE(ip); | 
|  | 2461 | goto std_return; | 
|  | 2462 | } | 
|  | 2463 |  | 
|  | 2464 | /* | 
|  | 2465 | * At this point, we've gotten both the directory and the entry | 
|  | 2466 | * inodes locked. | 
|  | 2467 | */ | 
|  | 2468 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 
|  | 2469 | if (dp != ip) { | 
|  | 2470 | /* | 
|  | 2471 | * Increment vnode ref count only in this case since | 
|  | 2472 | * there's an extra vnode reference in the case where | 
|  | 2473 | * dp == ip. | 
|  | 2474 | */ | 
|  | 2475 | IHOLD(dp); | 
|  | 2476 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 
|  | 2477 | } | 
|  | 2478 |  | 
|  | 2479 | /* | 
|  | 2480 | * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. | 
|  | 2481 | */ | 
|  | 2482 | XFS_BMAP_INIT(&free_list, &first_block); | 
|  | 2483 | error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, ip->i_ino, | 
|  | 2484 | &first_block, &free_list, 0); | 
|  | 2485 | if (error) { | 
|  | 2486 | ASSERT(error != ENOENT); | 
|  | 2487 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 2488 | goto error1; | 
|  | 2489 | } | 
|  | 2490 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 
|  | 2491 |  | 
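|  |  | /* | 
|  |  | * Bump the in-memory generation count on the parent directory so that | 
|  |  | * other processes accessing it will recognize that it has changed. | 
|  |  | */ | 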
|  | 2492 | dp->i_gen++; | 
|  | 2493 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); | 
|  | 2494 |  | 
|  | 2495 | error = xfs_droplink(tp, ip); | 
|  | 2496 | if (error) { | 
|  | 2497 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 2498 | goto error1; | 
|  | 2499 | } | 
|  | 2500 |  | 
|  | 2501 | /* Determine if this is the last link while | 
|  | 2502 | * we are in the transaction. | 
|  | 2503 | */ | 
|  | 2504 | link_zero = (ip)->i_d.di_nlink==0; | 
|  | 2505 |  | 
|  | 2506 | /* | 
|  | 2507 | * Take an extra ref on the inode so that it doesn't | 
|  | 2508 | * go to xfs_inactive() from within the commit. | 
|  | 2509 | */ | 
|  | 2510 | IHOLD(ip); | 
|  | 2511 |  | 
|  | 2512 | /* | 
|  | 2513 | * If this is a synchronous mount, make sure that the | 
|  | 2514 | * remove transaction goes to disk before returning to | 
|  | 2515 | * the user. | 
|  | 2516 | */ | 
|  | 2517 | if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { | 
|  | 2518 | xfs_trans_set_sync(tp); | 
|  | 2519 | } | 
|  | 2520 |  | 
|  | 2521 | error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); | 
|  | 2522 | if (error) { | 
|  | 2523 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 2524 | goto error_rele; | 
|  | 2525 | } | 
|  | 2526 |  | 
|  | 2527 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 2528 | if (error) { | 
|  | 2529 | IRELE(ip); | 
|  | 2530 | goto std_return; | 
|  | 2531 | } | 
|  | 2532 |  | 
|  | 2533 | /* | 
|  | 2534 | * Before we drop our extra reference to the inode, purge it | 
|  | 2535 | * from the refcache if it is there.  By waiting until afterwards | 
|  | 2536 | * to do the IRELE, we ensure that we won't go inactive in the | 
|  | 2537 | * xfs_refcache_purge_ip routine (although that would be OK). | 
|  | 2538 | */ | 
|  | 2539 | xfs_refcache_purge_ip(ip); | 
|  | 2540 |  | 
|  | 2541 | vn_trace_exit(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); | 
|  | 2542 |  | 
|  | 2543 | /* | 
|  | 2544 | * Let interposed file systems know about removed links. | 
|  | 2545 | */ | 
|  | 2546 | VOP_LINK_REMOVED(XFS_ITOV(ip), dir_vp, link_zero); | 
|  | 2547 |  | 
|  | 2548 | IRELE(ip); | 
|  | 2549 |  | 
|  | 2550 | /*	Fall through to std_return with error = 0 */ | 
|  | 2551 | std_return: | 
|  | 2552 | if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, | 
|  | 2553 | DM_EVENT_POSTREMOVE)) { | 
|  | 2554 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, | 
|  | 2555 | dir_vp, DM_RIGHT_NULL, | 
|  | 2556 | NULL, DM_RIGHT_NULL, | 
|  | 2557 | name, NULL, dm_di_mode, error, 0); | 
|  | 2558 | } | 
|  | 2559 | return error; | 
|  | 2560 |  | 
|  | 2561 | error1: | 
|  | 2562 | xfs_bmap_cancel(&free_list); | 
|  | 2563 | cancel_flags |= XFS_TRANS_ABORT; | 
|  | 2564 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 2565 | goto std_return; | 
|  | 2566 |  | 
|  | 2567 | error_rele: | 
|  | 2568 | /* | 
|  | 2569 | * In this case make sure to not release the inode until after | 
|  | 2570 | * the current transaction is aborted.  Releasing it beforehand | 
|  | 2571 | * can cause us to go to xfs_inactive and start a recursive | 
|  | 2572 | * transaction which can easily deadlock with the current one. | 
|  | 2573 | */ | 
|  | 2574 | xfs_bmap_cancel(&free_list); | 
|  | 2575 | cancel_flags |= XFS_TRANS_ABORT; | 
|  | 2576 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 2577 |  | 
|  | 2578 | /* | 
|  | 2579 | * Before we drop our extra reference to the inode, purge it | 
|  | 2580 | * from the refcache if it is there.  By waiting until afterwards | 
|  | 2581 | * to do the IRELE, we ensure that we won't go inactive in the | 
|  | 2582 | * xfs_refcache_purge_ip routine (although that would be OK). | 
|  | 2583 | */ | 
|  | 2584 | xfs_refcache_purge_ip(ip); | 
|  | 2585 |  | 
|  | 2586 | IRELE(ip); | 
|  | 2587 |  | 
|  | 2588 | goto std_return; | 
|  | 2589 | } | 
|  | 2590 |  | 
|  | 2591 |  | 
|  | 2592 | /* | 
|  | 2593 | * xfs_link - create a new hard link to an existing inode in the | 
|  | 2594 | * target directory. | 
|  | 2595 | */ | 
|  | 2596 | STATIC int | 
|  | 2597 | xfs_link( | 
|  | 2598 | bhv_desc_t		*target_dir_bdp, | 
|  | 2599 | vnode_t			*src_vp, | 
|  | 2600 | vname_t			*dentry, | 
|  | 2601 | cred_t			*credp) | 
|  | 2602 | { | 
|  | 2603 | xfs_inode_t		*tdp, *sip; | 
|  | 2604 | xfs_trans_t		*tp; | 
|  | 2605 | xfs_mount_t		*mp; | 
|  | 2606 | xfs_inode_t		*ips[2]; | 
|  | 2607 | int			error; | 
|  | 2608 | xfs_bmap_free_t         free_list; | 
|  | 2609 | xfs_fsblock_t           first_block; | 
|  | 2610 | int			cancel_flags; | 
|  | 2611 | int			committed; | 
|  | 2612 | vnode_t			*target_dir_vp; | 
|  | 2613 | bhv_desc_t		*src_bdp; | 
|  | 2614 | int			resblks; | 
|  | 2615 | char			*target_name = VNAME(dentry); | 
|  | 2616 | int			target_namelen; | 
|  | 2617 |  | 
|  | 2618 | target_dir_vp = BHV_TO_VNODE(target_dir_bdp); | 
|  | 2619 | vn_trace_entry(target_dir_vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 2620 | vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 2621 |  | 
|  | 2622 | target_namelen = VNAMELEN(dentry); | 
|  | 2623 | if (src_vp->v_type == VDIR) | 
|  | 2624 | return XFS_ERROR(EPERM); | 
|  | 2625 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2626 | src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2627 | sip = XFS_BHVTOI(src_bdp); | 
|  | 2628 | tdp = XFS_BHVTOI(target_dir_bdp); | 
|  | 2629 | mp = tdp->i_mount; | 
|  | 2630 | if (XFS_FORCED_SHUTDOWN(mp)) | 
|  | 2631 | return XFS_ERROR(EIO); | 
|  | 2632 |  | 
|  | 2633 | if (DM_EVENT_ENABLED(src_vp->v_vfsp, tdp, DM_EVENT_LINK)) { | 
|  | 2634 | error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK, | 
|  | 2635 | target_dir_vp, DM_RIGHT_NULL, | 
|  | 2636 | src_vp, DM_RIGHT_NULL, | 
|  | 2637 | target_name, NULL, 0, 0, 0); | 
|  | 2638 | if (error) | 
|  | 2639 | return error; | 
|  | 2640 | } | 
|  | 2641 |  | 
|  | 2642 | /* Return through std_return after this point. */ | 
|  | 2643 |  | 
|  | 2644 | error = XFS_QM_DQATTACH(mp, sip, 0); | 
|  | 2645 | if (!error && sip != tdp) | 
|  | 2646 | error = XFS_QM_DQATTACH(mp, tdp, 0); | 
|  | 2647 | if (error) | 
|  | 2648 | goto std_return; | 
|  | 2649 |  | 
|  | 2650 | tp = xfs_trans_alloc(mp, XFS_TRANS_LINK); | 
|  | 2651 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 
|  | 2652 | resblks = XFS_LINK_SPACE_RES(mp, target_namelen); | 
|  | 2653 | error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0, | 
|  | 2654 | XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT); | 
|  | 2655 | if (error == ENOSPC) { | 
|  | 2656 | resblks = 0; | 
|  | 2657 | error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0, | 
|  | 2658 | XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT); | 
|  | 2659 | } | 
|  | 2660 | if (error) { | 
|  | 2661 | cancel_flags = 0; | 
|  | 2662 | goto error_return; | 
|  | 2663 | } | 
|  | 2664 |  | 
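|  |  | /* | 
|  |  | * Order the two inodes by inode number so that xfs_lock_inodes() is called | 
|  |  | * with them in ascending i_ino order, as it expects. | 
|  |  | */ | 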
|  | 2665 | if (sip->i_ino < tdp->i_ino) { | 
|  | 2666 | ips[0] = sip; | 
|  | 2667 | ips[1] = tdp; | 
|  | 2668 | } else { | 
|  | 2669 | ips[0] = tdp; | 
|  | 2670 | ips[1] = sip; | 
|  | 2671 | } | 
|  | 2672 |  | 
|  | 2673 | xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); | 
|  | 2674 |  | 
|  | 2675 | /* | 
|  | 2676 | * Increment vnode ref counts since xfs_trans_commit & | 
|  | 2677 | * xfs_trans_cancel will both unlock the inodes and | 
|  | 2678 | * decrement the associated ref counts. | 
|  | 2679 | */ | 
|  | 2680 | VN_HOLD(src_vp); | 
|  | 2681 | VN_HOLD(target_dir_vp); | 
|  | 2682 | xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); | 
|  | 2683 | xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); | 
|  | 2684 |  | 
|  | 2685 | /* | 
|  | 2686 | * If the source has too many links, we can't make any more to it. | 
|  | 2687 | */ | 
|  | 2688 | if (sip->i_d.di_nlink >= XFS_MAXLINK) { | 
|  | 2689 | error = XFS_ERROR(EMLINK); | 
|  | 2690 | goto error_return; | 
|  | 2691 | } | 
|  | 2692 |  | 
| Nathan Scott | 365ca83 | 2005-06-21 15:39:12 +1000 | [diff] [blame] | 2693 | /* | 
|  | 2694 | * If we are using project inheritance, we only allow hard link | 
|  | 2695 | * creation in our tree when the project IDs are the same; else | 
|  | 2696 | * the tree quota mechanism could be circumvented. | 
|  | 2697 | */ | 
|  | 2698 | if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | 
|  | 2699 | (tdp->i_d.di_projid != sip->i_d.di_projid))) { | 
|  | 2700 | error = XFS_ERROR(EPERM); | 
|  | 2701 | goto error_return; | 
|  | 2702 | } | 
|  | 2703 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2704 | if (resblks == 0 && | 
|  | 2705 | (error = XFS_DIR_CANENTER(mp, tp, tdp, target_name, | 
|  | 2706 | target_namelen))) | 
|  | 2707 | goto error_return; | 
|  | 2708 |  | 
|  | 2709 | XFS_BMAP_INIT(&free_list, &first_block); | 
|  | 2710 |  | 
|  | 2711 | error = XFS_DIR_CREATENAME(mp, tp, tdp, target_name, target_namelen, | 
|  | 2712 | sip->i_ino, &first_block, &free_list, | 
|  | 2713 | resblks); | 
|  | 2714 | if (error) | 
|  | 2715 | goto abort_return; | 
|  | 2716 | xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 
|  | 2717 | tdp->i_gen++; | 
|  | 2718 | xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); | 
|  | 2719 |  | 
|  | 2720 | error = xfs_bumplink(tp, sip); | 
|  | 2721 | if (error) { | 
|  | 2722 | goto abort_return; | 
|  | 2723 | } | 
|  | 2724 |  | 
|  | 2725 | /* | 
|  | 2726 | * If this is a synchronous mount, make sure that the | 
|  | 2727 | * link transaction goes to disk before returning to | 
|  | 2728 | * the user. | 
|  | 2729 | */ | 
|  | 2730 | if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { | 
|  | 2731 | xfs_trans_set_sync(tp); | 
|  | 2732 | } | 
|  | 2733 |  | 
|  | 2734 | error = xfs_bmap_finish (&tp, &free_list, first_block, &committed); | 
|  | 2735 | if (error) { | 
|  | 2736 | xfs_bmap_cancel(&free_list); | 
|  | 2737 | goto abort_return; | 
|  | 2738 | } | 
|  | 2739 |  | 
|  | 2740 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 2741 | if (error) { | 
|  | 2742 | goto std_return; | 
|  | 2743 | } | 
|  | 2744 |  | 
|  | 2745 | /* Fall through to std_return with error = 0. */ | 
|  | 2746 | std_return: | 
|  | 2747 | if (DM_EVENT_ENABLED(src_vp->v_vfsp, sip, | 
|  | 2748 | DM_EVENT_POSTLINK)) { | 
|  | 2749 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK, | 
|  | 2750 | target_dir_vp, DM_RIGHT_NULL, | 
|  | 2751 | src_vp, DM_RIGHT_NULL, | 
|  | 2752 | target_name, NULL, 0, error, 0); | 
|  | 2753 | } | 
|  | 2754 | return error; | 
|  | 2755 |  | 
|  | 2756 | abort_return: | 
|  | 2757 | cancel_flags |= XFS_TRANS_ABORT; | 
|  | 2758 | /* FALLTHROUGH */ | 
|  | 2759 | error_return: | 
|  | 2760 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 2761 |  | 
|  | 2762 | goto std_return; | 
|  | 2763 | } | 
|  | 2764 | /* | 
|  | 2765 | * xfs_mkdir - allocate a new directory inode and add an entry for it | 
|  | 2766 | * to the parent directory. | 
|  | 2767 | */ | 
|  | 2768 | STATIC int | 
|  | 2769 | xfs_mkdir( | 
|  | 2770 | bhv_desc_t		*dir_bdp, | 
|  | 2771 | vname_t			*dentry, | 
|  | 2772 | vattr_t			*vap, | 
|  | 2773 | vnode_t			**vpp, | 
|  | 2774 | cred_t			*credp) | 
|  | 2775 | { | 
|  | 2776 | char			*dir_name = VNAME(dentry); | 
|  | 2777 | xfs_inode_t             *dp; | 
|  | 2778 | xfs_inode_t		*cdp;	/* inode of created dir */ | 
|  | 2779 | vnode_t			*cvp;	/* vnode of created dir */ | 
|  | 2780 | xfs_trans_t		*tp; | 
|  | 2781 | xfs_mount_t		*mp; | 
|  | 2782 | int			cancel_flags; | 
|  | 2783 | int			error; | 
|  | 2784 | int			committed; | 
|  | 2785 | xfs_bmap_free_t         free_list; | 
|  | 2786 | xfs_fsblock_t           first_block; | 
|  | 2787 | vnode_t			*dir_vp; | 
|  | 2788 | boolean_t		dp_joined_to_trans; | 
|  | 2789 | boolean_t		created = B_FALSE; | 
|  | 2790 | int			dm_event_sent = 0; | 
|  | 2791 | xfs_prid_t		prid; | 
|  | 2792 | struct xfs_dquot	*udqp, *gdqp; | 
|  | 2793 | uint			resblks; | 
|  | 2794 | int			dm_di_mode; | 
|  | 2795 | int			dir_namelen; | 
|  | 2796 |  | 
|  | 2797 | dir_vp = BHV_TO_VNODE(dir_bdp); | 
|  | 2798 | dp = XFS_BHVTOI(dir_bdp); | 
|  | 2799 | mp = dp->i_mount; | 
|  | 2800 |  | 
|  | 2801 | if (XFS_FORCED_SHUTDOWN(mp)) | 
|  | 2802 | return XFS_ERROR(EIO); | 
|  | 2803 |  | 
|  | 2804 | dir_namelen = VNAMELEN(dentry); | 
|  | 2805 |  | 
|  | 2806 | tp = NULL; | 
|  | 2807 | dp_joined_to_trans = B_FALSE; | 
|  | 2808 | dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); | 
|  | 2809 |  | 
|  | 2810 | if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { | 
|  | 2811 | error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, | 
|  | 2812 | dir_vp, DM_RIGHT_NULL, NULL, | 
|  | 2813 | DM_RIGHT_NULL, dir_name, NULL, | 
|  | 2814 | dm_di_mode, 0, 0); | 
|  | 2815 | if (error) | 
|  | 2816 | return error; | 
|  | 2817 | dm_event_sent = 1; | 
|  | 2818 | } | 
|  | 2819 |  | 
|  | 2820 | /* Return through std_return after this point. */ | 
|  | 2821 |  | 
|  | 2822 | vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 2823 |  | 
|  | 2824 | mp = dp->i_mount; | 
|  | 2825 | udqp = gdqp = NULL; | 
| Nathan Scott | 365ca83 | 2005-06-21 15:39:12 +1000 | [diff] [blame] | 2826 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) | 
|  | 2827 | prid = dp->i_d.di_projid; | 
|  | 2828 | else if (vap->va_mask & XFS_AT_PROJID) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2829 | prid = (xfs_prid_t)vap->va_projid; | 
|  | 2830 | else | 
|  | 2831 | prid = (xfs_prid_t)dfltprid; | 
|  | 2832 |  | 
|  | 2833 | /* | 
|  | 2834 | * Make sure that we have allocated dquot(s) on disk. | 
|  | 2835 | */ | 
|  | 2836 | error = XFS_QM_DQVOPALLOC(mp, dp, | 
| Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 2837 | current_fsuid(credp), current_fsgid(credp), prid, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2838 | XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); | 
|  | 2839 | if (error) | 
|  | 2840 | goto std_return; | 
|  | 2841 |  | 
|  | 2842 | tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR); | 
|  | 2843 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 
|  | 2844 | resblks = XFS_MKDIR_SPACE_RES(mp, dir_namelen); | 
|  | 2845 | error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0, | 
|  | 2846 | XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT); | 
|  | 2847 | if (error == ENOSPC) { | 
|  | 2848 | resblks = 0; | 
|  | 2849 | error = xfs_trans_reserve(tp, 0, XFS_MKDIR_LOG_RES(mp), 0, | 
|  | 2850 | XFS_TRANS_PERM_LOG_RES, | 
|  | 2851 | XFS_MKDIR_LOG_COUNT); | 
|  | 2852 | } | 
|  | 2853 | if (error) { | 
|  | 2854 | cancel_flags = 0; | 
|  | 2855 | dp = NULL; | 
|  | 2856 | goto error_return; | 
|  | 2857 | } | 
|  | 2858 |  | 
|  | 2859 | xfs_ilock(dp, XFS_ILOCK_EXCL); | 
|  | 2860 |  | 
|  | 2861 | /* | 
|  | 2862 | * Check for directory link count overflow. | 
|  | 2863 | */ | 
|  | 2864 | if (dp->i_d.di_nlink >= XFS_MAXLINK) { | 
|  | 2865 | error = XFS_ERROR(EMLINK); | 
|  | 2866 | goto error_return; | 
|  | 2867 | } | 
|  | 2868 |  | 
|  | 2869 | /* | 
|  | 2870 | * Reserve disk quota and the inode. | 
|  | 2871 | */ | 
|  | 2872 | error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); | 
|  | 2873 | if (error) | 
|  | 2874 | goto error_return; | 
|  | 2875 |  | 
|  | 2876 | if (resblks == 0 && | 
|  | 2877 | (error = XFS_DIR_CANENTER(mp, tp, dp, dir_name, dir_namelen))) | 
|  | 2878 | goto error_return; | 
|  | 2879 | /* | 
|  | 2880 | * create the directory inode. | 
|  | 2881 | */ | 
|  | 2882 | error = xfs_dir_ialloc(&tp, dp, | 
|  | 2883 | MAKEIMODE(vap->va_type,vap->va_mode), 2, | 
|  | 2884 | 0, credp, prid, resblks > 0, | 
|  | 2885 | &cdp, NULL); | 
|  | 2886 | if (error) { | 
|  | 2887 | if (error == ENOSPC) | 
|  | 2888 | goto error_return; | 
|  | 2889 | goto abort_return; | 
|  | 2890 | } | 
|  | 2891 | ITRACE(cdp); | 
|  | 2892 |  | 
|  | 2893 | /* | 
|  | 2894 | * Now we add the directory inode to the transaction. | 
|  | 2895 | * We waited until now since xfs_dir_ialloc might start | 
|  | 2896 | * a new transaction.  Had we joined the transaction | 
|  | 2897 | * earlier, the locks might have gotten released. | 
|  | 2898 | */ | 
|  | 2899 | VN_HOLD(dir_vp); | 
|  | 2900 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 
|  | 2901 | dp_joined_to_trans = B_TRUE; | 
|  | 2902 |  | 
|  | 2903 | XFS_BMAP_INIT(&free_list, &first_block); | 
|  | 2904 |  | 
|  | 2905 | error = XFS_DIR_CREATENAME(mp, tp, dp, dir_name, dir_namelen, | 
|  | 2906 | cdp->i_ino, &first_block, &free_list, | 
|  | 2907 | resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0); | 
|  | 2908 | if (error) { | 
|  | 2909 | ASSERT(error != ENOSPC); | 
|  | 2910 | goto error1; | 
|  | 2911 | } | 
|  | 2912 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 
|  | 2913 |  | 
|  | 2914 | /* | 
|  | 2915 | * Bump the in memory version number of the parent directory | 
|  | 2916 | * so that other processes accessing it will recognize that | 
|  | 2917 | * the directory has changed. | 
|  | 2918 | */ | 
|  | 2919 | dp->i_gen++; | 
|  | 2920 |  | 
|  | 2921 | error = XFS_DIR_INIT(mp, tp, cdp, dp); | 
|  | 2922 | if (error) { | 
|  | 2923 | goto error2; | 
|  | 2924 | } | 
|  | 2925 |  | 
|  | 2926 | cdp->i_gen = 1; | 
|  | 2927 | error = xfs_bumplink(tp, dp); | 
|  | 2928 | if (error) { | 
|  | 2929 | goto error2; | 
|  | 2930 | } | 
|  | 2931 |  | 
|  | 2932 | cvp = XFS_ITOV(cdp); | 
|  | 2933 |  | 
|  | 2934 | created = B_TRUE; | 
|  | 2935 |  | 
|  | 2936 | *vpp = cvp; | 
|  | 2937 | IHOLD(cdp); | 
|  | 2938 |  | 
|  | 2939 | /* | 
|  | 2940 | * Attach the dquots to the new inode and modify the icount incore. | 
|  | 2941 | */ | 
|  | 2942 | XFS_QM_DQVOPCREATE(mp, tp, cdp, udqp, gdqp); | 
|  | 2943 |  | 
|  | 2944 | /* | 
|  | 2945 | * If this is a synchronous mount, make sure that the | 
|  | 2946 | * mkdir transaction goes to disk before returning to | 
|  | 2947 | * the user. | 
|  | 2948 | */ | 
|  | 2949 | if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { | 
|  | 2950 | xfs_trans_set_sync(tp); | 
|  | 2951 | } | 
|  | 2952 |  | 
|  | 2953 | error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); | 
|  | 2954 | if (error) { | 
|  | 2955 | IRELE(cdp); | 
|  | 2956 | goto error2; | 
|  | 2957 | } | 
|  | 2958 |  | 
|  | 2959 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 2960 | XFS_QM_DQRELE(mp, udqp); | 
|  | 2961 | XFS_QM_DQRELE(mp, gdqp); | 
|  | 2962 | if (error) { | 
|  | 2963 | IRELE(cdp); | 
|  | 2964 | } | 
|  | 2965 |  | 
|  | 2966 | /* Fall through to std_return with error = 0 or errno from | 
|  | 2967 | * xfs_trans_commit. */ | 
|  | 2968 |  | 
|  | 2969 | std_return: | 
|  | 2970 | if ( (created || (error != 0 && dm_event_sent != 0)) && | 
|  | 2971 | DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp), | 
|  | 2972 | DM_EVENT_POSTCREATE)) { | 
|  | 2973 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, | 
|  | 2974 | dir_vp, DM_RIGHT_NULL, | 
|  | 2975 | created ? XFS_ITOV(cdp):NULL, | 
|  | 2976 | DM_RIGHT_NULL, | 
|  | 2977 | dir_name, NULL, | 
|  | 2978 | dm_di_mode, error, 0); | 
|  | 2979 | } | 
|  | 2980 | return error; | 
|  | 2981 |  | 
|  | 2982 | error2: | 
|  | 2983 | error1: | 
|  | 2984 | xfs_bmap_cancel(&free_list); | 
|  | 2985 | abort_return: | 
|  | 2986 | cancel_flags |= XFS_TRANS_ABORT; | 
|  | 2987 | error_return: | 
|  | 2988 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 2989 | XFS_QM_DQRELE(mp, udqp); | 
|  | 2990 | XFS_QM_DQRELE(mp, gdqp); | 
|  | 2991 |  | 
|  | 2992 | if (!dp_joined_to_trans && (dp != NULL)) { | 
|  | 2993 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 
|  | 2994 | } | 
|  | 2995 |  | 
|  | 2996 | goto std_return; | 
|  | 2997 | } | 
|  | 2998 |  | 
|  | 2999 |  | 
|  | 3000 | /* | 
|  | 3001 | * xfs_rmdir | 
|  | 3002 | * | 
|  | 3003 | */ | 
|  | 3004 | STATIC int | 
|  | 3005 | xfs_rmdir( | 
|  | 3006 | bhv_desc_t		*dir_bdp, | 
|  | 3007 | vname_t			*dentry, | 
|  | 3008 | cred_t			*credp) | 
|  | 3009 | { | 
|  | 3010 | char			*name = VNAME(dentry); | 
|  | 3011 | xfs_inode_t             *dp; | 
|  | 3012 | xfs_inode_t             *cdp;   /* child directory */ | 
|  | 3013 | xfs_trans_t             *tp; | 
|  | 3014 | xfs_mount_t		*mp; | 
|  | 3015 | int                     error; | 
|  | 3016 | xfs_bmap_free_t         free_list; | 
|  | 3017 | xfs_fsblock_t           first_block; | 
|  | 3018 | int			cancel_flags; | 
|  | 3019 | int			committed; | 
|  | 3020 | vnode_t			*dir_vp; | 
|  | 3021 | int			dm_di_mode = 0; | 
|  | 3022 | int			last_cdp_link; | 
|  | 3023 | int			namelen; | 
|  | 3024 | uint			resblks; | 
|  | 3025 |  | 
|  | 3026 | dir_vp = BHV_TO_VNODE(dir_bdp); | 
|  | 3027 | dp = XFS_BHVTOI(dir_bdp); | 
|  | 3028 | mp = dp->i_mount; | 
|  | 3029 |  | 
|  | 3030 | vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 3031 |  | 
|  | 3032 | if (XFS_FORCED_SHUTDOWN(XFS_BHVTOI(dir_bdp)->i_mount)) | 
|  | 3033 | return XFS_ERROR(EIO); | 
|  | 3034 | namelen = VNAMELEN(dentry); | 
|  | 3035 |  | 
|  | 3036 | if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) { | 
|  | 3037 | error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, | 
|  | 3038 | dir_vp, DM_RIGHT_NULL, | 
|  | 3039 | NULL, DM_RIGHT_NULL, | 
|  | 3040 | name, NULL, 0, 0, 0); | 
|  | 3041 | if (error) | 
|  | 3042 | return XFS_ERROR(error); | 
|  | 3043 | } | 
|  | 3044 |  | 
|  | 3045 | /* Return through std_return after this point. */ | 
|  | 3046 |  | 
|  | 3047 | cdp = NULL; | 
|  | 3048 |  | 
|  | 3049 | /* | 
|  | 3050 | * We need to get a reference to cdp before we get our log | 
|  | 3051 | * reservation.  The reason for this is that we cannot call | 
|  | 3052 | * xfs_iget for an inode for which we do not have a reference | 
|  | 3053 | * once we've acquired a log reservation.  This is because the | 
|  | 3054 | * inode we are trying to get might be in xfs_inactive going | 
|  | 3055 | * for a log reservation.  Since we'll have to wait for the | 
|  | 3056 | * inactive code to complete before returning from xfs_iget, | 
|  | 3057 | * we need to make sure that we don't have log space reserved | 
|  | 3058 | * when we call xfs_iget.  Instead we get an unlocked reference | 
|  | 3059 | * to the inode before getting our log reservation. | 
|  | 3060 | */ | 
|  | 3061 | error = xfs_get_dir_entry(dentry, &cdp); | 
|  | 3062 | if (error) { | 
|  | 3063 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 3064 | goto std_return; | 
|  | 3065 | } | 
|  | 3066 | mp = dp->i_mount; | 
|  | 3067 | dm_di_mode = cdp->i_d.di_mode; | 
|  | 3068 |  | 
|  | 3069 | /* | 
|  | 3070 | * Get the dquots for the inodes. | 
|  | 3071 | */ | 
|  | 3072 | error = XFS_QM_DQATTACH(mp, dp, 0); | 
|  | 3073 | if (!error && dp != cdp) | 
|  | 3074 | error = XFS_QM_DQATTACH(mp, cdp, 0); | 
|  | 3075 | if (error) { | 
|  | 3076 | IRELE(cdp); | 
|  | 3077 | REMOVE_DEBUG_TRACE(__LINE__); | 
|  | 3078 | goto std_return; | 
|  | 3079 | } | 
|  | 3080 |  | 
|  | 3081 | tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR); | 
|  | 3082 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 
|  | 3083 | /* | 
|  | 3084 | * We try to get the real space reservation first, | 
|  | 3085 | * allowing for directory btree deletion(s) implying | 
|  | 3086 | * possible bmap insert(s).  If we can't get the space | 
|  | 3087 | * reservation then we use 0 instead; in that case the | 
|  | 3088 | * directory code avoids any bmap btree insert by trimming | 
|  | 3089 | * the LAST block from the directory instead, should such | 
|  | 3090 | * an insert otherwise be needed. | 
|  | 3091 | */ | 
|  | 3092 | resblks = XFS_REMOVE_SPACE_RES(mp); | 
|  | 3093 | error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0, | 
|  | 3094 | XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT); | 
|  | 3095 | if (error == ENOSPC) { | 
|  | 3096 | resblks = 0; | 
|  | 3097 | error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0, | 
|  | 3098 | XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT); | 
|  | 3099 | } | 
|  | 3100 | if (error) { | 
|  | 3101 | ASSERT(error != ENOSPC); | 
|  | 3102 | cancel_flags = 0; | 
|  | 3103 | IRELE(cdp); | 
|  | 3104 | goto error_return; | 
|  | 3105 | } | 
|  | 3106 | XFS_BMAP_INIT(&free_list, &first_block); | 
|  | 3107 |  | 
|  | 3108 | /* | 
|  | 3109 | * Now lock the child directory inode and the parent directory | 
|  | 3110 | * inode in the proper order.  This will take care of validating | 
|  | 3111 | * that the directory entry for the child directory inode has | 
|  | 3112 | * not changed while we were obtaining a log reservation. | 
|  | 3113 | */ | 
|  | 3114 | error = xfs_lock_dir_and_entry(dp, dentry, cdp); | 
|  | 3115 | if (error) { | 
|  | 3116 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 3117 | IRELE(cdp); | 
|  | 3118 | goto std_return; | 
|  | 3119 | } | 
|  | 3120 |  | 
|  | 3121 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 
|  | 3122 | if (dp != cdp) { | 
|  | 3123 | /* | 
|  | 3124 | * Only increment the parent directory vnode count if | 
|  | 3125 | * we didn't bump it in looking up cdp.  The only time | 
|  | 3126 | * we don't bump it is when we're looking up ".". | 
|  | 3127 | */ | 
|  | 3128 | VN_HOLD(dir_vp); | 
|  | 3129 | } | 
|  | 3130 |  | 
|  | 3131 | ITRACE(cdp); | 
|  | 3132 | xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL); | 
|  | 3133 |  | 
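|  |  | /* | 
|  |  | * An empty directory always has a link count of exactly 2: one | 
|  |  | * from its entry in the parent and one from its own "." entry. | 
|  |  | * Each subdirectory would add a ".." back-link, so a count above | 
|  |  | * 2 means the directory still contains subdirectories.  Plain | 
|  |  | * files do not add links, hence the separate emptiness check. | 
|  |  | */ | 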
|  | 3134 | ASSERT(cdp->i_d.di_nlink >= 2); | 
|  | 3135 | if (cdp->i_d.di_nlink != 2) { | 
|  | 3136 | error = XFS_ERROR(ENOTEMPTY); | 
|  | 3137 | goto error_return; | 
|  | 3138 | } | 
|  | 3139 | if (!XFS_DIR_ISEMPTY(mp, cdp)) { | 
|  | 3140 | error = XFS_ERROR(ENOTEMPTY); | 
|  | 3141 | goto error_return; | 
|  | 3142 | } | 
|  | 3143 |  | 
|  | 3144 | error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, cdp->i_ino, | 
|  | 3145 | &first_block, &free_list, resblks); | 
|  | 3146 | if (error) { | 
|  | 3147 | goto error1; | 
|  | 3148 | } | 
|  | 3149 |  | 
|  | 3150 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 
|  | 3151 |  | 
|  | 3152 | /* | 
|  | 3153 | * Bump the in memory generation count on the parent | 
|  | 3154 | * directory so that others can know that it has changed. | 
|  | 3155 | */ | 
|  | 3156 | dp->i_gen++; | 
|  | 3157 |  | 
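|  |  | /* | 
|  |  | * Removing the directory drops three links in total: dp loses | 
|  |  | * the link held by cdp's "..", and cdp loses both its entry in | 
|  |  | * dp and its own "." link, taking its count to zero. | 
|  |  | */ | 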
|  | 3158 | /* | 
|  | 3159 | * Drop the link from cdp's "..". | 
|  | 3160 | */ | 
|  | 3161 | error = xfs_droplink(tp, dp); | 
|  | 3162 | if (error) { | 
|  | 3163 | goto error1; | 
|  | 3164 | } | 
|  | 3165 |  | 
|  | 3166 | /* | 
|  | 3167 | * Drop the link from dp to cdp. | 
|  | 3168 | */ | 
|  | 3169 | error = xfs_droplink(tp, cdp); | 
|  | 3170 | if (error) { | 
|  | 3171 | goto error1; | 
|  | 3172 | } | 
|  | 3173 |  | 
|  | 3174 | /* | 
|  | 3175 | * Drop the "." link from cdp to self. | 
|  | 3176 | */ | 
|  | 3177 | error = xfs_droplink(tp, cdp); | 
|  | 3178 | if (error) { | 
|  | 3179 | goto error1; | 
|  | 3180 | } | 
|  | 3181 |  | 
|  | 3182 | /* Determine, before the commit drops our locks, whether this was the last link to cdp. */ | 
|  | 3183 | last_cdp_link = (cdp->i_d.di_nlink == 0); | 
|  | 3184 |  | 
|  | 3185 | /* | 
|  | 3186 | * Take an extra ref on the child vnode so that it | 
|  | 3187 | * does not go to xfs_inactive() from within the commit. | 
|  | 3188 | */ | 
|  | 3189 | IHOLD(cdp); | 
|  | 3190 |  | 
|  | 3191 | /* | 
|  | 3192 | * If this is a synchronous mount, make sure that the | 
|  | 3193 | * rmdir transaction goes to disk before returning to | 
|  | 3194 | * the user. | 
|  | 3195 | */ | 
|  | 3196 | if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { | 
|  | 3197 | xfs_trans_set_sync(tp); | 
|  | 3198 | } | 
|  | 3199 |  | 
|  | 3200 | error = xfs_bmap_finish (&tp, &free_list, first_block, &committed); | 
|  | 3201 | if (error) { | 
|  | 3202 | xfs_bmap_cancel(&free_list); | 
|  | 3203 | xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | | 
|  | 3204 | XFS_TRANS_ABORT)); | 
|  | 3205 | IRELE(cdp); | 
|  | 3206 | goto std_return; | 
|  | 3207 | } | 
|  | 3208 |  | 
|  | 3209 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 3210 | if (error) { | 
|  | 3211 | IRELE(cdp); | 
|  | 3212 | goto std_return; | 
|  | 3213 | } | 
|  | 3214 |  | 
|  | 3215 |  | 
|  | 3216 | /* | 
|  | 3217 | * Let interposed file systems know about removed links. | 
|  | 3218 | */ | 
|  | 3219 | VOP_LINK_REMOVED(XFS_ITOV(cdp), dir_vp, last_cdp_link); | 
|  | 3220 |  | 
|  | 3221 | IRELE(cdp); | 
|  | 3222 |  | 
|  | 3223 | /* Fall through to std_return with error = 0 or the errno | 
|  | 3224 | * from xfs_trans_commit. */ | 
|  | 3225 | std_return: | 
|  | 3226 | if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) { | 
|  | 3227 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, | 
|  | 3228 | dir_vp, DM_RIGHT_NULL, | 
|  | 3229 | NULL, DM_RIGHT_NULL, | 
|  | 3230 | name, NULL, dm_di_mode, | 
|  | 3231 | error, 0); | 
|  | 3232 | } | 
|  | 3233 | return error; | 
|  | 3234 |  | 
|  | 3235 | error1: | 
|  | 3236 | xfs_bmap_cancel(&free_list); | 
|  | 3237 | cancel_flags |= XFS_TRANS_ABORT; | 
|  | 3238 | error_return: | 
|  | 3239 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 3240 | goto std_return; | 
|  | 3241 | } | 
|  | 3242 |  | 
|  | 3243 |  | 
|  | 3244 | /* | 
|  | 3245 | * xfs_readdir | 
|  | 3246 | * | 
|  | 3247 | * Read dp's entries starting at uiop->uio_offset and translate them into | 
|  | 3248 | * bufsize bytes worth of struct dirents starting at bufbase. | 
|  | 3249 | */ | 
|  | 3250 | STATIC int | 
|  | 3251 | xfs_readdir( | 
|  | 3252 | bhv_desc_t	*dir_bdp, | 
|  | 3253 | uio_t		*uiop, | 
|  | 3254 | cred_t		*credp, | 
|  | 3255 | int		*eofp) | 
|  | 3256 | { | 
|  | 3257 | xfs_inode_t	*dp; | 
|  | 3258 | xfs_trans_t	*tp = NULL; | 
|  | 3259 | int		error = 0; | 
|  | 3260 | uint		lock_mode; | 
|  | 3261 | xfs_off_t	start_offset; | 
|  | 3262 |  | 
|  | 3263 | vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__, | 
|  | 3264 | (inst_t *)__return_address); | 
|  | 3265 | dp = XFS_BHVTOI(dir_bdp); | 
|  | 3266 |  | 
|  | 3267 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) { | 
|  | 3268 | return XFS_ERROR(EIO); | 
|  | 3269 | } | 
|  | 3270 |  | 
|  | 3271 | lock_mode = xfs_ilock_map_shared(dp); | 
|  | 3272 | start_offset = uiop->uio_offset; | 
|  | 3273 | error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp); | 
|  | 3274 | if (start_offset != uiop->uio_offset) { | 
|  | 3275 | xfs_ichgtime(dp, XFS_ICHGTIME_ACC); | 
|  | 3276 | } | 
|  | 3277 | xfs_iunlock_map_shared(dp, lock_mode); | 
|  | 3278 | return error; | 
|  | 3279 | } | 
|  | 3280 |  | 
|  | 3281 |  | 
|  | 3282 | /* | 
|  | 3283 | * xfs_symlink | 
|  | 3284 | * | 
|  | 3285 | */ | 
|  | 3286 | STATIC int | 
|  | 3287 | xfs_symlink( | 
|  | 3288 | bhv_desc_t		*dir_bdp, | 
|  | 3289 | vname_t			*dentry, | 
|  | 3290 | vattr_t			*vap, | 
|  | 3291 | char			*target_path, | 
|  | 3292 | vnode_t			**vpp, | 
|  | 3293 | cred_t			*credp) | 
|  | 3294 | { | 
|  | 3295 | xfs_trans_t		*tp; | 
|  | 3296 | xfs_mount_t		*mp; | 
|  | 3297 | xfs_inode_t		*dp; | 
|  | 3298 | xfs_inode_t		*ip; | 
|  | 3299 | int			error; | 
|  | 3300 | int			pathlen; | 
|  | 3301 | xfs_bmap_free_t		free_list; | 
|  | 3302 | xfs_fsblock_t		first_block; | 
|  | 3303 | boolean_t		dp_joined_to_trans; | 
|  | 3304 | vnode_t			*dir_vp; | 
|  | 3305 | uint			cancel_flags; | 
|  | 3306 | int			committed; | 
|  | 3307 | xfs_fileoff_t		first_fsb; | 
|  | 3308 | xfs_filblks_t		fs_blocks; | 
|  | 3309 | int			nmaps; | 
|  | 3310 | xfs_bmbt_irec_t		mval[SYMLINK_MAPS]; | 
|  | 3311 | xfs_daddr_t		d; | 
|  | 3312 | char			*cur_chunk; | 
|  | 3313 | int			byte_cnt; | 
|  | 3314 | int			n; | 
|  | 3315 | xfs_buf_t		*bp; | 
|  | 3316 | xfs_prid_t		prid; | 
|  | 3317 | struct xfs_dquot	*udqp, *gdqp; | 
|  | 3318 | uint			resblks; | 
|  | 3319 | char			*link_name = VNAME(dentry); | 
|  | 3320 | int			link_namelen; | 
|  | 3321 |  | 
|  | 3322 | *vpp = NULL; | 
|  | 3323 | dir_vp = BHV_TO_VNODE(dir_bdp); | 
|  | 3324 | dp = XFS_BHVTOI(dir_bdp); | 
|  | 3325 | dp_joined_to_trans = B_FALSE; | 
|  | 3326 | error = 0; | 
|  | 3327 | ip = NULL; | 
|  | 3328 | tp = NULL; | 
|  | 3329 |  | 
|  | 3330 | vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 3331 |  | 
|  | 3332 | mp = dp->i_mount; | 
|  | 3333 |  | 
|  | 3334 | if (XFS_FORCED_SHUTDOWN(mp)) | 
|  | 3335 | return XFS_ERROR(EIO); | 
|  | 3336 |  | 
|  | 3337 | link_namelen = VNAMELEN(dentry); | 
|  | 3338 |  | 
|  | 3339 | /* | 
|  | 3340 | * Check component lengths of the target path name. | 
|  | 3341 | */ | 
|  | 3342 | pathlen = strlen(target_path); | 
|  | 3343 | if (pathlen >= MAXPATHLEN)      /* total string too long */ | 
|  | 3344 | return XFS_ERROR(ENAMETOOLONG); | 
|  | 3345 | if (pathlen >= MAXNAMELEN) {    /* is any component too long? */ | 
|  | 3346 | int len, total; | 
|  | 3347 | char *path; | 
|  | 3348 |  | 
|  | 3349 | for (total = 0, path = target_path; total < pathlen;) { | 
|  | 3350 | /* | 
|  | 3351 | * Skip any slashes. | 
|  | 3352 | */ | 
|  | 3353 | while (*path == '/') { | 
|  | 3354 | total++; | 
|  | 3355 | path++; | 
|  | 3356 | } | 
|  | 3357 |  | 
|  | 3358 | /* | 
|  | 3359 | * Count up to the next slash or end of path. | 
|  | 3360 | * Error out if the component is bigger than MAXNAMELEN. | 
|  | 3361 | */ | 
|  | 3362 | for (len = 0; *path != '/' && total < pathlen; total++, path++) { | 
|  | 3363 | if (++len >= MAXNAMELEN) { | 
|  | 3364 | error = ENAMETOOLONG; | 
|  | 3365 | return error; | 
|  | 3366 | } | 
|  | 3367 | } | 
|  | 3368 | } | 
|  | 3369 | } | 
|  | 3370 |  | 
|  | 3371 | if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_SYMLINK)) { | 
|  | 3372 | error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dir_vp, | 
|  | 3373 | DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, | 
|  | 3374 | link_name, target_path, 0, 0, 0); | 
|  | 3375 | if (error) | 
|  | 3376 | return error; | 
|  | 3377 | } | 
|  | 3378 |  | 
|  | 3379 | /* Return through std_return after this point. */ | 
|  | 3380 |  | 
|  | 3381 | udqp = gdqp = NULL; | 
| Nathan Scott | 365ca83 | 2005-06-21 15:39:12 +1000 | [diff] [blame] | 3382 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) | 
|  | 3383 | prid = dp->i_d.di_projid; | 
|  | 3384 | else if (vap->va_mask & XFS_AT_PROJID) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3385 | prid = (xfs_prid_t)vap->va_projid; | 
|  | 3386 | else | 
|  | 3387 | prid = (xfs_prid_t)dfltprid; | 
|  | 3388 |  | 
|  | 3389 | /* | 
|  | 3390 | * Make sure that we have allocated dquot(s) on disk. | 
|  | 3391 | */ | 
|  | 3392 | error = XFS_QM_DQVOPALLOC(mp, dp, | 
| Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 3393 | current_fsuid(credp), current_fsgid(credp), prid, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3394 | XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); | 
|  | 3395 | if (error) | 
|  | 3396 | goto std_return; | 
|  | 3397 |  | 
|  | 3398 | tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK); | 
|  | 3399 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 
|  | 3400 | /* | 
|  | 3401 | * Can the symlink fit into the inode data fork? | 
|  | 3402 | * There can't be any attributes yet, so we get the whole variable part. | 
|  | 3403 | */ | 
|  | 3404 | if (pathlen <= XFS_LITINO(mp)) | 
|  | 3405 | fs_blocks = 0; | 
|  | 3406 | else | 
|  | 3407 | fs_blocks = XFS_B_TO_FSB(mp, pathlen); | 
|  | 3408 | resblks = XFS_SYMLINK_SPACE_RES(mp, link_namelen, fs_blocks); | 
|  | 3409 | error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0, | 
|  | 3410 | XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); | 
|  | 3411 | if (error == ENOSPC && fs_blocks == 0) { | 
|  | 3412 | resblks = 0; | 
|  | 3413 | error = xfs_trans_reserve(tp, 0, XFS_SYMLINK_LOG_RES(mp), 0, | 
|  | 3414 | XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); | 
|  | 3415 | } | 
|  | 3416 | if (error) { | 
|  | 3417 | cancel_flags = 0; | 
|  | 3418 | dp = NULL; | 
|  | 3419 | goto error_return; | 
|  | 3420 | } | 
|  | 3421 |  | 
|  | 3422 | xfs_ilock(dp, XFS_ILOCK_EXCL); | 
|  | 3423 |  | 
|  | 3424 | /* | 
|  | 3425 | * Check whether the directory allows new symlinks or not. | 
|  | 3426 | */ | 
|  | 3427 | if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) { | 
|  | 3428 | error = XFS_ERROR(EPERM); | 
|  | 3429 | goto error_return; | 
|  | 3430 | } | 
|  | 3431 |  | 
|  | 3432 | /* | 
|  | 3433 | * Reserve disk quota : blocks and inode. | 
|  | 3434 | */ | 
|  | 3435 | error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); | 
|  | 3436 | if (error) | 
|  | 3437 | goto error_return; | 
|  | 3438 |  | 
|  | 3439 | /* | 
|  | 3440 | * Check for ability to enter directory entry, if no space reserved. | 
|  | 3441 | */ | 
|  | 3442 | if (resblks == 0 && | 
|  | 3443 | (error = XFS_DIR_CANENTER(mp, tp, dp, link_name, link_namelen))) | 
|  | 3444 | goto error_return; | 
|  | 3445 | /* | 
|  | 3446 | * Initialize the bmap freelist prior to calling either | 
|  | 3447 | * bmapi or the directory create code. | 
|  | 3448 | */ | 
|  | 3449 | XFS_BMAP_INIT(&free_list, &first_block); | 
|  | 3450 |  | 
|  | 3451 | /* | 
|  | 3452 | * Allocate an inode for the symlink. | 
|  | 3453 | */ | 
|  | 3454 | error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (vap->va_mode&~S_IFMT), | 
|  | 3455 | 1, 0, credp, prid, resblks > 0, &ip, NULL); | 
|  | 3456 | if (error) { | 
|  | 3457 | if (error == ENOSPC) | 
|  | 3458 | goto error_return; | 
|  | 3459 | goto error1; | 
|  | 3460 | } | 
|  | 3461 | ITRACE(ip); | 
|  | 3462 |  | 
|  | 3463 | VN_HOLD(dir_vp); | 
|  | 3464 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 
|  | 3465 | dp_joined_to_trans = B_TRUE; | 
|  | 3466 |  | 
|  | 3467 | /* | 
|  | 3468 | * Also attach the dquot(s) to it, if applicable. | 
|  | 3469 | */ | 
|  | 3470 | XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp); | 
|  | 3471 |  | 
|  | 3472 | if (resblks) | 
|  | 3473 | resblks -= XFS_IALLOC_SPACE_RES(mp); | 
|  | 3474 | /* | 
|  | 3475 | * If the symlink will fit into the inode, write it inline. | 
|  | 3476 | */ | 
|  | 3477 | if (pathlen <= XFS_IFORK_DSIZE(ip)) { | 
|  | 3478 | xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK); | 
|  | 3479 | memcpy(ip->i_df.if_u1.if_data, target_path, pathlen); | 
|  | 3480 | ip->i_d.di_size = pathlen; | 
|  | 3481 |  | 
|  | 3482 | /* | 
|  | 3483 | * The inode was initially created in extent format. | 
|  | 3484 | */ | 
|  | 3485 | ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT); | 
|  | 3486 | ip->i_df.if_flags |= XFS_IFINLINE; | 
|  | 3487 |  | 
|  | 3488 | ip->i_d.di_format = XFS_DINODE_FMT_LOCAL; | 
|  | 3489 | xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE); | 
|  | 3490 |  | 
|  | 3491 | } else { | 
|  | 3492 | first_fsb = 0; | 
|  | 3493 | nmaps = SYMLINK_MAPS; | 
|  | 3494 |  | 
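|  |  | /* | 
|  |  | * The target does not fit inline, so allocate up to SYMLINK_MAPS | 
|  |  | * extents worth of blocks for it here and copy the path into the | 
|  |  | * newly allocated buffers below, logging each chunk as we go. | 
|  |  | */ | 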
|  | 3495 | error = xfs_bmapi(tp, ip, first_fsb, fs_blocks, | 
|  | 3496 | XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, | 
|  | 3497 | &first_block, resblks, mval, &nmaps, | 
|  | 3498 | &free_list); | 
|  | 3499 | if (error) { | 
|  | 3500 | goto error1; | 
|  | 3501 | } | 
|  | 3502 |  | 
|  | 3503 | if (resblks) | 
|  | 3504 | resblks -= fs_blocks; | 
|  | 3505 | ip->i_d.di_size = pathlen; | 
|  | 3506 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 
|  | 3507 |  | 
|  | 3508 | cur_chunk = target_path; | 
|  | 3509 | for (n = 0; n < nmaps; n++) { | 
|  | 3510 | d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); | 
|  | 3511 | byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); | 
|  | 3512 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, | 
|  | 3513 | BTOBB(byte_cnt), 0); | 
|  | 3514 | ASSERT(bp && !XFS_BUF_GETERROR(bp)); | 
|  | 3515 | if (pathlen < byte_cnt) { | 
|  | 3516 | byte_cnt = pathlen; | 
|  | 3517 | } | 
|  | 3518 | pathlen -= byte_cnt; | 
|  | 3519 |  | 
|  | 3520 | memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt); | 
|  | 3521 | cur_chunk += byte_cnt; | 
|  | 3522 |  | 
|  | 3523 | xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1); | 
|  | 3524 | } | 
|  | 3525 | } | 
|  | 3526 |  | 
|  | 3527 | /* | 
|  | 3528 | * Create the directory entry for the symlink. | 
|  | 3529 | */ | 
|  | 3530 | error = XFS_DIR_CREATENAME(mp, tp, dp, link_name, link_namelen, | 
|  | 3531 | ip->i_ino, &first_block, &free_list, resblks); | 
|  | 3532 | if (error) { | 
|  | 3533 | goto error1; | 
|  | 3534 | } | 
|  | 3535 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 
|  | 3536 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); | 
|  | 3537 |  | 
|  | 3538 | /* | 
|  | 3539 | * Bump the in memory version number of the parent directory | 
|  | 3540 | * so that other processes accessing it will recognize that | 
|  | 3541 | * the directory has changed. | 
|  | 3542 | */ | 
|  | 3543 | dp->i_gen++; | 
|  | 3544 |  | 
|  | 3545 | /* | 
|  | 3546 | * If this is a synchronous mount, make sure that the | 
|  | 3547 | * symlink transaction goes to disk before returning to | 
|  | 3548 | * the user. | 
|  | 3549 | */ | 
|  | 3550 | if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { | 
|  | 3551 | xfs_trans_set_sync(tp); | 
|  | 3552 | } | 
|  | 3553 |  | 
|  | 3554 | /* | 
|  | 3555 | * xfs_trans_commit normally decrements the vnode ref count | 
|  | 3556 | * when it unlocks the inode. Since we want to return the | 
|  | 3557 | * vnode to the caller, we bump the vnode ref count now. | 
|  | 3558 | */ | 
|  | 3559 | IHOLD(ip); | 
|  | 3560 |  | 
|  | 3561 | error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); | 
|  | 3562 | if (error) { | 
|  | 3563 | goto error2; | 
|  | 3564 | } | 
|  | 3565 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 3566 | XFS_QM_DQRELE(mp, udqp); | 
|  | 3567 | XFS_QM_DQRELE(mp, gdqp); | 
|  | 3568 |  | 
|  | 3569 | /* Fall through to std_return with error = 0 or errno from | 
|  | 3570 | * xfs_trans_commit	*/ | 
|  | 3571 | std_return: | 
|  | 3572 | if (DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp), | 
|  | 3573 | DM_EVENT_POSTSYMLINK)) { | 
|  | 3574 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK, | 
|  | 3575 | dir_vp, DM_RIGHT_NULL, | 
|  | 3576 | error ? NULL : XFS_ITOV(ip), | 
|  | 3577 | DM_RIGHT_NULL, link_name, target_path, | 
|  | 3578 | 0, error, 0); | 
|  | 3579 | } | 
|  | 3580 |  | 
|  | 3581 | if (!error) { | 
|  | 3582 | vnode_t *vp; | 
|  | 3583 |  | 
|  | 3584 | ASSERT(ip); | 
|  | 3585 | vp = XFS_ITOV(ip); | 
|  | 3586 | *vpp = vp; | 
|  | 3587 | } | 
|  | 3588 | return error; | 
|  | 3589 |  | 
|  | 3590 | error2: | 
|  | 3591 | IRELE(ip); | 
|  | 3592 | error1: | 
|  | 3593 | xfs_bmap_cancel(&free_list); | 
|  | 3594 | cancel_flags |= XFS_TRANS_ABORT; | 
|  | 3595 | error_return: | 
|  | 3596 | xfs_trans_cancel(tp, cancel_flags); | 
|  | 3597 | XFS_QM_DQRELE(mp, udqp); | 
|  | 3598 | XFS_QM_DQRELE(mp, gdqp); | 
|  | 3599 |  | 
|  | 3600 | if (!dp_joined_to_trans && (dp != NULL)) { | 
|  | 3601 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 
|  | 3602 | } | 
|  | 3603 |  | 
|  | 3604 | goto std_return; | 
|  | 3605 | } | 
|  | 3606 |  | 
|  | 3607 |  | 
|  | 3608 | /* | 
|  | 3609 | * xfs_fid2 | 
|  | 3610 | * | 
|  | 3611 | * A fid routine that takes a pointer to a previously allocated | 
|  | 3612 | * fid structure (like xfs_fast_fid) but uses a 64 bit inode number. | 
|  | 3613 | */ | 
|  | 3614 | STATIC int | 
|  | 3615 | xfs_fid2( | 
|  | 3616 | bhv_desc_t	*bdp, | 
|  | 3617 | fid_t		*fidp) | 
|  | 3618 | { | 
|  | 3619 | xfs_inode_t	*ip; | 
|  | 3620 | xfs_fid2_t	*xfid; | 
|  | 3621 |  | 
|  | 3622 | vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__, | 
|  | 3623 | (inst_t *)__return_address); | 
|  | 3624 | ASSERT(sizeof(fid_t) >= sizeof(xfs_fid2_t)); | 
|  | 3625 |  | 
|  | 3626 | xfid = (xfs_fid2_t *)fidp; | 
|  | 3627 | ip = XFS_BHVTOI(bdp); | 
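|  |  | /* | 
|  |  | * The fid length covers only the payload that follows the | 
|  |  | * length field, so subtract the field itself from the size. | 
|  |  | */ | 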
|  | 3628 | xfid->fid_len = sizeof(xfs_fid2_t) - sizeof(xfid->fid_len); | 
|  | 3629 | xfid->fid_pad = 0; | 
|  | 3630 | /* | 
|  | 3631 | * use memcpy because the inode number is a long long and there's no | 
|  | 3632 | * assurance that xfid->fid_ino is properly aligned. | 
|  | 3633 | */ | 
|  | 3634 | memcpy(&xfid->fid_ino, &ip->i_ino, sizeof(xfid->fid_ino)); | 
|  | 3635 | xfid->fid_gen = ip->i_d.di_gen; | 
|  | 3636 |  | 
|  | 3637 | return 0; | 
|  | 3638 | } | 
|  | 3639 |  | 
|  | 3640 |  | 
|  | 3641 | /* | 
|  | 3642 | * xfs_rwlock | 
|  | 3643 | */ | 
|  | 3644 | int | 
|  | 3645 | xfs_rwlock( | 
|  | 3646 | bhv_desc_t	*bdp, | 
|  | 3647 | vrwlock_t	locktype) | 
|  | 3648 | { | 
|  | 3649 | xfs_inode_t	*ip; | 
|  | 3650 | vnode_t		*vp; | 
|  | 3651 |  | 
|  | 3652 | vp = BHV_TO_VNODE(bdp); | 
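|  |  | /* | 
|  |  | * Directory I/O does not use the iolock; xfs_readdir takes the | 
|  |  | * inode lock directly, so there is nothing to lock here. | 
|  |  | */ | 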
|  | 3653 | if (vp->v_type == VDIR) | 
|  | 3654 | return 1; | 
|  | 3655 | ip = XFS_BHVTOI(bdp); | 
|  | 3656 | if (locktype == VRWLOCK_WRITE) { | 
|  | 3657 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | 
|  | 3658 | } else if (locktype == VRWLOCK_TRY_READ) { | 
|  | 3659 | return (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)); | 
|  | 3660 | } else if (locktype == VRWLOCK_TRY_WRITE) { | 
|  | 3661 | return (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)); | 
|  | 3662 | } else { | 
|  | 3663 | ASSERT((locktype == VRWLOCK_READ) || | 
|  | 3664 | (locktype == VRWLOCK_WRITE_DIRECT)); | 
|  | 3665 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 
|  | 3666 | } | 
|  | 3667 |  | 
|  | 3668 | return 1; | 
|  | 3669 | } | 
|  | 3670 |  | 
|  | 3671 |  | 
|  | 3672 | /* | 
|  | 3673 | * xfs_rwunlock | 
|  | 3674 | */ | 
|  | 3675 | void | 
|  | 3676 | xfs_rwunlock( | 
|  | 3677 | bhv_desc_t	*bdp, | 
|  | 3678 | vrwlock_t	locktype) | 
|  | 3679 | { | 
|  | 3680 | xfs_inode_t     *ip; | 
|  | 3681 | vnode_t		*vp; | 
|  | 3682 |  | 
|  | 3683 | vp = BHV_TO_VNODE(bdp); | 
|  | 3684 | if (vp->v_type == VDIR) | 
|  | 3685 | return; | 
|  | 3686 | ip = XFS_BHVTOI(bdp); | 
|  | 3687 | if (locktype == VRWLOCK_WRITE) { | 
|  | 3688 | /* | 
|  | 3689 | * In the write case, we may have added a new entry to | 
|  | 3690 | * the reference cache.  That may have stored, in this | 
|  | 3691 | * inode, a pointer to another inode that needs releasing. | 
|  | 3692 | * If it is there, clear the pointer and release that inode | 
|  | 3693 | * after unlocking this one. | 
|  | 3694 | */ | 
|  | 3695 | xfs_refcache_iunlock(ip, XFS_IOLOCK_EXCL); | 
|  | 3696 | } else { | 
|  | 3697 | ASSERT((locktype == VRWLOCK_READ) || | 
|  | 3698 | (locktype == VRWLOCK_WRITE_DIRECT)); | 
|  | 3699 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | 
|  | 3700 | } | 
|  | 3701 | return; | 
|  | 3702 | } | 
|  | 3703 |  | 
|  | 3704 | STATIC int | 
|  | 3705 | xfs_inode_flush( | 
|  | 3706 | bhv_desc_t	*bdp, | 
|  | 3707 | int		flags) | 
|  | 3708 | { | 
|  | 3709 | xfs_inode_t	*ip; | 
|  | 3710 | xfs_mount_t	*mp; | 
|  | 3711 | xfs_inode_log_item_t *iip; | 
|  | 3712 | int		error = 0; | 
|  | 3713 |  | 
|  | 3714 | ip = XFS_BHVTOI(bdp); | 
|  | 3715 | mp = ip->i_mount; | 
|  | 3716 | iip = ip->i_itemp; | 
|  | 3717 |  | 
|  | 3718 | if (XFS_FORCED_SHUTDOWN(mp)) | 
|  | 3719 | return XFS_ERROR(EIO); | 
|  | 3720 |  | 
|  | 3721 | /* | 
|  | 3722 | * Bypass inodes which have already been cleaned by | 
|  | 3723 | * the inode flush clustering code inside xfs_iflush | 
|  | 3724 | */ | 
|  | 3725 | if ((ip->i_update_core == 0) && | 
|  | 3726 | ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) | 
|  | 3727 | return 0; | 
|  | 3728 |  | 
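|  |  | /* | 
|  |  | * When only a log flush is requested, force the log up to the | 
|  |  | * last LSN at which this inode was logged.  If that LSN has | 
|  |  | * already been synced there is nothing more to do. | 
|  |  | */ | 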
|  | 3729 | if (flags & FLUSH_LOG) { | 
|  | 3730 | if (iip && iip->ili_last_lsn) { | 
|  | 3731 | xlog_t		*log = mp->m_log; | 
|  | 3732 | xfs_lsn_t	sync_lsn; | 
|  | 3733 | int		s, log_flags = XFS_LOG_FORCE; | 
|  | 3734 |  | 
|  | 3735 | s = GRANT_LOCK(log); | 
|  | 3736 | sync_lsn = log->l_last_sync_lsn; | 
|  | 3737 | GRANT_UNLOCK(log, s); | 
|  | 3738 |  | 
|  | 3739 | if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0)) | 
|  | 3740 | return 0; | 
|  | 3741 |  | 
|  | 3742 | if (flags & FLUSH_SYNC) | 
|  | 3743 | log_flags |= XFS_LOG_SYNC; | 
|  | 3744 | return xfs_log_force(mp, iip->ili_last_lsn, log_flags); | 
|  | 3745 | } | 
|  | 3746 | } | 
|  | 3747 |  | 
|  | 3748 | /* | 
|  | 3749 | * We make this non-blocking if the inode is contended, | 
|  | 3750 | * returning EAGAIN to indicate to the caller that the | 
|  | 3751 | * flush did not succeed.  This prevents the flush path | 
|  | 3752 | * from blocking on inodes that are inside another | 
|  | 3753 | * operation right now; they get caught later by xfs_sync. | 
|  | 3754 | */ | 
|  | 3755 | if (flags & FLUSH_INODE) { | 
|  | 3756 | int	flush_flags; | 
|  | 3757 |  | 
|  | 3758 | if (xfs_ipincount(ip)) | 
|  | 3759 | return EAGAIN; | 
|  | 3760 |  | 
|  | 3761 | if (flags & FLUSH_SYNC) { | 
|  | 3762 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 
|  | 3763 | xfs_iflock(ip); | 
|  | 3764 | } else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { | 
|  | 3765 | if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) { | 
|  | 3766 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 
|  | 3767 | return EAGAIN; | 
|  | 3768 | } | 
|  | 3769 | } else { | 
|  | 3770 | return EAGAIN; | 
|  | 3771 | } | 
|  | 3772 |  | 
|  | 3773 | if (flags & FLUSH_SYNC) | 
|  | 3774 | flush_flags = XFS_IFLUSH_SYNC; | 
|  | 3775 | else | 
|  | 3776 | flush_flags = XFS_IFLUSH_ASYNC; | 
|  | 3777 |  | 
|  | 3778 | error = xfs_iflush(ip, flush_flags); | 
|  | 3779 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 
|  | 3780 | } | 
|  | 3781 |  | 
|  | 3782 | return error; | 
|  | 3783 | } | 
|  | 3784 |  | 
|  | 3785 |  | 
|  | 3786 | int | 
|  | 3787 | xfs_set_dmattrs ( | 
|  | 3788 | bhv_desc_t	*bdp, | 
|  | 3789 | u_int		evmask, | 
|  | 3790 | u_int16_t	state, | 
|  | 3791 | cred_t		*credp) | 
|  | 3792 | { | 
|  | 3793 | xfs_inode_t     *ip; | 
|  | 3794 | xfs_trans_t	*tp; | 
|  | 3795 | xfs_mount_t	*mp; | 
|  | 3796 | int		error; | 
|  | 3797 |  | 
|  | 3798 | if (!capable(CAP_SYS_ADMIN)) | 
|  | 3799 | return XFS_ERROR(EPERM); | 
|  | 3800 |  | 
|  | 3801 | ip = XFS_BHVTOI(bdp); | 
|  | 3802 | mp = ip->i_mount; | 
|  | 3803 |  | 
|  | 3804 | if (XFS_FORCED_SHUTDOWN(mp)) | 
|  | 3805 | return XFS_ERROR(EIO); | 
|  | 3806 |  | 
|  | 3807 | tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS); | 
|  | 3808 | error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES (mp), 0, 0, 0); | 
|  | 3809 | if (error) { | 
|  | 3810 | xfs_trans_cancel(tp, 0); | 
|  | 3811 | return error; | 
|  | 3812 | } | 
|  | 3813 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 3814 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 
|  | 3815 |  | 
|  | 3816 | ip->i_iocore.io_dmevmask = ip->i_d.di_dmevmask = evmask; | 
|  | 3817 | ip->i_iocore.io_dmstate  = ip->i_d.di_dmstate  = state; | 
|  | 3818 |  | 
|  | 3819 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 
|  | 3820 | IHOLD(ip); | 
|  | 3821 | error = xfs_trans_commit(tp, 0, NULL); | 
|  | 3822 |  | 
|  | 3823 | return error; | 
|  | 3824 | } | 
|  | 3825 |  | 
|  | 3826 |  | 
|  | 3827 | /* | 
|  | 3828 | * xfs_reclaim | 
|  | 3829 | */ | 
|  | 3830 | STATIC int | 
|  | 3831 | xfs_reclaim( | 
|  | 3832 | bhv_desc_t	*bdp) | 
|  | 3833 | { | 
|  | 3834 | xfs_inode_t	*ip; | 
|  | 3835 | vnode_t		*vp; | 
|  | 3836 |  | 
|  | 3837 | vp = BHV_TO_VNODE(bdp); | 
|  | 3838 | ip = XFS_BHVTOI(bdp); | 
|  | 3839 |  | 
|  | 3840 | vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 3841 |  | 
|  | 3842 | ASSERT(!VN_MAPPED(vp)); | 
|  | 3843 |  | 
|  | 3844 | /* bad inode, get out here ASAP */ | 
|  | 3845 | if (VN_BAD(vp)) { | 
|  | 3846 | xfs_ireclaim(ip); | 
|  | 3847 | return 0; | 
|  | 3848 | } | 
|  | 3849 |  | 
|  | 3850 | if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { | 
|  | 3851 | if (ip->i_d.di_size > 0) { | 
|  | 3852 | /* | 
|  | 3853 | * Flush and invalidate any data left around that is | 
|  | 3854 | * a part of this file. | 
|  | 3855 | * | 
|  | 3856 | * Get the inode's i/o lock so that buffers are pushed | 
|  | 3857 | * out while holding the proper lock.  We can't hold | 
|  | 3858 | * the inode lock here since flushing out buffers may | 
|  | 3859 | * cause us to try to get the lock in xfs_strategy(). | 
|  | 3860 | * | 
|  | 3861 | * We don't have to call remapf() here, because there | 
|  | 3862 | * cannot be any mapped file references to this vnode | 
|  | 3863 | * since it is being reclaimed. | 
|  | 3864 | */ | 
|  | 3865 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | 
|  | 3866 |  | 
|  | 3867 | /* | 
|  | 3868 | * If we hit an IO error, we need to make sure that the | 
|  | 3869 | * buffer and page caches of file data for | 
|  | 3870 | * the file are tossed away. We don't want to use | 
|  | 3871 | * VOP_FLUSHINVAL_PAGES here because we don't want dirty | 
|  | 3872 | * pages to stay attached to the vnode, but be | 
|  | 3873 | * marked P_BAD. pdflush/vnode_pagebad | 
|  | 3874 | * hates that. | 
|  | 3875 | */ | 
|  | 3876 | if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { | 
|  | 3877 | VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_NONE); | 
|  | 3878 | } else { | 
|  | 3879 | VOP_TOSS_PAGES(vp, 0, -1, FI_NONE); | 
|  | 3880 | } | 
|  | 3881 |  | 
|  | 3882 | ASSERT(VN_CACHED(vp) == 0); | 
|  | 3883 | ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || | 
|  | 3884 | ip->i_delayed_blks == 0); | 
|  | 3885 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 
|  | 3886 | } else if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { | 
|  | 3887 | /* | 
|  | 3888 | * di_size field may not be quite accurate if we're | 
|  | 3889 | * shutting down. | 
|  | 3890 | */ | 
|  | 3891 | VOP_TOSS_PAGES(vp, 0, -1, FI_NONE); | 
|  | 3892 | ASSERT(VN_CACHED(vp) == 0); | 
|  | 3893 | } | 
|  | 3894 | } | 
|  | 3895 |  | 
|  | 3896 | /* If we have nothing to flush with this inode then complete the | 
|  | 3897 | * teardown now, otherwise break the link between the xfs inode | 
|  | 3898 | * and the linux inode and clean up the xfs inode later. This | 
|  | 3899 | * avoids flushing the inode to disk during the delete operation | 
|  | 3900 | * itself. | 
|  | 3901 | */ | 
|  | 3902 | if (!ip->i_update_core && (ip->i_itemp == NULL)) { | 
|  | 3903 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 3904 | xfs_iflock(ip); | 
|  | 3905 | return xfs_finish_reclaim(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC); | 
|  | 3906 | } else { | 
|  | 3907 | xfs_mount_t	*mp = ip->i_mount; | 
|  | 3908 |  | 
|  | 3909 | /* Protect sync from us */ | 
|  | 3910 | XFS_MOUNT_ILOCK(mp); | 
|  | 3911 | vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); | 
|  | 3912 | list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); | 
|  | 3913 | ip->i_flags |= XFS_IRECLAIMABLE; | 
|  | 3914 | XFS_MOUNT_IUNLOCK(mp); | 
|  | 3915 | } | 
|  | 3916 | return 0; | 
|  | 3917 | } | 
|  | 3918 |  | 
|  | 3919 | int | 
|  | 3920 | xfs_finish_reclaim( | 
|  | 3921 | xfs_inode_t	*ip, | 
|  | 3922 | int		locked, | 
|  | 3923 | int		sync_mode) | 
|  | 3924 | { | 
|  | 3925 | xfs_ihash_t	*ih = ip->i_hash; | 
|  | 3926 | vnode_t		*vp = XFS_ITOV_NULL(ip); | 
|  | 3927 | int		error; | 
|  | 3928 |  | 
|  | 3929 | if (vp && VN_BAD(vp)) | 
|  | 3930 | goto reclaim; | 
|  | 3931 |  | 
|  | 3932 | /* The hash lock here protects a thread in xfs_iget_core from | 
|  | 3933 | * racing with us on linking the inode back with a vnode. | 
|  | 3934 | * Once we have the XFS_IRECLAIM flag set it will not touch | 
|  | 3935 | * us. | 
|  | 3936 | */ | 
|  | 3937 | write_lock(&ih->ih_lock); | 
|  | 3938 | if ((ip->i_flags & XFS_IRECLAIM) || | 
|  | 3939 | (!(ip->i_flags & XFS_IRECLAIMABLE) && vp == NULL)) { | 
|  | 3940 | write_unlock(&ih->ih_lock); | 
|  | 3941 | if (locked) { | 
|  | 3942 | xfs_ifunlock(ip); | 
|  | 3943 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 3944 | } | 
|  | 3945 | return(1); | 
|  | 3946 | } | 
|  | 3947 | ip->i_flags |= XFS_IRECLAIM; | 
|  | 3948 | write_unlock(&ih->ih_lock); | 
|  | 3949 |  | 
|  | 3950 | /* | 
|  | 3951 | * If the inode is still dirty, then flush it out.  If the inode | 
|  | 3952 | * is not in the AIL, then it will be OK to flush it delwri as | 
|  | 3953 | * long as xfs_iflush() does not keep any references to the inode. | 
|  | 3954 | * We leave that decision up to xfs_iflush() since it has the | 
|  | 3955 | * knowledge of whether it's OK to simply do a delwri flush of | 
|  | 3956 | * the inode or whether we need to wait until the inode is | 
|  | 3957 | * pulled from the AIL. | 
|  | 3958 | * We get the flush lock regardless, though, just to make sure | 
|  | 3959 | * we don't free it while it is being flushed. | 
|  | 3960 | */ | 
|  | 3961 | if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { | 
|  | 3962 | if (!locked) { | 
|  | 3963 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 3964 | xfs_iflock(ip); | 
|  | 3965 | } | 
|  | 3966 |  | 
|  | 3967 | if (ip->i_update_core || | 
|  | 3968 | ((ip->i_itemp != NULL) && | 
|  | 3969 | (ip->i_itemp->ili_format.ilf_fields != 0))) { | 
|  | 3970 | error = xfs_iflush(ip, sync_mode); | 
|  | 3971 | /* | 
|  | 3972 | * If we hit an error, typically because of filesystem | 
|  | 3973 | * shutdown, we don't need to let vn_reclaim know | 
|  | 3974 | * because we're going to reclaim the inode anyway. | 
|  | 3975 | */ | 
|  | 3976 | if (error) { | 
|  | 3977 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 3978 | goto reclaim; | 
|  | 3979 | } | 
|  | 3980 | xfs_iflock(ip); /* synchronize with xfs_iflush_done */ | 
|  | 3981 | } | 
|  | 3982 |  | 
|  | 3983 | ASSERT(ip->i_update_core == 0); | 
|  | 3984 | ASSERT(ip->i_itemp == NULL || | 
|  | 3985 | ip->i_itemp->ili_format.ilf_fields == 0); | 
|  | 3986 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 3987 | } else if (locked) { | 
|  | 3988 | /* | 
|  | 3989 | * We are not interested in doing an iflush if we're | 
|  | 3990 | * in the process of shutting down the filesystem forcibly. | 
|  | 3991 | * So, just reclaim the inode. | 
|  | 3992 | */ | 
|  | 3993 | xfs_ifunlock(ip); | 
|  | 3994 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 3995 | } | 
|  | 3996 |  | 
|  | 3997 | reclaim: | 
|  | 3998 | xfs_ireclaim(ip); | 
|  | 3999 | return 0; | 
|  | 4000 | } | 
|  | 4001 |  | 
|  | 4002 | int | 
|  | 4003 | xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock) | 
|  | 4004 | { | 
|  | 4005 | int		purged; | 
|  | 4006 | xfs_inode_t	*ip, *n; | 
|  | 4007 | int		done = 0; | 
|  | 4008 |  | 
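|  |  | /* | 
|  |  | * Pull one inode at a time off the mount's deleted-inode list, | 
|  |  | * dropping the mount lock while reclaiming it, and keep looping | 
|  |  | * until a full pass over the list purges nothing. | 
|  |  | */ | 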
|  | 4009 | while (!done) { | 
|  | 4010 | purged = 0; | 
|  | 4011 | XFS_MOUNT_ILOCK(mp); | 
|  | 4012 | list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) { | 
|  | 4013 | if (noblock) { | 
|  | 4014 | if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) | 
|  | 4015 | continue; | 
|  | 4016 | if (xfs_ipincount(ip) || | 
|  | 4017 | !xfs_iflock_nowait(ip)) { | 
|  | 4018 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 4019 | continue; | 
|  | 4020 | } | 
|  | 4021 | } | 
|  | 4022 | XFS_MOUNT_IUNLOCK(mp); | 
|  | 4023 | xfs_finish_reclaim(ip, noblock, | 
|  | 4024 | XFS_IFLUSH_DELWRI_ELSE_ASYNC); | 
|  | 4025 | purged = 1; | 
|  | 4026 | break; | 
|  | 4027 | } | 
|  | 4028 |  | 
|  | 4029 | done = !purged; | 
|  | 4030 | } | 
|  | 4031 |  | 
|  | 4032 | XFS_MOUNT_IUNLOCK(mp); | 
|  | 4033 | return 0; | 
|  | 4034 | } | 
|  | 4035 |  | 
|  | 4036 | /* | 
|  | 4037 | * xfs_alloc_file_space() | 
|  | 4038 | *      This routine allocates disk space for the given file. | 
|  | 4039 | * | 
|  | 4040 | *	If alloc_type == 0, this request is for an ALLOCSP type | 
|  | 4041 | *	request which will change the file size.  In this case, no | 
|  | 4042 | *	DMAPI event will be generated by the call.  A TRUNCATE event | 
|  | 4043 | *	will be generated later by xfs_setattr. | 
|  | 4044 | * | 
|  | 4045 | *	If alloc_type != 0, this request is for a RESVSP type | 
|  | 4046 | *	request, and a DMAPI DM_EVENT_WRITE will be generated if the | 
|  | 4047 | *	lower block boundary byte address is less than the file's | 
|  | 4048 | *	length. | 
|  | 4049 | * | 
|  | 4050 | * RETURNS: | 
|  | 4051 | *       0 on success | 
|  | 4052 | *      errno on error | 
|  | 4053 | * | 
|  | 4054 | */ | 
| Christoph Hellwig | ba0f32d | 2005-06-21 15:36:52 +1000 | [diff] [blame] | 4055 | STATIC int | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4056 | xfs_alloc_file_space( | 
|  | 4057 | xfs_inode_t		*ip, | 
|  | 4058 | xfs_off_t		offset, | 
|  | 4059 | xfs_off_t		len, | 
|  | 4060 | int			alloc_type, | 
|  | 4061 | int			attr_flags) | 
|  | 4062 | { | 
|  | 4063 | xfs_filblks_t		allocated_fsb; | 
|  | 4064 | xfs_filblks_t		allocatesize_fsb; | 
|  | 4065 | int			committed; | 
|  | 4066 | xfs_off_t		count; | 
|  | 4067 | xfs_filblks_t		datablocks; | 
|  | 4068 | int			error; | 
|  | 4069 | xfs_fsblock_t		firstfsb; | 
|  | 4070 | xfs_bmap_free_t		free_list; | 
|  | 4071 | xfs_bmbt_irec_t		*imapp; | 
|  | 4072 | xfs_bmbt_irec_t		imaps[1]; | 
|  | 4073 | xfs_mount_t		*mp; | 
|  | 4074 | int			numrtextents; | 
|  | 4075 | int			reccount; | 
|  | 4076 | uint			resblks; | 
|  | 4077 | int			rt; | 
|  | 4078 | int			rtextsize; | 
|  | 4079 | xfs_fileoff_t		startoffset_fsb; | 
|  | 4080 | xfs_trans_t		*tp; | 
|  | 4081 | int			xfs_bmapi_flags; | 
|  | 4082 |  | 
|  | 4083 | vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); | 
|  | 4084 | mp = ip->i_mount; | 
|  | 4085 |  | 
|  | 4086 | if (XFS_FORCED_SHUTDOWN(mp)) | 
|  | 4087 | return XFS_ERROR(EIO); | 
|  | 4088 |  | 
|  | 4089 | /* | 
|  | 4090 | * determine if this is a realtime file | 
|  | 4091 | */ | 
|  | 4092 | if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) { | 
|  | 4093 | if (ip->i_d.di_extsize) | 
|  | 4094 | rtextsize = ip->i_d.di_extsize; | 
|  | 4095 | else | 
|  | 4096 | rtextsize = mp->m_sb.sb_rextsize; | 
|  | 4097 | } else | 
|  | 4098 | rtextsize = 0; | 
|  | 4099 |  | 
|  | 4100 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 
|  | 4101 | return error; | 
|  | 4102 |  | 
|  | 4103 | if (len <= 0) | 
|  | 4104 | return XFS_ERROR(EINVAL); | 
|  | 4105 |  | 
|  | 4106 | count = len; | 
|  | 4107 | error = 0; | 
|  | 4108 | imapp = &imaps[0]; | 
|  | 4109 | reccount = 1; | 
|  | 4110 | xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); | 
|  | 4111 | startoffset_fsb	= XFS_B_TO_FSBT(mp, offset); | 
|  | 4112 | allocatesize_fsb = XFS_B_TO_FSB(mp, count); | 
|  | 4113 |  | 
|  | 4114 | /*	Generate a DMAPI event if needed.	*/ | 
|  | 4115 | if (alloc_type != 0 && offset < ip->i_d.di_size && | 
|  | 4116 | (attr_flags&ATTR_DMI) == 0  && | 
|  | 4117 | DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) { | 
|  | 4118 | xfs_off_t           end_dmi_offset; | 
|  | 4119 |  | 
|  | 4120 | end_dmi_offset = offset+len; | 
|  | 4121 | if (end_dmi_offset > ip->i_d.di_size) | 
|  | 4122 | end_dmi_offset = ip->i_d.di_size; | 
|  | 4123 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip), | 
|  | 4124 | offset, end_dmi_offset - offset, | 
|  | 4125 | 0, NULL); | 
|  | 4126 | if (error) | 
|  | 4127 | return(error); | 
|  | 4128 | } | 
|  | 4129 |  | 
|  | 4130 | /* | 
|  | 4131 | * allocate file space until done or until there is an error | 
|  | 4132 | */ | 
|  | 4133 | retry: | 
|  | 4134 | while (allocatesize_fsb && !error) { | 
|  | 4135 | /* | 
|  | 4136 | * Determine whether we are reserving space on | 
|  | 4137 | * the data or the realtime partition. | 
|  | 4138 | */ | 
|  | 4139 | if (rt) { | 
|  | 4140 | xfs_fileoff_t s, e; | 
|  | 4141 |  | 
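|  |  | /* | 
|  |  | * Round the request out to realtime extent boundaries: | 
|  |  | * start down, end up, and reserve whole realtime extents | 
|  |  | * instead of data blocks. | 
|  |  | */ | 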
|  | 4142 | s = startoffset_fsb; | 
|  | 4143 | do_div(s, rtextsize); | 
|  | 4144 | s *= rtextsize; | 
|  | 4145 | e = roundup_64(startoffset_fsb + allocatesize_fsb, | 
|  | 4146 | rtextsize); | 
|  | 4147 | numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize; | 
|  | 4148 | datablocks = 0; | 
|  | 4149 | } else { | 
|  | 4150 | datablocks = allocatesize_fsb; | 
|  | 4151 | numrtextents = 0; | 
|  | 4152 | } | 
|  | 4153 |  | 
|  | 4154 | /* | 
|  | 4155 | * allocate and setup the transaction | 
|  | 4156 | */ | 
|  | 4157 | tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); | 
|  | 4158 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks); | 
|  | 4159 | error = xfs_trans_reserve(tp, | 
|  | 4160 | resblks, | 
|  | 4161 | XFS_WRITE_LOG_RES(mp), | 
|  | 4162 | numrtextents, | 
|  | 4163 | XFS_TRANS_PERM_LOG_RES, | 
|  | 4164 | XFS_WRITE_LOG_COUNT); | 
|  | 4165 |  | 
|  | 4166 | /* | 
|  | 4167 | * check for running out of space | 
|  | 4168 | */ | 
|  | 4169 | if (error) { | 
|  | 4170 | /* | 
|  | 4171 | * Free the transaction structure. | 
|  | 4172 | */ | 
|  | 4173 | ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); | 
|  | 4174 | xfs_trans_cancel(tp, 0); | 
|  | 4175 | break; | 
|  | 4176 | } | 
|  | 4177 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
| Nathan Scott | 06d10dd | 2005-06-21 15:48:47 +1000 | [diff] [blame] | 4178 | error = XFS_TRANS_RESERVE_QUOTA(mp, tp, | 
|  | 4179 | ip->i_udquot, ip->i_gdquot, resblks, 0, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4180 | if (error) | 
|  | 4181 | goto error1; | 
|  | 4182 |  | 
|  | 4183 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 
|  | 4184 | xfs_trans_ihold(tp, ip); | 
|  | 4185 |  | 
|  | 4186 | /* | 
|  | 4187 | * issue the bmapi() call to allocate the blocks | 
|  | 4188 | */ | 
|  | 4189 | XFS_BMAP_INIT(&free_list, &firstfsb); | 
|  | 4190 | error = xfs_bmapi(tp, ip, startoffset_fsb, | 
|  | 4191 | allocatesize_fsb, xfs_bmapi_flags, | 
|  | 4192 | &firstfsb, 0, imapp, &reccount, | 
|  | 4193 | &free_list); | 
|  | 4194 | if (error) { | 
|  | 4195 | goto error0; | 
|  | 4196 | } | 
|  | 4197 |  | 
|  | 4198 | /* | 
|  | 4199 | * complete the transaction | 
|  | 4200 | */ | 
|  | 4201 | error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); | 
|  | 4202 | if (error) { | 
|  | 4203 | goto error0; | 
|  | 4204 | } | 
|  | 4205 |  | 
|  | 4206 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 4207 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 4208 | if (error) { | 
|  | 4209 | break; | 
|  | 4210 | } | 
|  | 4211 |  | 
|  | 4212 | allocated_fsb = imapp->br_blockcount; | 
|  | 4213 |  | 
|  | 4214 | if (reccount == 0) { | 
|  | 4215 | error = XFS_ERROR(ENOSPC); | 
|  | 4216 | break; | 
|  | 4217 | } | 
|  | 4218 |  | 
|  | 4219 | startoffset_fsb += allocated_fsb; | 
|  | 4220 | allocatesize_fsb -= allocated_fsb; | 
|  | 4221 | } | 
|  | 4222 | dmapi_enospc_check: | 
|  | 4223 | if (error == ENOSPC && (attr_flags&ATTR_DMI) == 0 && | 
|  | 4224 | DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_NOSPACE)) { | 
|  | 4225 |  | 
|  | 4226 | error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE, | 
|  | 4227 | XFS_ITOV(ip), DM_RIGHT_NULL, | 
|  | 4228 | XFS_ITOV(ip), DM_RIGHT_NULL, | 
|  | 4229 | NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ | 
|  | 4230 | if (error == 0) | 
|  | 4231 | goto retry;	/* Maybe DMAPI app. has made space */ | 
|  | 4232 | /* else fall through with the error from XFS_SEND_NAMESP */ | 
|  | 4233 | } | 
|  | 4234 |  | 
|  | 4235 | return error; | 
|  | 4236 |  | 
|  | 4237 | error0: | 
|  | 4238 | xfs_bmap_cancel(&free_list); | 
|  | 4239 | error1: | 
|  | 4240 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 
|  | 4241 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 4242 | goto dmapi_enospc_check; | 
|  | 4243 | } | 
|  | 4244 |  | 
|  | 4245 | /* | 
|  | 4246 | * Zero file bytes between startoff and endoff inclusive. | 
|  | 4247 | * The iolock is held exclusive and no blocks are buffered. | 
|  | 4248 | */ | 
|  | 4249 | STATIC int | 
|  | 4250 | xfs_zero_remaining_bytes( | 
|  | 4251 | xfs_inode_t		*ip, | 
|  | 4252 | xfs_off_t		startoff, | 
|  | 4253 | xfs_off_t		endoff) | 
|  | 4254 | { | 
|  | 4255 | xfs_bmbt_irec_t		imap; | 
|  | 4256 | xfs_fileoff_t		offset_fsb; | 
|  | 4257 | xfs_off_t		lastoffset; | 
|  | 4258 | xfs_off_t		offset; | 
|  | 4259 | xfs_buf_t		*bp; | 
|  | 4260 | xfs_mount_t		*mp = ip->i_mount; | 
|  | 4261 | int			nimap; | 
|  | 4262 | int			error = 0; | 
|  | 4263 |  | 
|  | 4264 | bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize, | 
|  | 4265 | ip->i_d.di_flags & XFS_DIFLAG_REALTIME ? | 
|  | 4266 | mp->m_rtdev_targp : mp->m_ddev_targp); | 
|  | 4267 |  | 
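|  |  | /* | 
|  |  | * Walk the range one mapping at a time.  For each written, | 
|  |  | * allocated block, read it in, zero the bytes that fall inside | 
|  |  | * [startoff, endoff], and write it back.  Holes and unwritten | 
|  |  | * extents already read back as zeroes, so they are skipped. | 
|  |  | */ | 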
|  | 4268 | for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { | 
|  | 4269 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 
|  | 4270 | nimap = 1; | 
|  | 4271 | error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, NULL, 0, &imap, | 
|  | 4272 | &nimap, NULL); | 
|  | 4273 | if (error || nimap < 1) | 
|  | 4274 | break; | 
|  | 4275 | ASSERT(imap.br_blockcount >= 1); | 
|  | 4276 | ASSERT(imap.br_startoff == offset_fsb); | 
|  | 4277 | lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1; | 
|  | 4278 | if (lastoffset > endoff) | 
|  | 4279 | lastoffset = endoff; | 
|  | 4280 | if (imap.br_startblock == HOLESTARTBLOCK) | 
|  | 4281 | continue; | 
|  | 4282 | ASSERT(imap.br_startblock != DELAYSTARTBLOCK); | 
|  | 4283 | if (imap.br_state == XFS_EXT_UNWRITTEN) | 
|  | 4284 | continue; | 
|  | 4285 | XFS_BUF_UNDONE(bp); | 
|  | 4286 | XFS_BUF_UNWRITE(bp); | 
|  | 4287 | XFS_BUF_READ(bp); | 
|  | 4288 | XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock)); | 
|  | 4289 | xfsbdstrat(mp, bp); | 
|  | 4290 | if ((error = xfs_iowait(bp))) { | 
|  | 4291 | xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", | 
|  | 4292 | mp, bp, XFS_BUF_ADDR(bp)); | 
|  | 4293 | break; | 
|  | 4294 | } | 
|  | 4295 | memset(XFS_BUF_PTR(bp) + | 
|  | 4296 | (offset - XFS_FSB_TO_B(mp, imap.br_startoff)), | 
|  | 4297 | 0, lastoffset - offset + 1); | 
|  | 4298 | XFS_BUF_UNDONE(bp); | 
|  | 4299 | XFS_BUF_UNREAD(bp); | 
|  | 4300 | XFS_BUF_WRITE(bp); | 
|  | 4301 | xfsbdstrat(mp, bp); | 
|  | 4302 | if ((error = xfs_iowait(bp))) { | 
|  | 4303 | xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", | 
|  | 4304 | mp, bp, XFS_BUF_ADDR(bp)); | 
|  | 4305 | break; | 
|  | 4306 | } | 
|  | 4307 | } | 
|  | 4308 | xfs_buf_free(bp); | 
|  | 4309 | return error; | 
|  | 4310 | } | 
|  | 4311 |  | 
|  | 4312 | /* | 
|  | 4313 | * xfs_free_file_space() | 
|  | 4314 | *      This routine frees disk space for the given file. | 
|  | 4315 | * | 
|  | 4316 | *	This routine is only called by xfs_change_file_space | 
|  | 4317 | *	for an UNRESVSP type call. | 
|  | 4318 | * | 
|  | 4319 | * RETURNS: | 
|  | 4320 | *       0 on success | 
|  | 4321 | *      errno on error | 
|  | 4322 | * | 
|  | 4323 | */ | 
|  | 4324 | STATIC int | 
|  | 4325 | xfs_free_file_space( | 
|  | 4326 | xfs_inode_t		*ip, | 
|  | 4327 | xfs_off_t		offset, | 
|  | 4328 | xfs_off_t		len, | 
|  | 4329 | int			attr_flags) | 
|  | 4330 | { | 
| Christoph Hellwig | bd5a876 | 2005-06-21 15:47:39 +1000 | [diff] [blame] | 4331 | vnode_t			*vp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4332 | int			committed; | 
|  | 4333 | int			done; | 
|  | 4334 | xfs_off_t		end_dmi_offset; | 
|  | 4335 | xfs_fileoff_t		endoffset_fsb; | 
|  | 4336 | int			error; | 
|  | 4337 | xfs_fsblock_t		firstfsb; | 
|  | 4338 | xfs_bmap_free_t		free_list; | 
|  | 4339 | xfs_off_t		ilen; | 
|  | 4340 | xfs_bmbt_irec_t		imap; | 
|  | 4341 | xfs_off_t		ioffset; | 
|  | 4342 | xfs_extlen_t		mod=0; | 
|  | 4343 | xfs_mount_t		*mp; | 
|  | 4344 | int			nimap; | 
|  | 4345 | uint			resblks; | 
|  | 4346 | int			rounding; | 
|  | 4347 | int			rt; | 
|  | 4348 | xfs_fileoff_t		startoffset_fsb; | 
|  | 4349 | xfs_trans_t		*tp; | 
| Dean Roehrich | 5fcbab3 | 2005-05-05 13:27:19 -0700 | [diff] [blame] | 4350 | int			need_iolock = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4351 |  | 
| Christoph Hellwig | bd5a876 | 2005-06-21 15:47:39 +1000 | [diff] [blame] | 4352 | vp = XFS_ITOV(ip); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4353 | mp = ip->i_mount; | 
|  | 4354 |  | 
| Christoph Hellwig | bd5a876 | 2005-06-21 15:47:39 +1000 | [diff] [blame] | 4355 | vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 4356 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4357 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 
|  | 4358 | return error; | 
|  | 4359 |  | 
|  | 4360 | error = 0; | 
|  | 4361 | if (len <= 0)	/* if nothing being freed */ | 
|  | 4362 | return error; | 
|  | 4363 | rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME); | 
|  | 4364 | startoffset_fsb	= XFS_B_TO_FSB(mp, offset); | 
|  | 4365 | end_dmi_offset = offset + len; | 
|  | 4366 | endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); | 
|  | 4367 |  | 
|  | 4368 | if (offset < ip->i_d.di_size && | 
|  | 4369 | (attr_flags & ATTR_DMI) == 0 && | 
|  | 4370 | DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) { | 
|  | 4371 | if (end_dmi_offset > ip->i_d.di_size) | 
|  | 4372 | end_dmi_offset = ip->i_d.di_size; | 
| Christoph Hellwig | bd5a876 | 2005-06-21 15:47:39 +1000 | [diff] [blame] | 4373 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4374 | offset, end_dmi_offset - offset, | 
|  | 4375 | AT_DELAY_FLAG(attr_flags), NULL); | 
|  | 4376 | if (error) | 
|  | 4377 | return(error); | 
|  | 4378 | } | 
|  | 4379 |  | 
| Dean Roehrich | 5fcbab3 | 2005-05-05 13:27:19 -0700 | [diff] [blame] | 4380 | ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1); | 
|  | 4381 | if (attr_flags & ATTR_NOLOCK) | 
|  | 4382 | need_iolock = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4383 | if (need_iolock) | 
|  | 4384 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | 
| Dean Roehrich | 5fcbab3 | 2005-05-05 13:27:19 -0700 | [diff] [blame] | 4385 |  | 
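|  |  | /* | 
|  |  | * Round the affected range out to the larger of the filesystem | 
|  |  | * block size and the page size so that every cached page | 
|  |  | * touching the range can be flushed and invalidated below. | 
|  |  | */ | 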
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4386 | rounding = MAX((__uint32_t)(1 << mp->m_sb.sb_blocklog), | 
|  | 4387 | (__uint32_t)NBPP); | 
|  | 4388 | ilen = len + (offset & (rounding - 1)); | 
|  | 4389 | ioffset = offset & ~(rounding - 1); | 
|  | 4390 | if (ilen & (rounding - 1)) | 
|  | 4391 | ilen = (ilen + rounding) & ~(rounding - 1); | 
| Christoph Hellwig | bd5a876 | 2005-06-21 15:47:39 +1000 | [diff] [blame] | 4392 |  | 
|  | 4393 | if (VN_CACHED(vp) != 0) { | 
|  | 4394 | xfs_inval_cached_trace(&ip->i_iocore, ioffset, -1, | 
|  | 4395 | ctooff(offtoct(ioffset)), -1); | 
|  | 4396 | VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(ioffset)), | 
|  | 4397 | -1, FI_REMAPF_LOCKED); | 
|  | 4398 | } | 
|  | 4399 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4400 | /* | 
|  | 4401 | * Need to zero, on disk, the parts of the range we're not freeing. | 
|  | 4402 | * If it's a realtime file and we can't use unwritten extents, then | 
|  | 4403 | * we actually need to zero the extent edges ourselves.  Otherwise | 
|  | 4404 | * xfs_bunmapi will take care of it for us. | 
|  | 4405 | */ | 
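|  |  | /* | 
|  |  | * For example (hypothetical numbers): if sb_rextsize is 4 blocks and | 
|  |  | * the first mapped block sits 2 blocks into a realtime extent | 
|  |  | * (mod = 2), startoffset_fsb is advanced by sb_rextsize - mod = 2 | 
|  |  | * blocks below, so xfs_bunmapi() is only asked to free whole | 
|  |  | * realtime extents; the bytes skipped this way fall into the | 
|  |  | * xfs_zero_remaining_bytes() ranges computed further down. | 
|  |  | */ | 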
|  | 4406 | if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { | 
|  | 4407 | nimap = 1; | 
|  | 4408 | error = xfs_bmapi(NULL, ip, startoffset_fsb, 1, 0, NULL, 0, | 
|  | 4409 | &imap, &nimap, NULL); | 
|  | 4410 | if (error) | 
|  | 4411 | goto out_unlock_iolock; | 
|  | 4412 | ASSERT(nimap == 0 || nimap == 1); | 
|  | 4413 | if (nimap && imap.br_startblock != HOLESTARTBLOCK) { | 
|  | 4414 | xfs_daddr_t	block; | 
|  | 4415 |  | 
|  | 4416 | ASSERT(imap.br_startblock != DELAYSTARTBLOCK); | 
|  | 4417 | block = imap.br_startblock; | 
|  | 4418 | mod = do_div(block, mp->m_sb.sb_rextsize); | 
|  | 4419 | if (mod) | 
|  | 4420 | startoffset_fsb += mp->m_sb.sb_rextsize - mod; | 
|  | 4421 | } | 
|  | 4422 | nimap = 1; | 
|  | 4423 | error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, 1, 0, NULL, 0, | 
|  | 4424 | &imap, &nimap, NULL); | 
|  | 4425 | if (error) | 
|  | 4426 | goto out_unlock_iolock; | 
|  | 4427 | ASSERT(nimap == 0 || nimap == 1); | 
|  | 4428 | if (nimap && imap.br_startblock != HOLESTARTBLOCK) { | 
|  | 4429 | ASSERT(imap.br_startblock != DELAYSTARTBLOCK); | 
|  | 4430 | mod++; | 
|  | 4431 | if (mod && (mod != mp->m_sb.sb_rextsize)) | 
|  | 4432 | endoffset_fsb -= mod; | 
|  | 4433 | } | 
|  | 4434 | } | 
|  | 4435 | if ((done = (endoffset_fsb <= startoffset_fsb))) | 
|  | 4436 | /* | 
|  | 4437 | * Nothing to free -- just one contiguous byte range to zero | 
|  | 4438 | */ | 
|  | 4439 | error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1); | 
|  | 4440 | else { | 
|  | 4441 | /* | 
|  | 4442 | * Some whole blocks to free below; possibly two partial-block edges to zero here | 
|  | 4443 | */ | 
|  | 4444 | if (offset < XFS_FSB_TO_B(mp, startoffset_fsb)) | 
|  | 4445 | error = xfs_zero_remaining_bytes(ip, offset, | 
|  | 4446 | XFS_FSB_TO_B(mp, startoffset_fsb) - 1); | 
|  | 4447 | if (!error && | 
|  | 4448 | XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len) | 
|  | 4449 | error = xfs_zero_remaining_bytes(ip, | 
|  | 4450 | XFS_FSB_TO_B(mp, endoffset_fsb), | 
|  | 4451 | offset + len - 1); | 
|  | 4452 | } | 
|  | 4453 |  | 
|  | 4454 | /* | 
|  | 4455 | * free the file space a transaction's worth at a time: xfs_bunmapi may not unmap everything in one go, so loop until it reports done or an error occurs | 
|  | 4456 | */ | 
|  | 4457 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); | 
|  | 4458 | while (!error && !done) { | 
|  | 4459 |  | 
|  | 4460 | /* | 
|  | 4461 | * allocate and set up the transaction | 
|  | 4462 | */ | 
|  | 4463 | tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); | 
|  | 4464 | error = xfs_trans_reserve(tp, | 
|  | 4465 | resblks, | 
|  | 4466 | XFS_WRITE_LOG_RES(mp), | 
|  | 4467 | 0, | 
|  | 4468 | XFS_TRANS_PERM_LOG_RES, | 
|  | 4469 | XFS_WRITE_LOG_COUNT); | 
|  | 4470 |  | 
|  | 4471 | /* | 
|  | 4472 | * check for running out of space | 
|  | 4473 | */ | 
|  | 4474 | if (error) { | 
|  | 4475 | /* | 
|  | 4476 | * Free the transaction structure. | 
|  | 4477 | */ | 
|  | 4478 | ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); | 
|  | 4479 | xfs_trans_cancel(tp, 0); | 
|  | 4480 | break; | 
|  | 4481 | } | 
|  | 4482 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 4483 | error = XFS_TRANS_RESERVE_QUOTA(mp, tp, | 
|  | 4484 | ip->i_udquot, ip->i_gdquot, resblks, 0, rt ? | 
|  | 4485 | XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); | 
|  | 4486 | if (error) | 
|  | 4487 | goto error1; | 
|  | 4488 |  | 
|  | 4489 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 
|  | 4490 | xfs_trans_ihold(tp, ip); | 
|  | 4491 |  | 
|  | 4492 | /* | 
|  | 4493 | * issue the bunmapi() call to free the blocks | 
|  | 4494 | */ | 
|  | 4495 | XFS_BMAP_INIT(&free_list, &firstfsb); | 
|  | 4496 | error = xfs_bunmapi(tp, ip, startoffset_fsb, | 
|  | 4497 | endoffset_fsb - startoffset_fsb, | 
|  | 4498 | 0, 2, &firstfsb, &free_list, &done); | 
|  | 4499 | if (error) { | 
|  | 4500 | goto error0; | 
|  | 4501 | } | 
|  | 4502 |  | 
|  | 4503 | /* | 
|  | 4504 | * complete the transaction | 
|  | 4505 | */ | 
|  | 4506 | error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); | 
|  | 4507 | if (error) { | 
|  | 4508 | goto error0; | 
|  | 4509 | } | 
|  | 4510 |  | 
|  | 4511 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 
|  | 4512 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 4513 | } | 
|  | 4514 |  | 
|  | 4515 | out_unlock_iolock: | 
|  | 4516 | if (need_iolock) | 
|  | 4517 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 
|  | 4518 | return error; | 
|  | 4519 |  | 
|  | 4520 | error0: | 
|  | 4521 | xfs_bmap_cancel(&free_list); | 
|  | 4522 | error1: | 
|  | 4523 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 
|  | 4524 | xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) : | 
|  | 4525 | XFS_ILOCK_EXCL); | 
|  | 4526 | return error; | 
|  | 4527 | } | 
|  | 4528 |  | 
|  | 4529 | /* | 
|  | 4530 | * xfs_change_file_space() | 
|  | 4531 | *      This routine allocates or frees disk space for the given file. | 
|  | 4532 | *      The user-specified parameters are checked for alignment and size | 
|  | 4533 | *      limitations. | 
|  | 4534 | * | 
|  | 4535 | * RETURNS: | 
|  | 4536 | *       0 on success | 
|  | 4537 | *      errno on error | 
|  | 4538 | * | 
|  | 4539 | */ | 
|  | 4540 | int | 
|  | 4541 | xfs_change_file_space( | 
|  | 4542 | bhv_desc_t	*bdp, | 
|  | 4543 | int		cmd, | 
|  | 4544 | xfs_flock64_t	*bf, | 
|  | 4545 | xfs_off_t	offset, | 
|  | 4546 | cred_t		*credp, | 
|  | 4547 | int		attr_flags) | 
|  | 4548 | { | 
|  | 4549 | int		clrprealloc; | 
|  | 4550 | int		error; | 
|  | 4551 | xfs_fsize_t	fsize; | 
|  | 4552 | xfs_inode_t	*ip; | 
|  | 4553 | xfs_mount_t	*mp; | 
|  | 4554 | int		setprealloc; | 
|  | 4555 | xfs_off_t	startoffset; | 
|  | 4556 | xfs_off_t	llen; | 
|  | 4557 | xfs_trans_t	*tp; | 
|  | 4558 | vattr_t		va; | 
|  | 4559 | vnode_t		*vp; | 
|  | 4560 |  | 
|  | 4561 | vp = BHV_TO_VNODE(bdp); | 
|  | 4562 | vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); | 
|  | 4563 |  | 
|  | 4564 | ip = XFS_BHVTOI(bdp); | 
|  | 4565 | mp = ip->i_mount; | 
|  | 4566 |  | 
|  | 4567 | /* | 
|  | 4568 | * must be a regular file and have write permission | 
|  | 4569 | */ | 
|  | 4570 | if (vp->v_type != VREG) | 
|  | 4571 | return XFS_ERROR(EINVAL); | 
|  | 4572 |  | 
|  | 4573 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 
|  | 4574 |  | 
|  | 4575 | if ((error = xfs_iaccess(ip, S_IWUSR, credp))) { | 
|  | 4576 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 
|  | 4577 | return error; | 
|  | 4578 | } | 
|  | 4579 |  | 
|  | 4580 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 
|  | 4581 |  | 
|  | 4582 | switch (bf->l_whence) { | 
|  | 4583 | case 0: /*SEEK_SET*/ | 
|  | 4584 | break; | 
|  | 4585 | case 1: /*SEEK_CUR*/ | 
|  | 4586 | bf->l_start += offset; | 
|  | 4587 | break; | 
|  | 4588 | case 2: /*SEEK_END*/ | 
|  | 4589 | bf->l_start += ip->i_d.di_size; | 
|  | 4590 | break; | 
|  | 4591 | default: | 
|  | 4592 | return XFS_ERROR(EINVAL); | 
|  | 4593 | } | 
|  | 4594 |  | 
|  | 4595 | llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len; | 
|  | 4596 |  | 
|  | 4597 | if (   (bf->l_start < 0) | 
|  | 4598 | || (bf->l_start > XFS_MAXIOFFSET(mp)) | 
|  | 4599 | || (bf->l_start + llen < 0) | 
|  | 4600 | || (bf->l_start + llen > XFS_MAXIOFFSET(mp))) | 
|  | 4601 | return XFS_ERROR(EINVAL); | 
|  | 4602 |  | 
|  | 4603 | bf->l_whence = 0; | 
|  | 4604 |  | 
|  | 4605 | startoffset = bf->l_start; | 
|  | 4606 | fsize = ip->i_d.di_size; | 
|  | 4607 |  | 
|  | 4608 | /* | 
|  | 4609 | * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve | 
|  | 4610 | * file space. | 
|  | 4611 | * These calls do NOT zero the data space allocated to the file, | 
|  | 4612 | * nor do they change the file size. | 
|  | 4613 | * | 
|  | 4614 | * XFS_IOC_ALLOCSP and XFS_IOC_FREESP will allocate and free file | 
|  | 4615 | * space. | 
|  | 4616 | * These calls cause the new file data to be zeroed and the file | 
|  | 4617 | * size to be changed. | 
|  | 4618 | */ | 
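|  |  | /* | 
|  |  | * Illustrative only -- a hypothetical userspace sketch of how these | 
|  |  | * commands typically arrive (via ioctl(2)/xfsctl(3) on an XFS file; | 
|  |  | * the fd and sizes are made up): | 
|  |  | * | 
|  |  | *	xfs_flock64_t bf = { 0 }; | 
|  |  | *	bf.l_whence = 0;			(SEEK_SET) | 
|  |  | *	bf.l_start  = 0; | 
|  |  | *	bf.l_len    = 16 << 20; | 
|  |  | *	ioctl(fd, XFS_IOC_RESVSP64, &bf);	reserves 16MB, size unchanged | 
|  |  | *	ioctl(fd, XFS_IOC_UNRESVSP64, &bf);	frees it again, size unchanged | 
|  |  | * | 
|  |  | * For XFS_IOC_ALLOCSP64/XFS_IOC_FREESP64 only l_start matters below: | 
|  |  | * it becomes the new file size via xfs_setattr(). | 
|  |  | */ | 
|  |  |  | 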
|  | 4619 | setprealloc = clrprealloc = 0; | 
|  | 4620 |  | 
|  | 4621 | switch (cmd) { | 
|  | 4622 | case XFS_IOC_RESVSP: | 
|  | 4623 | case XFS_IOC_RESVSP64: | 
|  | 4624 | error = xfs_alloc_file_space(ip, startoffset, bf->l_len, | 
|  | 4625 | 1, attr_flags); | 
|  | 4626 | if (error) | 
|  | 4627 | return error; | 
|  | 4628 | setprealloc = 1; | 
|  | 4629 | break; | 
|  | 4630 |  | 
|  | 4631 | case XFS_IOC_UNRESVSP: | 
|  | 4632 | case XFS_IOC_UNRESVSP64: | 
|  | 4633 | if ((error = xfs_free_file_space(ip, startoffset, bf->l_len, | 
|  | 4634 | attr_flags))) | 
|  | 4635 | return error; | 
|  | 4636 | break; | 
|  | 4637 |  | 
|  | 4638 | case XFS_IOC_ALLOCSP: | 
|  | 4639 | case XFS_IOC_ALLOCSP64: | 
|  | 4640 | case XFS_IOC_FREESP: | 
|  | 4641 | case XFS_IOC_FREESP64: | 
|  | 4642 | if (startoffset > fsize) { | 
|  | 4643 | error = xfs_alloc_file_space(ip, fsize, | 
|  | 4644 | startoffset - fsize, 0, attr_flags); | 
|  | 4645 | if (error) | 
|  | 4646 | break; | 
|  | 4647 | } | 
|  | 4648 |  | 
|  | 4649 | va.va_mask = XFS_AT_SIZE; | 
|  | 4650 | va.va_size = startoffset; | 
|  | 4651 |  | 
|  | 4652 | error = xfs_setattr(bdp, &va, attr_flags, credp); | 
|  | 4653 |  | 
|  | 4654 | if (error) | 
|  | 4655 | return error; | 
|  | 4656 |  | 
|  | 4657 | clrprealloc = 1; | 
|  | 4658 | break; | 
|  | 4659 |  | 
|  | 4660 | default: | 
|  | 4661 | ASSERT(0); | 
|  | 4662 | return XFS_ERROR(EINVAL); | 
|  | 4663 | } | 
|  | 4664 |  | 
|  | 4665 | /* | 
|  | 4666 | * update the inode timestamp, mode, and prealloc flag bits | 
|  | 4667 | */ | 
|  | 4668 | tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID); | 
|  | 4669 |  | 
|  | 4670 | if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp), | 
|  | 4671 | 0, 0, 0))) { | 
|  | 4672 | /* ASSERT(0); */ | 
|  | 4673 | xfs_trans_cancel(tp, 0); | 
|  | 4674 | return error; | 
|  | 4675 | } | 
|  | 4676 |  | 
|  | 4677 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 4678 |  | 
|  | 4679 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 
|  | 4680 | xfs_trans_ihold(tp, ip); | 
|  | 4681 |  | 
|  | 4682 | if ((attr_flags & ATTR_DMI) == 0) { | 
|  | 4683 | ip->i_d.di_mode &= ~S_ISUID; | 
|  | 4684 |  | 
|  | 4685 | /* | 
|  | 4686 | * Note that we don't have to worry about disabling | 
|  | 4687 | * mandatory file locking here, because we only clear | 
|  | 4688 | * the S_ISGID bit if the group-execute bit is on; and | 
|  | 4689 | * if group execute is on, mandatory locking could not | 
|  | 4690 | * have been enabled in the first place. | 
|  | 4691 | */ | 
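|  |  | /* | 
|  |  | * For example: mode 02660 (setgid, group execute clear) is the | 
|  |  | * classic mandatory-locking combination and keeps S_ISGID here; | 
|  |  | * mode 02770 has group execute set, so clearing S_ISGID cannot | 
|  |  | * disable mandatory locking -- it was never enabled for that mode. | 
|  |  | */ | 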
|  | 4692 | if (ip->i_d.di_mode & S_IXGRP) | 
|  | 4693 | ip->i_d.di_mode &= ~S_ISGID; | 
|  | 4694 |  | 
|  | 4695 | xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 
|  | 4696 | } | 
|  | 4697 | if (setprealloc) | 
|  | 4698 | ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; | 
|  | 4699 | else if (clrprealloc) | 
|  | 4700 | ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC; | 
|  | 4701 |  | 
|  | 4702 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 
|  | 4703 | xfs_trans_set_sync(tp); | 
|  | 4704 |  | 
|  | 4705 | error = xfs_trans_commit(tp, 0, NULL); | 
|  | 4706 |  | 
|  | 4707 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 
|  | 4708 |  | 
|  | 4709 | return error; | 
|  | 4710 | } | 
|  | 4711 |  | 
|  | 4712 | vnodeops_t xfs_vnodeops = { | 
|  | 4713 | BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS), | 
|  | 4714 | .vop_open		= xfs_open, | 
|  | 4715 | .vop_read		= xfs_read, | 
|  | 4716 | #ifdef HAVE_SENDFILE | 
|  | 4717 | .vop_sendfile		= xfs_sendfile, | 
|  | 4718 | #endif | 
|  | 4719 | .vop_write		= xfs_write, | 
|  | 4720 | .vop_ioctl		= xfs_ioctl, | 
|  | 4721 | .vop_getattr		= xfs_getattr, | 
|  | 4722 | .vop_setattr		= xfs_setattr, | 
|  | 4723 | .vop_access		= xfs_access, | 
|  | 4724 | .vop_lookup		= xfs_lookup, | 
|  | 4725 | .vop_create		= xfs_create, | 
|  | 4726 | .vop_remove		= xfs_remove, | 
|  | 4727 | .vop_link		= xfs_link, | 
|  | 4728 | .vop_rename		= xfs_rename, | 
|  | 4729 | .vop_mkdir		= xfs_mkdir, | 
|  | 4730 | .vop_rmdir		= xfs_rmdir, | 
|  | 4731 | .vop_readdir		= xfs_readdir, | 
|  | 4732 | .vop_symlink		= xfs_symlink, | 
|  | 4733 | .vop_readlink		= xfs_readlink, | 
|  | 4734 | .vop_fsync		= xfs_fsync, | 
|  | 4735 | .vop_inactive		= xfs_inactive, | 
|  | 4736 | .vop_fid2		= xfs_fid2, | 
|  | 4737 | .vop_rwlock		= xfs_rwlock, | 
|  | 4738 | .vop_rwunlock		= xfs_rwunlock, | 
|  | 4739 | .vop_bmap		= xfs_bmap, | 
|  | 4740 | .vop_reclaim		= xfs_reclaim, | 
|  | 4741 | .vop_attr_get		= xfs_attr_get, | 
|  | 4742 | .vop_attr_set		= xfs_attr_set, | 
|  | 4743 | .vop_attr_remove	= xfs_attr_remove, | 
|  | 4744 | .vop_attr_list		= xfs_attr_list, | 
|  | 4745 | .vop_link_removed	= (vop_link_removed_t)fs_noval, | 
|  | 4746 | .vop_vnode_change	= (vop_vnode_change_t)fs_noval, | 
|  | 4747 | .vop_tosspages		= fs_tosspages, | 
|  | 4748 | .vop_flushinval_pages	= fs_flushinval_pages, | 
|  | 4749 | .vop_flush_pages	= fs_flush_pages, | 
|  | 4750 | .vop_release		= xfs_release, | 
|  | 4751 | .vop_iflush		= xfs_inode_flush, | 
|  | 4752 | }; |
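|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative only: callers do not invoke the handlers above | 
|  |  | * directly.  The generic VOP_*() macros in the vnode glue walk the | 
|  |  | * vnode's behavior chain and call whatever is registered in this | 
|  |  | * table, roughly: | 
|  |  | * | 
|  |  | *	VOP_FSYNC(vp, ...)	->  xfs_fsync(...) | 
|  |  | *	VOP_READDIR(vp, ...)	->  xfs_readdir(...) | 
|  |  | * | 
|  |  | * The argument lists above are schematic, not the real macro | 
|  |  | * signatures. | 
|  |  | */ | 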