/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
				struct xfs_perag *pag, struct xfs_inode *ip);

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
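
/*
 * Illustrative cross-reference: the zeroed ip->i_ino above pairs with the
 * recheck a racing RCU lookup performs in xfs_iget_cache_hit() below:
 *
 *	spin_lock(&ip->i_flags_lock);
 *	if (ip->i_ino != ino)
 *		... skip this inode and return EAGAIN so the caller retries.
 */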

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
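
/*
 * Illustrative example (not part of the original file): a typical caller
 * looking up an inode with the ILOCK held exclusively would do:
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	(... use ip ...)
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	IRELE(ip);
 *
 * An EAGAIN from the cache hit/miss paths is retried internally above, so
 * callers only see hard errors such as EINVAL or ENOENT.
 */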

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
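
/*
 * For scale: a batch of 32 inode pointers is 32 * sizeof(void *) = 256
 * bytes of on-stack array on a 64-bit kernel, which keeps the reclaim-path
 * stack footprint modest while still amortising the radix tree lookups.
 */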

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}
| Dave Chinner | 75f3cb1 | 2009-06-08 15:35:14 +0200 | [diff] [blame] | 557 |  | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 558 | 		/* | 
| Dave Chinner | 78ae525 | 2010-09-28 12:28:19 +1000 | [diff] [blame] | 559 | 		 * Grab the inodes before we drop the lock. if we found | 
 | 560 | 		 * nothing, nr == 0 and the loop will be skipped. | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 561 | 		 */ | 
| Dave Chinner | 78ae525 | 2010-09-28 12:28:19 +1000 | [diff] [blame] | 562 | 		for (i = 0; i < nr_found; i++) { | 
 | 563 | 			struct xfs_inode *ip = batch[i]; | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 564 |  | 
| Dave Chinner | 78ae525 | 2010-09-28 12:28:19 +1000 | [diff] [blame] | 565 | 			if (done || xfs_inode_ag_walk_grab(ip)) | 
 | 566 | 				batch[i] = NULL; | 
 | 567 |  | 
 | 568 | 			/* | 
| Dave Chinner | 1a3e8f3 | 2010-12-17 17:29:43 +1100 | [diff] [blame] | 569 | 			 * Update the index for the next lookup. Catch | 
 | 570 | 			 * overflows into the next AG range which can occur if | 
 | 571 | 			 * we have inodes in the last block of the AG and we | 
 | 572 | 			 * are currently pointing to the last inode. | 
 | 573 | 			 * | 
 | 574 | 			 * Because we may see inodes that are from the wrong AG | 
 | 575 | 			 * due to RCU freeing and reallocation, only update the | 
 | 576 | 			 * index if it lies in this AG. It was a race that lead | 
 | 577 | 			 * us to see this inode, so another lookup from the | 
 | 578 | 			 * same index will not find it again. | 
| Dave Chinner | 78ae525 | 2010-09-28 12:28:19 +1000 | [diff] [blame] | 579 | 			 */ | 
| Dave Chinner | 1a3e8f3 | 2010-12-17 17:29:43 +1100 | [diff] [blame] | 580 | 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) | 
 | 581 | 				continue; | 
| Dave Chinner | 78ae525 | 2010-09-28 12:28:19 +1000 | [diff] [blame] | 582 | 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | 
 | 583 | 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | 
 | 584 | 				done = 1; | 
| Dave Chinner | e13de95 | 2010-09-28 12:28:06 +1000 | [diff] [blame] | 585 | 		} | 

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags, args);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'background_prealloc_discard_period' tunable (5m by default).
 */
STATIC void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}
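
/*
 * Sketch (illustrative; the actual setup lives outside this file): the
 * delayed work above is assumed to be initialised once at mount time with
 * something like
 *
 *	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
 *
 * after which each worker run re-queues itself via xfs_queue_eofblocks(),
 * so the scan stays armed only while tagged inodes exist.
 */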

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}


/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount        *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
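
/*
 * Worked example for the delay above: xfs_syncd_centisecs defaults to
 * 3000 (the 30s periodic sync interval), so 3000 / 6 * 10 = 5000ms,
 * giving the 5 second reclaim cadence described in the comment.
 */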

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_reclaim_work_queue(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

STATIC void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}
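
/*
 * Note that unlike xfs_inode_ag_walk_grab() above, a successful grab here
 * takes no igrab() reference - the XFS_IRECLAIM flag itself provides the
 * exclusion - so it is paired with xfs_inode_free() on the reclaim path or
 * with clearing the flag on the error path of xfs_reclaim_inode(), never
 * with an IRELE().
 */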

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * a shutdown during the unmount reclaim walk will leak all the unreclaimed
 * inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

 | 1077 | 			/* | 
| Dave Chinner | e3a20c0 | 2010-09-24 19:51:50 +1000 | [diff] [blame] | 1078 | 			 * Grab the inodes before we drop the lock. if we found | 
 | 1079 | 			 * nothing, nr == 0 and the loop will be skipped. | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 1080 | 			 */ | 
| Dave Chinner | e3a20c0 | 2010-09-24 19:51:50 +1000 | [diff] [blame] | 1081 | 			for (i = 0; i < nr_found; i++) { | 
 | 1082 | 				struct xfs_inode *ip = batch[i]; | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 1083 |  | 
| Dave Chinner | e3a20c0 | 2010-09-24 19:51:50 +1000 | [diff] [blame] | 1084 | 				if (done || xfs_reclaim_inode_grab(ip, flags)) | 
 | 1085 | 					batch[i] = NULL; | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 1086 |  | 
| Dave Chinner | e3a20c0 | 2010-09-24 19:51:50 +1000 | [diff] [blame] | 1087 | 				/* | 
 | 1088 | 				 * Update the index for the next lookup. Catch | 
 | 1089 | 				 * overflows into the next AG range which can | 
 | 1090 | 				 * occur if we have inodes in the last block of | 
 | 1091 | 				 * the AG and we are currently pointing to the | 
 | 1092 | 				 * last inode. | 
| Dave Chinner | 1a3e8f3 | 2010-12-17 17:29:43 +1100 | [diff] [blame] | 1093 | 				 * | 
 | 1094 | 				 * Because we may see inodes that are from the | 
 | 1095 | 				 * wrong AG due to RCU freeing and | 
 | 1096 | 				 * reallocation, only update the index if it | 
 | 1097 | 				 * lies in this AG. It was a race that lead us | 
 | 1098 | 				 * to see this inode, so another lookup from | 
 | 1099 | 				 * the same index will not find it again. | 
| Dave Chinner | e3a20c0 | 2010-09-24 19:51:50 +1000 | [diff] [blame] | 1100 | 				 */ | 
| Dave Chinner | 1a3e8f3 | 2010-12-17 17:29:43 +1100 | [diff] [blame] | 1101 | 				if (XFS_INO_TO_AGNO(mp, ip->i_ino) != | 
 | 1102 | 								pag->pag_agno) | 
 | 1103 | 					continue; | 
| Dave Chinner | e3a20c0 | 2010-09-24 19:51:50 +1000 | [diff] [blame] | 1104 | 				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | 
 | 1105 | 				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | 
 | 1106 | 					done = 1; | 
 | 1107 | 			} | 
 | 1108 |  | 
 | 1109 | 			/* unlock now we've grabbed the inodes. */ | 
| Dave Chinner | 1a3e8f3 | 2010-12-17 17:29:43 +1100 | [diff] [blame] | 1110 | 			rcu_read_unlock(); | 
| Dave Chinner | e3a20c0 | 2010-09-24 19:51:50 +1000 | [diff] [blame] | 1111 |  | 
 | 1112 | 			for (i = 0; i < nr_found; i++) { | 
 | 1113 | 				if (!batch[i]) | 
 | 1114 | 					continue; | 
 | 1115 | 				error = xfs_reclaim_inode(batch[i], pag, flags); | 
 | 1116 | 				if (error && last_error != EFSCORRUPTED) | 
 | 1117 | 					last_error = error; | 
 | 1118 | 			} | 
 | 1119 |  | 
 | 1120 | 			*nr_to_scan -= XFS_LOOKUP_BATCH; | 
 | 1121 |  | 
| Dave Chinner | 8daaa83 | 2011-07-08 14:14:46 +1000 | [diff] [blame] | 1122 | 			cond_resched(); | 
 | 1123 |  | 
| Dave Chinner | e3a20c0 | 2010-09-24 19:51:50 +1000 | [diff] [blame] | 1124 | 		} while (nr_found && !done && *nr_to_scan > 0); | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 1125 |  | 
| Dave Chinner | 69b491c | 2010-09-27 11:09:51 +1000 | [diff] [blame] | 1126 | 		if (trylock && !done) | 
 | 1127 | 			pag->pag_ici_reclaim_cursor = first_index; | 
 | 1128 | 		else | 
 | 1129 | 			pag->pag_ici_reclaim_cursor = 0; | 
 | 1130 | 		mutex_unlock(&pag->pag_ici_reclaim_lock); | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 1131 | 		xfs_perag_put(pag); | 
 | 1132 | 	} | 
| Dave Chinner | 69b491c | 2010-09-27 11:09:51 +1000 | [diff] [blame] | 1133 |  | 
 | 1134 | 	/* | 
 | 1135 | 	 * if we skipped any AG, and we still have scan count remaining, do | 
 | 1136 | 	 * another pass this time using blocking reclaim semantics (i.e | 
 | 1137 | 	 * waiting on the reclaim locks and ignoring the reclaim cursors). This | 
 | 1138 | 	 * ensure that when we get more reclaimers than AGs we block rather | 
 | 1139 | 	 * than spin trying to execute reclaim. | 
 | 1140 | 	 */ | 
| Dave Chinner | 8daaa83 | 2011-07-08 14:14:46 +1000 | [diff] [blame] | 1141 | 	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { | 
| Dave Chinner | 69b491c | 2010-09-27 11:09:51 +1000 | [diff] [blame] | 1142 | 		trylock = 0; | 
 | 1143 | 		goto restart; | 
 | 1144 | 	} | 
| Dave Chinner | 65d0f20 | 2010-09-24 18:40:15 +1000 | [diff] [blame] | 1145 | 	return XFS_ERROR(last_error); | 
 | 1146 | } | 
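
/*
 * Worked illustration (not part of the logic above, assumed geometry of
 * 8 agino bits for the sake of small numbers) of the cursor-overflow guard
 * in xfs_reclaim_inodes_ag():
 *
 *	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 *	if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 *		done = 1;
 *
 * If ip is the last inode of its AG, XFS_INO_TO_AGINO(mp, ip->i_ino) == 255,
 * while ip->i_ino + 1 lands in the next AG and maps back to agino 0. The
 * comparison catches the wrap and terminates the walk of this AG instead of
 * restarting the gang lookup from index 0 and revisiting inodes already
 * scanned.
 */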

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}
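
/*
 * Example caller (a sketch, not code from this file): unmount needs every
 * reclaimable inode gone before the per-AG structures are torn down, so it
 * would use the blocking variant, along the lines of:
 *
 *	xfs_reclaim_inodes(mp, SYNC_WAIT);
 *
 * SYNC_WAIT makes xfs_reclaim_inode() flush dirty inodes and wait for the
 * IO to complete, so nothing is left behind for a later pass.
 */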

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim
 * in progress, while we throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not take very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
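
/*
 * Sketch of the expected caller (an assumption based on the shrinker-style
 * nr_to_scan interface, not code in this file): the superblock shrinker
 * glue would forward its scan count here, roughly:
 *
 *	static void
 *	example_free_cached_objects(		// hypothetical name
 *		struct super_block	*sb,
 *		int			nr_to_scan)
 *	{
 *		xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
 *	}
 *
 * SYNC_TRYLOCK keeps concurrent shrinker invocations from serialising on
 * the per-AG reclaim locks, while SYNC_WAIT throttles each caller behind
 * the inode IO it issues.
 */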

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
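
/*
 * Worked example of why the count is cheap (assumed numbers, for
 * illustration only): xfs_perag_get_tag() only returns AGs whose
 * XFS_ICI_RECLAIM_TAG bit is set in the per-mount perag tree, and each hit
 * is summed from the pre-maintained pag_ici_reclaimable counter. On a mount
 * with 16 AGs of which two hold reclaimable inodes, the loop runs twice
 * rather than 16 times, and no inode radix tree is ever walked.
 */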

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if (eofb->eof_flags & XFS_EOF_FLAGS_UID &&
	    ip->i_d.di_uid != eofb->eof_uid)
		return 0;

	if (eofb->eof_flags & XFS_EOF_FLAGS_GID &&
	    ip->i_d.di_gid != eofb->eof_gid)
		return 0;

	if (eofb->eof_flags & XFS_EOF_FLAGS_PRID &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}
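
/*
 * A minimal filter sketch (assumed values, for illustration only): to match
 * inodes owned by uid 1000 regardless of group or project, only the UID
 * check above is armed:
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_UID;
 *	eofb.eof_uid = 1000;
 *	// xfs_inode_match_id(ip, &eofb) now returns 1 only when
 *	// ip->i_d.di_uid == 1000.
 *
 * Unset flag bits make the corresponding checks fall through, so a
 * zero-filled eofb matches every inode.
 */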

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags,
	void			*args)
{
	int ret;
	struct xfs_eofblocks *eofb = args;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (!xfs_inode_match_id(ip, eofb))
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	ret = xfs_free_eofblocks(ip->i_mount, ip, true);

	/* don't revisit the inode if we're not waiting */
	if (ret == EAGAIN && !(flags & SYNC_WAIT))
		ret = 0;

	return ret;
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
					 eofb, XFS_ICI_EOFBLOCKS_TAG);
}
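
/*
 * Usage sketch (hypothetical caller, assumed flag combination): to
 * synchronously trim post-EOF preallocation from every file larger than
 * 64MB belonging to project quota ID 42:
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *	int			error;
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_PRID | XFS_EOF_FLAGS_SYNC |
 *			 XFS_EOF_FLAGS_MINFILESIZE;
 *	eofb.eof_prid = 42;
 *	eofb.eof_min_file_size = 64 * 1024 * 1024;
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 *
 * XFS_EOF_FLAGS_SYNC selects SYNC_WAIT above, so the scan blocks on dirty
 * mappings instead of skipping them.
 */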

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_set_eofblocks_tag(ip);

	tagged = radix_tree_tagged(&pag->pag_ici_root,
				   XFS_ICI_EOFBLOCKS_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_EOFBLOCKS_TAG);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_queue_eofblocks(ip->i_mount);

		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
					      -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
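
/*
 * The two-level tagging above is what keeps scans cheap: the per-AG inode
 * radix tree marks which inodes carry the tag, and the per-mount perag tree
 * marks which AGs contain any tagged inode. A walker can then find work
 * with the same pattern used elsewhere in this file (sketch):
 *
 *	ag = 0;
 *	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_EOFBLOCKS_TAG))) {
 *		ag = pag->pag_agno + 1;
 *		// ... gang-lookup tagged inodes in pag->pag_ici_root ...
 *		xfs_perag_put(pag);
 *	}
 *
 * so wholly untagged AGs are never visited.
 */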

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_clear_eofblocks_tag(ip);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_EOFBLOCKS_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
					       -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}