/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trace.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

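/*
 * Find the next inode in the per-AG inode cache radix tree, starting the
 * search at *first_index and optionally restricting it to inodes with the
 * given radix tree tag set. Returns NULL once the AG has been exhausted.
 */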
STATIC xfs_inode_t *
xfs_inode_ag_lookup(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	uint32_t		*first_index,
	int			tag)
{
	int			nr_found;
	struct xfs_inode	*ip;

	/*
	 * Use a gang lookup to find the next inode in the tree, as the
	 * tree is sparse and a gang lookup walks the tree until it finds
	 * the number of objects requested.
	 */
	if (tag == XFS_ICI_NO_TAG) {
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1);
	} else {
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1, tag);
	}
	if (!nr_found)
		return NULL;

	/*
	 * Update the index for the next lookup. Catch overflows into the
	 * next AG range, which can occur if we have inodes in the last
	 * block of the AG and we are currently pointing to the last inode.
	 */
	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
		return NULL;
	return ip;
}

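/*
 * Walk all inodes cached in a single AG and run @execute against each one
 * found. An inode that returns EAGAIN is skipped, and the whole AG walk is
 * restarted after a short delay so that skipped inodes get retried.
 */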
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive,
	int			*nr_to_scan)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;

restart:
	skipped = 0;
	first_index = 0;
	do {
		int		error = 0;
		xfs_inode_t	*ip;

		if (exclusive)
			write_lock(&pag->pag_ici_lock);
		else
			read_lock(&pag->pag_ici_lock);
		ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
		if (!ip) {
			if (exclusive)
				write_unlock(&pag->pag_ici_lock);
			else
				read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* execute releases pag->pag_ici_lock */
		error = execute(ip, pag, flags);
		if (error == EAGAIN) {
			skipped++;
			continue;
		}
		if (error)
			last_error = error;

		/* Bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while ((*nr_to_scan)--);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

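/*
 * Iterate over all the AGs in the filesystem and run @execute against every
 * cached inode. If @nr_to_scan is non-NULL it bounds the number of inodes
 * visited and is updated on return with the budget left over.
 */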
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive,
	int			*nr_to_scan)
{
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			nr;

	nr = nr_to_scan ? *nr_to_scan : INT_MAX;
	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, ag);
		if (!pag->pag_ici_init) {
			xfs_perag_put(pag);
			continue;
		}
		error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
						exclusive, &nr);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
		if (nr <= 0)
			break;
	}
	if (nr_to_scan)
		*nr_to_scan = nr;
	return XFS_ERROR(last_error);
}

/* must be called with pag_ici_lock held and releases it */
int
xfs_sync_inode_valid(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	struct inode		*inode = VFS_I(ip);
	int			error = EFSCORRUPTED;

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_unlock;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	error = ENOENT;
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		goto out_unlock;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		goto out_unlock;
	}

	/* inode is valid */
	error = 0;
out_unlock:
	read_unlock(&pag->pag_ici_lock);
	return error;
}

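/*
 * Write back the dirty pagecache data of a single inode. With SYNC_WAIT the
 * write is issued synchronously and we wait for outstanding I/O completions;
 * with SYNC_TRYLOCK a contended iolock causes the data flush to be skipped.
 */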
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space *mapping = inode->i_mapping;
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	IRELE(ip);
	return error;
}

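/*
 * Flush the dirty in-core state of a single inode to its backing buffer.
 * Without SYNC_WAIT, an inode whose flush lock cannot be taken immediately
 * is left for a later pass.
 */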
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
				      XFS_ICI_NO_TAG, 0, NULL);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
				     XFS_ICI_NO_TAG, 0, NULL);
}

STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* the log force ensures this transaction is pushed to disk */
	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return error;
}

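/*
 * Write the in-core superblock out to its buffer and issue the buffer write.
 * If the log still needs covering afterwards, commit a dummy transaction to
 * record that the filesystem is otherwise idle.
 */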
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_TRYLOCK) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XBF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	error = xfs_bwrite(mp, bp);
	if (error)
		return error;

	/*
	 * If this is a data integrity sync make sure all pending buffers
	 * are flushed out for the log coverage check below.
	 */
	if (flags & SYNC_WAIT)
		xfs_flush_buftarg(mp->m_ddev_targp, 1);

	if (xfs_log_need_covered(mp))
		error = xfs_commit_dummy_trans(mp, flags);
	return error;

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int error;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}

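/*
 * Flush and reclaim all cached inodes and write back any remaining dirty
 * metadata so that the log can be covered and the unmount record written.
 */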
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata, but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

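/*
 * Kick off the above data flush on the xfssyncd thread and wait for it to
 * complete. Running the flush in xfssyncd rather than directly in the
 * allocation path avoids deep stacks and lock recursion deadlocks.
 */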
void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work,
			     &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, 0);
		xfs_reclaim_inodes(mp, 0);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

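/*
 * The per-mount xfssyncd thread. It sleeps for xfs_syncd_centisecs between
 * periodic syncs and runs any work queued via xfs_syncd_queue_work() as soon
 * as it is woken, requeuing the periodic sync work when the timeout expires.
 */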
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		if (list_empty(&mp->m_sync_list))
			timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_splice_init(&mp->m_sync_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}

int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

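/*
 * Tag an inode as reclaimable in its per-AG inode radix tree and account it
 * in the per-AG reclaimable count. The caller must hold pag->pag_ici_lock
 * to serialise the radix tree update.
 */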
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

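/*
 * Clear the reclaim tag for an inode and drop the per-AG reclaimable count.
 * As with __xfs_inode_set_reclaim_tag(), the caller must hold
 * pag->pag_ici_lock.
 */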
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	pag->pag_ici_reclaimable--;
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error = 0;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 */
	spin_lock(&ip->i_flags_lock);
	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* ignore as it is already under reclaim */
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		return 0;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/* Now we have an inode that needs flushing */
	error = xfs_iflush(ip, sync_mode);
	if (sync_mode & SYNC_WAIT) {
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return error;
}

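/*
 * Reclaim every inode currently tagged XFS_ICI_RECLAIM_TAG. With SYNC_WAIT
 * in @mode this blocks until each inode has been flushed and reclaimed;
 * without it, dirty inodes are pushed out delwri and left for a later pass.
 */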
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
					XFS_ICI_RECLAIM_TAG, 1, NULL);
}

/*
 * Shrinker infrastructure.
 *
 * This is all far more complex than it needs to be. It adds a global list of
 * mounts because the shrinker callbacks carry no per-mount context. We need
 * to make the shrinkers pass a context to avoid the need for global state.
 */
static LIST_HEAD(xfs_mount_list);
static struct rw_semaphore xfs_mount_list_lock;

static int
xfs_reclaim_inode_shrink(
	int		nr_to_scan,
	gfp_t		gfp_mask)
{
	struct xfs_mount *mp;
	struct xfs_perag *pag;
	xfs_agnumber_t	ag;
	int		reclaimable = 0;

	/* nr_to_scan == 0 means the VM only wants the reclaimable count */
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;

		down_read(&xfs_mount_list_lock);
		list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
			xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
					XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
			if (nr_to_scan <= 0)
				break;
		}
		up_read(&xfs_mount_list_lock);
	}

	down_read(&xfs_mount_list_lock);
	list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
		for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
			pag = xfs_perag_get(mp, ag);
			if (!pag->pag_ici_init) {
				xfs_perag_put(pag);
				continue;
			}
			reclaimable += pag->pag_ici_reclaimable;
			xfs_perag_put(pag);
		}
	}
	up_read(&xfs_mount_list_lock);
	return reclaimable;
}

static struct shrinker xfs_inode_shrinker = {
	.shrink = xfs_reclaim_inode_shrink,
	.seeks = DEFAULT_SEEKS,
};

void __init
xfs_inode_shrinker_init(void)
{
	init_rwsem(&xfs_mount_list_lock);
	register_shrinker(&xfs_inode_shrinker);
}

void
xfs_inode_shrinker_destroy(void)
{
	ASSERT(list_empty(&xfs_mount_list));
	unregister_shrinker(&xfs_inode_shrinker);
}

void
xfs_inode_shrinker_register(
	struct xfs_mount	*mp)
{
	down_write(&xfs_mount_list_lock);
	list_add_tail(&mp->m_mplist, &xfs_mount_list);
	up_write(&xfs_mount_list_lock);
}

void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
{
	down_write(&xfs_mount_list_lock);
	list_del(&mp->m_mplist);
	up_write(&xfs_mount_list_lock);
}