/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

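/*
 * Take a passive reference to an inode found during an AG walk. Returns
 * zero if the inode is safe to operate on, or a positive error code if it
 * should be skipped (filesystem shutting down, or the inode is new, bad
 * or heading into reclaim).
 */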
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		return ENOENT;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;
}

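/*
 * Walk all incore inodes in an AG in batches, calling @execute on each
 * inode we can grab. An EAGAIN return from @execute causes a short delay
 * and a full restart of the walk; EFSCORRUPTED aborts it.
 */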
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr_found == 0 and the loop below will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch overflows
			 * into the next AG range which can occur if we have inodes
			 * in the last block of the AG and we are currently
			 * pointing to the last inode.
			 */
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now that we've grabbed the inodes. */
		read_unlock(&pag->pag_ici_lock);

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

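/*
 * Apply @execute to the incore inodes of every AG in the filesystem. The
 * first EFSCORRUPTED error aborts the iteration; otherwise the last error
 * seen is returned.
 */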
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

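/*
 * Write back an inode's dirty pagecache data. With SYNC_TRYLOCK we back
 * off if the iolock is contended; with SYNC_WAIT we flush synchronously
 * and wait for any remaining ioends to complete.
 */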
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
						0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	return error;
}

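/*
 * Flush dirty inode metadata to the inode's backing buffer. Without
 * SYNC_WAIT we back off rather than wait for a contended flush lock.
 */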
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}

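/*
 * Push the superblock buffer out to disk, forcing the log first if the
 * buffer is pinned.
 */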
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	return xfs_bwrite(mp, bp);
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now that we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}

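/*
 * Flush and reclaim all inodes, then write all remaining dirty metadata
 * buffers out to the data device.
 */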
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice.  The first instance of the loop
	 * will flush most meta data but that will generate more meta data
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, 0);
		xfs_reclaim_inodes(mp, 0);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp, 0);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

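/*
 * The per-mount xfssyncd thread. It sleeps until the sync period expires
 * or work is queued on m_sync_list, queues the periodic xfs_sync_worker
 * when nothing else is pending, and then processes the work list.
 */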
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		if (list_empty(&mp->m_sync_list))
			timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_splice_init(&mp->m_sync_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}

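/*
 * Start the per-mount xfssyncd thread.
 */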
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

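/*
 * Mark an inode as reclaimable in the per-AG radix tree, and propagate
 * the reclaim tag up into the per-mount perag tree when the AG gains its
 * first reclaimable inode.
 */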
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

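/*
 * Account for an inode leaving reclaim, clearing the perag tree reclaim
 * tag when the AG has no reclaimable inodes left.
 */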
STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	/*
	 * Do some unlocked checks first to avoid unnecessary lock traffic.
	 * The first is a flush lock check, the second is an already-in-reclaim
	 * check. Only do these checks if we are not going to block on locks.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
		return 1;
	}

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 */
	spin_lock(&ip->i_flags_lock);
	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* ignore as it is already under reclaim */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error = 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/* Now we have an inode that needs flushing */
	error = xfs_iflush(ip, sync_mode);
	if (sync_mode & SYNC_WAIT) {
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	write_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	write_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk would leak all
 * the unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			write_lock(&pag->pag_ici_lock);
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				write_unlock(&pag->pag_ici_lock);
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr_found == 0 and the loop below will be
			 * skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 */
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now that we've grabbed the inodes. */
			write_unlock(&pag->pag_ici_lock);

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (trylock && skipped && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

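/*
 * Reclaim all reclaimable inodes in the filesystem, with no limit on the
 * number of inodes scanned.
 */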
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Shrinker infrastructure.
 */
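/*
 * Shrinker callback: scan and reclaim inodes on behalf of the VM. Returns
 * the number of reclaimable inodes remaining, or -1 to tell the shrinker
 * to back off (no __GFP_FS, or the scan ran out of tagged inodes before
 * exhausting its count).
 */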
static int
xfs_reclaim_inode_shrink(
	struct shrinker	*shrink,
	int		nr_to_scan,
	gfp_t		gfp_mask)
{
	struct xfs_mount *mp;
	struct xfs_perag *pag;
	xfs_agnumber_t	ag;
	int		reclaimable;

	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;

		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
		/* terminate if we don't exhaust the scan */
		if (nr_to_scan > 0)
			return -1;
	}

	reclaimable = 0;
	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

void
xfs_inode_shrinker_register(
	struct xfs_mount	*mp)
{
	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&mp->m_inode_shrink);
}

void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
{
	unregister_shrinker(&mp->m_inode_shrink);
}