/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

struct workqueue_struct	*xfs_syncd_wq;	/* sync workqueue */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

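/*
 * Grab a reference to an inode found during an AG walk. Returns 0 on
 * success, ENOENT if the inode should be skipped, or EFSCORRUPTED if the
 * filesystem is shutting down.
 */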
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

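/*
 * Walk all in-core inodes of a single AG in batches, calling @execute on
 * each inode that can be grabbed. If @execute returns EAGAIN for any inode,
 * the whole walk is restarted after a short delay.
 */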
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

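/*
 * Apply @execute to every grabbable inode in the filesystem, one AG at a
 * time. EFSCORRUPTED aborts the iteration; otherwise the last error seen
 * is returned.
 */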
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

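/*
 * Write back the dirty pagecache data of a single inode. With SYNC_TRYLOCK
 * set, inodes whose iolock cannot be taken immediately are skipped rather
 * than waited on.
 */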
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space *mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			return 0;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

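/*
 * Write the in-core superblock out to disk.
 */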
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;
	int			error;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother emptying the AIL
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* force out the log */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	return error ? error : error2;
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* flush all pending changes from the AIL */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);

	/*
	 * At this point we might have modified the superblock again and thus
	 * added an item to the AIL, thus flush it again.
	 */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * The superblock buffer is uncached and xfsaild_push() will lock and
	 * set the XBF_ASYNC flag on the buffer. We cannot do xfs_buf_iowait()
	 * here but a lock on the superblock buffer will block until iodone()
	 * has completed.
	 */
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);
}

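/*
 * Queue the next periodic sync pass, xfs_syncd_centisecs (30s by default)
 * from now.
 */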
static void
xfs_syncd_queue_sync(
	struct xfs_mount        *mp)
{
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	/*
	 * We shouldn't write/force the log if we are in the mount/unmount
	 * process or on a read only filesystem. The workqueue still needs to be
	 * active in both cases, however, because it is used for inode reclaim
	 * during these times.  Use the MS_ACTIVE flag to avoid doing anything
	 * during mount.  Doing work during unmount is avoided by calling
	 * cancel_delayed_work_sync on this work queue before tearing down
	 * the ail and the log in xfs_log_unmount.
	 */
	if (!(mp->m_super->s_flags & MS_ACTIVE) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_writers.frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs syncd work default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_syncd_queue_reclaim(
	struct xfs_mount        *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
STATIC void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_syncd_queue_reclaim(mp);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room.
 *
 * Queue a new data flush if there isn't one already in progress and
 * wait for completion of the flush. This means that we only ever have one
 * inode flush in progress no matter how many ENOSPC events are occurring and
 * so will prevent the system from bogging down due to every concurrent
 * ENOSPC event scanning all the active inodes in the system for writeback.
 */
void
xfs_flush_inodes(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	queue_work(xfs_syncd_wq, &mp->m_flush_work);
	flush_work_sync(&mp->m_flush_work);
}

STATIC void
xfs_flush_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(work,
					struct xfs_mount, m_flush_work);

	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}

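/*
 * Initialise the per-mount sync, reclaim and flush work items and start
 * the periodic sync work.
 */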
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);

	xfs_syncd_queue_sync(mp);

	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_sync_work);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	cancel_work_sync(&mp->m_flush_work);
}

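/*
 * Tag an inode as reclaimable in the per-AG radix tree. On the first
 * reclaimable inode in the AG, propagate the tag up into the per-mount
 * perag tree and kick off background reclaim.
 */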
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_syncd_queue_reclaim(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

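/*
 * Account for an inode leaving reclaim, clearing the perag tree reclaim
 * tag once the AG holds no more reclaimable inodes.
 */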
STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

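/*
 * Reclaim all reclaimable inodes in the filesystem, with no limit on the
 * number of inodes scanned.
 */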
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_syncd_queue_reclaim(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}