/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
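/* A batch of 32 pointers is only 256 bytes of stack on a 64-bit kernel. */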

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong
			 * AG due to RCU freeing and reallocation, only update
			 * the index if it lies in this AG. It was a race that
			 * led us to see this inode, so another lookup from
			 * the same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

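	/* Retry any inodes whose execute callback returned EAGAIN. */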
	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

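/*
 * Walk all in-core inodes in all AGs and run @execute on each one that can
 * be grabbed. Callers (the quota code, for example) supply the per-inode
 * callback.
 */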
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
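		/*
		 * xfs_syncd_centisecs defaults to 3000 (30s): dividing by 6
		 * and scaling centiseconds to milliseconds gives the 5s
		 * interval mentioned above.
		 */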
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_reclaim_work_queue(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode is already being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state		iflush ret	required action
 *	---------------		----------	---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

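	/* Re-acquire the flush lock, which the reclaim path below drops. */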
	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shut down during a filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

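			/*
			 * A full batch counts against the scan budget even
			 * if fewer inodes were actually found or reclaimed.
			 */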
			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

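/* Reclaim all reclaimable inodes in the filesystem using the given mode. */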
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}