/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

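/*
 * Look up the next inode at or after *first_index in this AG's inode
 * radix tree, optionally restricted to inodes carrying @tag.  On
 * success this returns with pag->pag_ici_lock held for reading; the
 * lock is only dropped on the failure paths.  The execute callbacks
 * below are responsible for releasing it (e.g. via
 * xfs_sync_inode_valid()).
 */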
STATIC xfs_inode_t *
xfs_inode_ag_lookup(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	uint32_t		*first_index,
	int			tag)
{
	int			nr_found;
	struct xfs_inode	*ip;

	/*
	 * Use a gang lookup to find the next inode in the tree: the tree
	 * is sparse, and a gang lookup walks it to find the number of
	 * objects requested.
	 */
	read_lock(&pag->pag_ici_lock);
	if (tag == XFS_ICI_NO_TAG) {
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1);
	} else {
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1, tag);
	}
	if (!nr_found)
		goto unlock;

	/*
	 * Update the index for the next lookup. Catch overflows into the
	 * next AG range, which can occur if we have inodes in the last
	 * block of the AG and we are currently pointing to the last inode.
	 */
	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
		goto unlock;

	return ip;

unlock:
	read_unlock(&pag->pag_ici_lock);
	return NULL;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	xfs_agnumber_t		ag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag)
{
	struct xfs_perag	*pag = &mp->m_perag[ag];
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;

restart:
	skipped = 0;
	first_index = 0;
	do {
		int		error = 0;
		xfs_inode_t	*ip;

		ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
		if (!ip)
			break;

		error = execute(ip, pag, flags);
		if (error == EAGAIN) {
			skipped++;
			continue;
		}
		if (error)
			last_error = error;
		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while (1);

	if (skipped) {
		delay(1);
		goto restart;
	}

	xfs_put_perag(mp, pag);
	return last_error;
}

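/*
 * Walk every initialised AG and apply @execute to each cached inode
 * matching @tag.  A sketch of a typical call (this is how xfs_sync_data()
 * below drives it):
 *
 *	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
 *				      XFS_ICI_NO_TAG);
 */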
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag)
{
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
		if (!mp->m_perag[ag].pag_ici_init)
			continue;
		error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

/* must be called with pag_ici_lock held and releases it */
int
xfs_sync_inode_valid(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	struct inode		*inode = VFS_I(ip);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		read_unlock(&pag->pag_ici_lock);
		return EFSCORRUPTED;
	}

	/*
	 * If we can't get a reference on the inode, it must be in reclaim.
	 * Leave it for the reclaim code to flush. Also avoid inodes that
	 * haven't been fully initialised.
	 */
	if (!igrab(inode)) {
		read_unlock(&pag->pag_ici_lock);
		return ENOENT;
	}
	read_unlock(&pag->pag_ici_lock);

	if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
		IRELE(ip);
		return ENOENT;
	}

	return 0;
}

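/*
 * Per-inode callback for xfs_inode_ag_iterator(): write back one inode's
 * dirty pagecache.  xfs_sync_inode_valid() above takes the inode
 * reference (and drops pag_ici_lock); the IRELE at the end of this
 * function releases that reference.
 */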
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
						0 : XFS_B_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	IRELE(ip);
	return error;
}

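/*
 * Per-inode callback for xfs_inode_ag_iterator(): flush one inode's
 * in-core metadata via xfs_iflush(), synchronously if SYNC_WAIT is set,
 * otherwise as a delayed write.
 */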
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
			   XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
				      XFS_ICI_NO_TAG);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, 0,
		      (flags & SYNC_WAIT) ?
		       XFS_LOG_FORCE | XFS_LOG_SYNC :
		       XFS_LOG_FORCE);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
				     XFS_ICI_NO_TAG);
}

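/*
 * Log a dummy transaction so that log recovery knows the log is idle
 * ("covered").  xfs_sync_fsdata() below calls this when
 * xfs_log_need_covered() reports that an otherwise clean log still
 * needs an idle record written.
 */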
STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;
	int			log_flags = XFS_LOG_FORCE;

	if (flags & SYNC_WAIT)
		log_flags |= XFS_LOG_SYNC;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* the log force ensures this transaction is pushed to disk */
	xfs_log_force(mp, 0, log_flags);
	return error;
}

int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_TRYLOCK) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	error = xfs_bwrite(mp, bp);
	if (error)
		return error;

	/*
	 * If this is a data integrity sync make sure all pending buffers
	 * are flushed out for the log coverage check below.
	 */
	if (flags & SYNC_WAIT)
		xfs_flush_buftarg(mp->m_ddev_targp, 1);

	if (xfs_log_need_covered(mp))
		error = xfs_commit_dummy_trans(mp, flags);
	return error;

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* drop inode references pinned by filestreams */
	xfs_filestream_flush(mp);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}

STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int			count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata, but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int			error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

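/*
 * A sketch of the usual xfs_syncd_queue_work() calling pattern, as used
 * by xfs_flush_inodes() below - pass a completion so the caller can wait
 * for the work item to run (data and syncer_fn are placeholders):
 *
 *	DECLARE_COMPLETION_ONSTACK(completion);
 *
 *	xfs_syncd_queue_work(mp, data, syncer_fn, &completion);
 *	wait_for_completion(&completion);
 */
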
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work,
			     &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

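/*
 * Main loop of the xfssyncd thread: sleep for the xfs_syncd_centisecs
 * period, queue the default xfs_sync_worker() item when the timeout
 * expires, and run whatever work xfs_syncd_queue_work() has added to
 * m_sync_list in the meantime.
 */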
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}

int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

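/*
 * Reclaim a single inode.  Returns -EAGAIN if another thread has already
 * marked the inode XFS_IRECLAIM (or it is not yet reclaimable), so
 * callers can skip it.
 */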
int
xfs_reclaim_inode(
	xfs_inode_t	*ip,
	int		locked,
	int		sync_mode)
{
	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);

	/*
	 * The hash lock here protects a thread in xfs_iget_core from
	 * racing with us on linking the inode back with a vnode.
	 * Once we have the XFS_IRECLAIM flag set it will not touch
	 * us.
	 */
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		if (locked) {
			xfs_ifunlock(ip);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}
		return -EAGAIN;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(ip->i_mount, pag);

	/*
	 * If the inode is still dirty, then flush it out.  If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	if (!locked) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_iflock(ip);
	}

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/* synchronize with xfs_iflush_done */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}

void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}

STATIC int
xfs_reclaim_inode_now(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	/* ignore if already under reclaim */
	if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
		read_unlock(&pag->pag_ici_lock);
		return 0;
	}
	read_unlock(&pag->pag_ici_lock);

	return xfs_reclaim_inode(ip, 0, flags);
}

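/*
 * Reclaim every inode tagged XFS_ICI_RECLAIM_TAG, reusing the per-AG
 * iterator above.  Both the periodic sync worker and the quiesce path
 * call this with XFS_IFLUSH_DELWRI_ELSE_ASYNC.
 */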
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
					XFS_ICI_RECLAIM_TAG);
}