/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes, i.e. data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own, which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
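
/*
 * Usage sketch (illustrative, not part of this file): a pdflush-style
 * worker is expected to bracket a writeback pass like this, so that at
 * most one thread writes against a given backing device at a time:
 *
 *	if (writeback_acquire(bdi)) {
 *		... write back dirty inodes against bdi ...
 *		writeback_release(bdi);
 *	}
 *
 * If writeback_acquire() fails, another thread already holds BDI_pdflush
 * and the device should simply be skipped; generic_sync_sb_inodes() below
 * does exactly this for pdflush callers.
 */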

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, task_pid_nr(current), inode->i_ino,
			       name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty/s_io/s_more_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);
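
/*
 * Usage sketch (illustrative): filesystems normally go through the wrappers
 * in include/linux/fs.h rather than calling __mark_inode_dirty() directly,
 * e.g. after updating an inode's fields:
 *
 *	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	mark_inode_dirty(inode);
 *
 * where mark_inode_dirty() expands to __mark_inode_dirty(inode, I_DIRTY),
 * and mark_inode_dirty_sync() to __mark_inode_dirty(inode, I_DIRTY_SYNC).
 */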

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!list_empty(&sb->s_dirty)) {
		struct inode *tail_inode;

		tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when,
				tail_inode->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &sb->s_dirty);
}

/*
 * requeue inode for re-scanning after sb->s_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}
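
/*
 * The waiting side of this wakeup is inode_sync_wait() in
 * include/linux/fs.h, roughly (sketch for reference, not part of this file):
 *
 *	static inline void inode_sync_wait(struct inode *inode)
 *	{
 *		might_sleep();
 *		wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
 *							TASK_UNINTERRUPTIBLE);
 *	}
 *
 * The smp_mb() above is intended to make the clearing of I_SYNC visible
 * before any waiter is woken.
 */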

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole pdflush
	 * writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
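
/*
 * Concrete example of the wraparound above (32-bit, HZ=1000, so jiffies
 * wraps about every 49.7 days): suppose dirtied_when was stamped 30 days
 * ago and never refreshed because the inode is redirtied continuously.
 * Once jiffies advances more than ~24.85 days past the stamp, the signed
 * difference used by time_after() changes sign and the stamp appears to
 * be "in the future" - without the time_before_eq() check such an inode
 * would never again be treated as expired.
 */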

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct super_block *sb,
				unsigned long *older_than_this)
{
	list_splice_init(&sb->s_more_io, sb->s_io.prev);
	move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}

int sb_has_dirty_inodes(struct super_block *sb)
{
	return !list_empty(&sb->s_dirty) ||
	       !list_empty(&sb->s_io) ||
	       !list_empty(&sb->s_more_io);
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_SYNC);
	WARN_ON(inode->i_state & I_NEW);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	WARN_ON(inode->i_state & I_NEW);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; move it from s_io onto s_more_io/s_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of s_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to s_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
		/*
		 * We're skipping this inode because it's locked, and we're not
		 * doing writeback-for-data-integrity.  Move it to s_more_io so
		 * that writeback can proceed with the other inodes on s_io.
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of s_io.
		 */
		requeue_io(inode);
		return 0;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_SYNC) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);

		wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
							TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_SYNC);
	}
	return __sync_single_inode(inode, wbc);
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
void generic_sync_sb_inodes(struct super_block *sb,
				struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */
	int sync = wbc->sync_mode == WB_SYNC_ALL;

	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		queue_io(sb, wbc->older_than_this);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & I_NEW) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from doing extra work and prevents livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&sb->s_more_io))
			wbc->more_io = 1;
	}

	if (sync) {
		struct inode *inode, *old_inode = NULL;

		/*
		 * Data integrity sync. Must wait for all pages under writeback,
		 * because there may have been pages dirtied before our sync
		 * call, but which had writeout started before we write it out.
		 * In which case, the inode may not be on the dirty list, but
		 * we still have to wait for that writeout.
		 */
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			struct address_space *mapping;

			if (inode->i_state &
					(I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
				continue;
			mapping = inode->i_mapping;
			if (mapping->nrpages == 0)
				continue;
			__iget(inode);
			spin_unlock(&inode_lock);
			/*
			 * We hold a reference to 'inode' so it couldn't have
			 * been removed from s_inodes list while we dropped the
			 * inode_lock.  We cannot iput the inode now as we can
			 * be holding the last reference and we cannot iput it
			 * under inode_lock.  So we keep the reference and iput
			 * it later.
			 */
			iput(old_inode);
			old_inode = inode;

			filemap_fdatawait(mapping);

			cond_resched();

			spin_lock(&inode_lock);
		}
		spin_unlock(&inode_lock);
		iput(old_inode);
	} else
		spin_unlock(&inode_lock);

	return;		/* Leave any unwritten inodes on s_io */
}
EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);

static void sync_sb_inodes(struct super_block *sb,
				struct writeback_control *wbc)
{
	generic_sync_sb_inodes(sb, wbc);
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to superblock here. If it has non-empty
 * ->s_dirty it hadn't been killed yet and kill_super() won't proceed
 * past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are all
 * empty. Since __sync_single_inode() regains inode_lock before it finally
 * moves the inode from the superblock lists, we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}
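
/*
 * Usage sketch (illustrative; the values here are assumptions - see the
 * real callers in mm/page-writeback.c): background writeback typically
 * drives this with something like
 *
 *	struct writeback_control wbc = {
 *		.bdi		= NULL,		(all queues)
 *		.sync_mode	= WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nr_to_write	= 1024,		(one slice)
 *		.nonblocking	= 1,
 *	};
 *	writeback_inodes(&wbc);
 *
 * and loops until enough pages have been cleaned or congestion is hit.
 */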

/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.
 *
 * A finite limit is set on the number of pages which will be written,
 * to prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	if (!wait) {
		unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
		unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

		wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	} else
		wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */

	sync_sb_inodes(sb, &wbc);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	__sync_inodes(0);

	if (wait)
		__sync_inodes(1);
}

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
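
/*
 * Usage sketch (illustrative): a knfsd-style caller that must have the
 * inode on stable storage before replying would do
 *
 *	err = write_inode_now(inode, 1);	(synchronous: waits on writeout)
 */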

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
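
/*
 * Usage sketch (illustrative; the numbers are assumptions): a caller that
 * wants to push at most a few pages of one inode without waiting could do
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 16,
 *	};
 *	err = sync_inode(inode, &wbc);
 */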

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 *    OSYNC_DATA:     i_mapping's dirty data
 *    OSYNC_METADATA: the buffers at i_mapping->private_list
 *    OSYNC_INODE:    the inode itself
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
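
/*
 * Usage sketch (illustrative, modelled on the generic O_SYNC write path):
 * after copying data into the pagecache, a writer would flush data and
 * associated metadata with
 *
 *	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
 *		ret = generic_osync_inode(inode, mapping,
 *					  OSYNC_METADATA | OSYNC_DATA);
 */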