/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	akpm@zip.com.au
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}

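/*
 * I/O completion handler for multipage write BIOs.  Like the read-side
 * handler above, but on error it also records AS_EIO in the address_space
 * flags so a later fsync() can report the failure, and it ends writeback
 * on each page instead of unlocking it.
 */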
static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
		}
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}

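/*
 * Attach the completion handler appropriate for the I/O direction and
 * submit the BIO.  Always returns NULL so the caller can assign the
 * result straight back to its bio pointer.
 */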
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io_read;
	if (rw == WRITE)
		bio->bi_end_io = mpage_end_io_write;
	submit_bio(rw, bio);
	return NULL;
}

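/*
 * Allocate a BIO against the given block device, starting at first_sector.
 * If the allocation fails and the caller is itself in the memory allocator
 * (PF_MEMALLOC), retry with progressively fewer vecs before giving up.
 */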
static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

/*
 * Support function for mpage_readpages.  The fs-supplied get_block might
 * return an up-to-date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs, submitting the
 * accumulated BIO whenever the blocks are not contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_block call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_block calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			clear_buffer_mapped(map_bh);
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + (first_hole << blkbits), 0,
				PAGE_CACHE_SIZE - (first_hole << blkbits));
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
				GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages, and
 *                       start reads against them.
 *
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 *
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct pagevec lru_pvec;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;

	clear_buffer_mapped(&map_bh);
	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else {
			page_cache_release(page);
		}
	}
	pagevec_lru_add(&lru_pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
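
/*
 * Example (illustrative sketch only, not part of this file): a filesystem
 * typically wires these helpers into its address_space_operations via thin
 * wrappers around its own get_block_t implementation.  The names
 * example_get_block/example_aops below are assumptions for the sketch:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		return mpage_readpage(page, example_get_block);
 *	}
 *
 *	static int example_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					example_get_block);
 *	}
 *
 *	static struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.readpages	= example_readpages,
 *		...
 *	};
 */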

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;

	clear_buffer_mapped(&map_bh);
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */
static struct bio *
__mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
	sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc,
	writepage_t writepage_fn)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
		char *kaddr;

		if (page->index > end_index || !offset)
			goto confused;
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && *last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(WRITE, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(WRITE, bio);
		goto alloc_new;
	}

	/*
	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
	 * sure to only clean buffers which we know we'll be writing.
	 */
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		/*
		 * we cannot drop the bh if the page is not uptodate
		 * or a concurrent readpage would fail to serialize with the bh
		 * and it would read from disk before we reach the platter.
		 */
		if (buffer_heads_over_limit && PageUptodate(page))
			try_to_free_buffers(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(WRITE, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		*last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(WRITE, bio);

	if (writepage_fn) {
		*ret = (*writepage_fn)(page, wbc);
	} else {
		*ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	if (*ret) {
		if (*ret == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
out:
	return bio;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given
 * address space and writepage() all of them.
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * If you fix this you should check generic_writepages() also!
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	int ret = 0;
	int done = 0;
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	writepage = NULL;
	if (get_block == NULL)
		writepage = mapping->a_ops->writepage;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			if (writepage) {
				ret = (*writepage)(page, wbc);
				if (ret) {
					if (ret == -ENOSPC)
						set_bit(AS_ENOSPC,
							&mapping->flags);
					else
						set_bit(AS_EIO,
							&mapping->flags);
				}
			} else {
				bio = __mpage_writepage(bio, page, get_block,
						&last_block_in_bio, &ret, wbc,
						page->mapping->a_ops->writepage);
			}
			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
				unlock_page(page);
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	if (bio)
		mpage_bio_submit(WRITE, bio);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);

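/*
 * Write one page directly to BIO.  If the page cannot be written in one
 * go (one of the "confused" cases in __mpage_writepage), -EAGAIN is
 * returned and the caller is expected to fall back to its own writepage
 * implementation.
 */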
int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	int ret = 0;
	struct bio *bio;
	sector_t last_block_in_bio = 0;

	bio = __mpage_writepage(NULL, page, get_block,
			&last_block_in_bio, &ret, wbc, NULL);
	if (bio)
		mpage_bio_submit(WRITE, bio);

	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
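
/*
 * Example (illustrative sketch only, not part of this file): the write
 * side is typically wired up the same way as the read side.  The names
 * below (example_get_block, example_writepage, example_writepages) are
 * assumptions for the sketch:
 *
 *	static int example_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, example_get_block, wbc);
 *	}
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, example_get_block);
 *	}
 */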