/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	akpm@zip.com.au
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

static void mpage_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
		}
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io_read;
	if (rw == WRITE)
		bio->bi_end_io = mpage_end_io_write;
	submit_bio(rw, bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs largest possible bios, submits them for IO if the
 * blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			clear_buffer_mapped(map_bh);
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
				GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages, and
 *                       start reads against them.
 *
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 *
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is to allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;

	clear_buffer_mapped(&map_bh);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block);
		}
		page_cache_release(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
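
/*
 * Illustration (a sketch, not part of the original file): how a filesystem's
 * get_block routine can cooperate with the BH_Boundary logic described above.
 * example_get_block() and example_lookup() are hypothetical names standing in
 * for filesystem-specific code; map_bh() and set_buffer_boundary() are the
 * stock buffer_head helpers.  Setting BH_Boundary when the next logical
 * block's mapping would need a metadata read tells mpage to submit its
 * accumulated BIO now, so the requests reach the disk in the right order:
 *
 *	static int example_get_block(struct inode *inode, sector_t iblock,
 *			struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys;
 *		int boundary;
 *
 *		if (example_lookup(inode, iblock, &phys, &boundary))
 *			return -EIO;
 *		map_bh(bh_result, inode->i_sb, phys);
 *		if (boundary)
 *			set_buffer_boundary(bh_result);
 *		return 0;
 *	}
 */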

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;

	clear_buffer_mapped(&map_bh);
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
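
/*
 * Typical usage, sketched for illustration: a filesystem wires these read
 * helpers into its address_space_operations and supplies its own block
 * mapper.  ext2 does essentially the following; example_get_block stands in
 * for the filesystem's real get_block routine:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		return mpage_readpage(page, example_get_block);
 *	}
 *
 *	static int example_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					example_get_block);
 *	}
 */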

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */
struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
			     void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(WRITE, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(WRITE, bio);
		goto alloc_new;
	}

	/*
	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
	 * sure to only clean buffers which we know we'll be writing.
	 */
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		/*
		 * we cannot drop the bh if the page is not uptodate
		 * or a concurrent readpage would fail to serialize with the bh
		 * and it would read from disk before we reach the platter.
		 */
		if (buffer_heads_over_limit && PageUptodate(page))
			try_to_free_buffers(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(WRITE, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(WRITE, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given
 * address space and writepage() all of them.
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	int ret;

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio)
			mpage_bio_submit(WRITE, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
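
/*
 * Usage sketch (illustrative, not from the original file): a filesystem
 * typically implements its writepages() address_space_operation as a thin
 * wrapper that passes its own block mapper.  ext2 does essentially this;
 * example_get_block stands in for the real routine:
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, example_get_block);
 *	}
 */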

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio)
		mpage_bio_submit(WRITE, mpd.bio);
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
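
/*
 * Usage sketch (illustrative): mpage_writepage() backs a writepage()
 * operation directly.  Filesystems such as fat use roughly this shape;
 * example_get_block is a placeholder for the filesystem's mapper.  Note
 * that with use_writepage == 0 the confused path returns -EAGAIN, so the
 * caller's mapping must be one where that fallback is acceptable:
 *
 *	static int example_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepage(page, example_get_block, wbc);
 *	}
 */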