/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

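/*
 * Typical call site (a sketch, modelled on the open path in fs/open.c,
 * where the freshly allocated struct file is already zeroed and so meets
 * the memset requirement above):
 *
 *	file_ra_state_init(&f->f_ra, inode->i_mapping);
 */
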
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
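		/* The page cache now holds its own reference; drop ours. */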
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			put_pages_list(pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

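/*
 * Example use of read_cache_pages() (a sketch only; the myfs_* names are
 * illustrative, not real kernel symbols): a filesystem which reads pages
 * one at a time could implement ->readpages() as
 *
 *	static int myfs_filler(void *data, struct page *page)
 *	{
 *		return myfs_readpage((struct file *)data, page);
 *	}
 *
 *	static int myfs_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, myfs_filler, file);
 *	}
 */

/*
 * Submit the pages to the filesystem: in one batch via ->readpages() if the
 * address_space provides it, otherwise one at a time via ->readpage().
 */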
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;
out:
	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O. This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_cold(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
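/*
 * For example, with 4k pages each chunk is 512 pages, so a 5MB readahead
 * request is issued as chunks of 512, 512 and 256 pages.
 */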
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

static int __init readahead_init(void)
{
	return bdi_init(&default_backing_dev_info);
}
subsys_initcall(readahead_init);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up - x4 for small requests, x2 for medium ones, and clamp
 * to max for large ones.
 */
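/*
 * Worked example, assuming a 128k (32 page) max readahead with 4k pages:
 * a 1-2 page request starts with a 4 page (16k) window, 3-4 pages with
 * 8 (32k), 5-8 pages with 16 (64k), and anything larger goes straight
 * to the 32 page (128k) maximum.
 */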
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
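/*
 * For example, with max = 32 pages the window grows 1 -> 4 -> 8 -> 16 -> 32:
 * x4 while below max/16, x2 after that, always clamped to max.
 */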
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed
 * all readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */
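
/*
 * Illustrative walk-through (max = 32 pages, the application reading one
 * page at a time): the first read at page 0 sets start=0, size=4,
 * async_size=3 and marks page 1 with PG_readahead.  Reading page 1 then
 * triggers an async readahead with start=4, size=8, async_size=8 and
 * page 4 marked; reading page 4 extends the window to start=12, size=16;
 * and so on, doubling until size reaches max.
 */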

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	int	max = ra->ra_pages;	/* max readahead pages */
	pgoff_t prev_offset;
	int	sequential;

	/*
	 * offset is at the expected callback position: assume sequential
	 * access.  Ramp up sizes, and push forward the readahead window.
	 */
	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
			offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
	sequential = offset - prev_offset <= 1UL || req_size > max;

	/*
	 * Standalone, small read.
	 * Read as is, and do not pollute the readahead state.
	 */
	if (!hit_readahead_marker && !sequential) {
		return __do_page_cache_readahead(mapping, filp,
						offset, req_size, 0);
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		read_lock_irq(&mapping->tree_lock);
		start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
		read_unlock_irq(&mapping->tree_lock);

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * It may be one of
	 * 	- first read on start of file
	 * 	- sequential cache miss
	 * 	- oversize random read
	 * Start readahead for it.
	 */
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
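
/*
 * Typical call site (a sketch, modelled on the generic read path in
 * mm/filemap.c): on a page cache miss at index,
 *
 *	page_cache_sync_readahead(mapping, ra, filp,
 *				  index, last_index - index);
 */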

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim: a page under
	 * writeback may have PG_reclaim set, so don't mistake it for a
	 * readahead marker.
	 */
	if (PageWriteback(page))
		return;


	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
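
/*
 * Typical call site (a sketch, modelled on the generic read path in
 * mm/filemap.c): when an already-cached page carries the marker,
 *
 *	if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, ra, filp,
 *					   page, index, last_index - index);
 */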