/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
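
/*
 * Illustrative caller (a sketch, not code from this file): the VFS open
 * path is expected to zero the file structure and then seed its per-file
 * readahead state from the backing device, roughly:
 *
 *	file_ra_state_init(&filp->f_ra, filp->f_mapping);
 */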

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
		     int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

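/*
 * Illustrative use by a filesystem's ->readpages() (a sketch, not code from
 * this file; my_filler and my_fs_readpage are hypothetical names): hand the
 * whole page list over and let read_cache_pages() do the page-cache
 * insertion and error cleanup, calling the filler for each inserted page:
 *
 *	static int my_filler(void *data, struct page *page)
 *	{
 *		return my_fs_readpage(data, page);
 *	}
 *
 *	return read_cache_pages(mapping, pages, my_filler, filp);
 */
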
static int read_pages(struct address_space *mapping, struct file *filp,
		      struct list_head *pages, unsigned nr_pages)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	/* Plug the block layer so the reads below can be merged. */
	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	/* No ->readpages(): fall back to ->readpage(), one page at a time. */
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them for I/O in one batch.  This avoids
 * the very bad behaviour which would occur if page allocations were to
 * trigger VM writeback partway through: we really don't want to intermingle
 * reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_cold(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

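/*
 * Worked example (derived from the loop above): with nr_to_read = 8 and
 * lookahead_size = 4, pages at page_idx 0..7 are allocated, and the one at
 * page_idx 8 - 4 = 4 gets PG_readahead.  When the reader later touches that
 * marked page, page_cache_async_readahead() kicks off the next window while
 * the remaining four pages are still being consumed.
 */
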
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			       pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}

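/*
 * Worked example (assuming 4k pages, so 2MB = 512 pages): a request for
 * 1280 pages is submitted as three __do_page_cache_readahead() calls of
 * 512, 512 and 256 pages, each advancing @offset, so that no more than 2MB
 * of page cache sits on the pool list at any one time.
 */
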
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

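/*
 * Worked example: on a node with 4000 free pages and 2000 inactive file
 * pages, readahead is capped at (2000 + 4000) / 2 = 3000 pages however
 * large the request, i.e. half of what is free or cheaply reclaimable.
 */
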
/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size: round the request up to the next power of
 * 2, then scale it up (x4 for small requests, x2 for medium ones) and clip
 * the result to max.  For a 128k (32 page) max readahead, a 1 page read
 * gets a 16k initial window and a 16 page read gets the full 128k.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}

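/*
 * Worked example of the ramp-up (derived from the two helpers above): with
 * max = 512 pages (2MB of 4k pages), a window that starts at 4 pages grows
 * as 4 -> 16 -> 64 -> 128 -> 256 -> 512: quadrupling while below max/16,
 * doubling after that, and finally pinned at max.
 */
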
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   struct file_ra_state *ra,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, ra, offset, max);

	/*
	 * no history pages:
	 * it could be a random read
	 */
	if (!size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = get_init_ra_size(size + req_size, max);
	ra->async_size = ra->size;

	return 1;
}

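/*
 * Worked example (derived from the logic above): a read at offset 100 with
 * pages 80..99 already cached yields a history size of 20, and the next
 * window is sized from 20 + req_size.  If instead pages 0..99 are all
 * cached, the history reaches the start of file, so the read is treated as
 * part of a whole-file stream and the estimate is doubled before sizing.
 */
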
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = radix_tree_next_hole(&mapping->page_tree, offset + 1, max);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 */
	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}

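/*
 * Worked trace (derived from the code above, max = 32 pages): a fresh
 * sequential reader asks for 4 pages at offset 0.  initial_readahead sets
 * start=0, size=get_init_ra_size(4, 32)=8, async_size=8-4=4, so pages 0-7
 * are read and page 4 gets PG_readahead.  When the application reaches
 * page 4, the expected-callback-offset branch advances to start=8,
 * size=get_next_ra_size()=16, async_size=16, keeping the pipeline full.
 */
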
/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

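/*
 * Illustrative caller (a sketch of a generic read path, not code from this
 * file): on a page cache miss, kick off synchronous readahead and retry the
 * lookup before falling back to a single-page read:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 */
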
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
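
/*
 * Illustrative caller (a sketch, not code from this file): when a read path
 * finds a cached page already carrying PG_readahead, it hands the page here
 * so the next window can be submitted asynchronously:
 *
 *	if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, &filp->f_ra, filp,
 *					   page, index, last_index - index);
 */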