/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

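/*
 * Swap pages get no dirty accounting or flusher-thread writeback of
 * their own (BDI_CAP_NO_ACCT_AND_WRITEBACK); BDI_CAP_SWAP_BACKED marks
 * this bdi as backing swap-backed pages.
 */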
static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

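/*
 * Swap cache statistics, bumped via INC_CACHE_INFO() and reported by
 * show_swap_cache_info().  find_success/find_total are updated without
 * any lock, so the numbers are only approximate.
 */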
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so add_to_swap_cache()
		 * doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

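/*
 * add_to_swap_cache: as __add_to_swap_cache, but preloads the radix
 * tree first so that the insertion under tree_lock need not allocate.
 */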
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

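	/*
	 * The swap cache cannot hold a huge page directly: split it
	 * first, and give the swap slot back if the split fails.
	 */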
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

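	/*
	 * Loop until the page turns up in the swap cache, we manage to
	 * read it in ourselves, or a hard failure stops us: allocation
	 * failure, or the swap entry having gone stale under us.
	 */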
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}