/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

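/*
 * A minimal sketch of the expected caller sequence for the two helpers
 * above (assumed from the migrate_prep() comment, not taken verbatim
 * from any one caller):
 *
 *	migrate_prep();				-- or migrate_prep_local()
 *	if (!isolate_lru_page(page))		-- collect pages onto a list
 *		list_add_tail(&page->lru, &pagelist);
 *	migrate_pages(&pagelist, new_page, private, ...);
 */
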
/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

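/*
 * The NR_ISOLATED_* decrement above pairs with the increment performed
 * when each page was isolated, so only pages that were accounted as
 * isolated may be passed in on the list.
 */
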
/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (pmd_trans_huge(*pmd))
			goto out;
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock?  No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement in page migration has started,
	 * page_count *must* be zero. And we don't want to call
	 * wait_on_page_locked() against a page without holding a reference,
	 * so we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
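
/*
 * A sketch of the expected caller pattern (assumed; the actual caller
 * is the page fault path, e.g. do_swap_page() in mm/memory.c):
 *
 *	entry = pte_to_swp_entry(pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(mm, pmd, address);
 *		return 0;		-- the fault is simply retried
 *	}
 */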

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */
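
/*
 * The rollback in the async path above relies on the buffer_heads of a
 * page forming a circular list linked by b_this_page: walking from head
 * until the buffer that failed to lock revisits exactly the buffers
 * that were already locked.
 */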

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved first and we then failed to lock the buffers, we could
	 * not move the mapping back due to the elevated page count and would
	 * have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_unfreeze_refs(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_unfreeze_refs(page, expected_count - 1);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (!PageSwapCache(page) && PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

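/*
 * A worked example of the reference count check above (derived from the
 * comment before migrate_page_move_mapping(), not from an extra source):
 * a clean page cache page is referenced once by the radix tree and once
 * by the isolating caller, so expected_count = 2 + page_has_private(page)
 * evaluates to 2; if the page also carries fs-private data (e.g. buffer
 * heads), page_has_private() contributes the third reference. Any other
 * count means someone else holds the page and migration backs off with
 * -EAGAIN.
 */
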
/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become
		 * dirty, whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

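/*
 * A minimal sketch of how a mapping might wire this up (assumed usage,
 * not copied from a particular filesystem): a mapping whose pages carry
 * no fs-private state can point its migratepage method directly at
 * migrate_page():
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */
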
#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);

	if (rc)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

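/*
 * buffer_migrate_page() is the usual choice for simple block-backed
 * mappings; block device inodes, for example, are believed to use it as
 * their migratepage method. Filesystems with more complex fs-private
 * state must provide their own implementation instead.
 */
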
/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				int remap_swapcache, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
		page->mapping = NULL;
	}

	unlock_page(newpage);

	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
			int force, bool offlining, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int remap_swapcache = 1;
	int charge = 0;
	struct mem_cgroup *mem;
	struct anon_vma *anon_vma = NULL;

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page.  The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By the time try_to_unmap() is done, page->mapcount may have gone
	 * down to 0, and we could fail to notice that the anon_vma was freed
	 * while we migrate the page. This get_anon_vma() delays freeing the
	 * anon_vma pointer until the end of migration. File cache pages are
	 * no problem because of page_lock(); file caches may use writepage()
	 * or lock_page() during migration, so only anon pages need care here.
	 */
	if (PageAnon(page)) {
		/*
		 * Only page_lock_anon_vma() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_get_anon_vma(page);
		if (anon_vma) {
			/*
			 * Anon page
			 */
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache, mode);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);

uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
	unlock_page(page);
out:
	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, bool offlining,
			enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto out;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto out;

	rc = __unmap_and_move(page, newpage, force, offlining, mode);
out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

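/*
 * Reference ownership after unmap_and_move(), as implied by the code
 * above: the new page is always handed back via putback_lru_page(),
 * which frees it when migration failed and it holds the last reference;
 * the old page is taken off the private list and put back on the LRU
 * unless -EAGAIN asks the caller to retry it.
 */
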
/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, bool offlining,
				enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || mode != MIGRATE_SYNC)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1, mode);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(hpage);

out:
	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

|  | 950 | /* | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 951 | * migrate_pages | 
|  | 952 | * | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 953 | * The function takes one list of pages to migrate and a function | 
|  | 954 | * that, given a page to be migrated and the private data, determines | 
|  | 955 | * the target of the move and allocates the new page. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 956 | * | 
|  | 957 | * The function returns after 10 attempts or if no pages | 
|  | 958 | * are movable anymore because the list has become empty | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 959 | * or no retryable pages exist anymore. | 
|  | 960 | * The caller should call putback_lru_pages() to return pages to the LRU | 
| Minchan Kim | 28bd657 | 2011-01-25 15:07:26 -0800 | [diff] [blame] | 961 | * or the free list only if ret != 0. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 962 | * | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 963 | * Return: Number of pages not migrated or error code. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 964 | */ | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 965 | int migrate_pages(struct list_head *from, | 
| Mel Gorman | 7f0f249 | 2011-01-13 15:45:58 -0800 | [diff] [blame] | 966 | new_page_t get_new_page, unsigned long private, bool offlining, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 967 | enum migrate_mode mode) | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 968 | { | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 969 | int retry = 1; | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 970 | int nr_failed = 0; | 
|  | 971 | int pass = 0; | 
|  | 972 | struct page *page; | 
|  | 973 | struct page *page2; | 
|  | 974 | int swapwrite = current->flags & PF_SWAPWRITE; | 
|  | 975 | int rc; | 
|  | 976 |  | 
|  | 977 | if (!swapwrite) | 
|  | 978 | current->flags |= PF_SWAPWRITE; | 
|  | 979 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 980 | for(pass = 0; pass < 10 && retry; pass++) { | 
|  | 981 | retry = 0; | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 982 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 983 | list_for_each_entry_safe(page, page2, from, lru) { | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 984 | cond_resched(); | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 985 |  | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 986 | rc = unmap_and_move(get_new_page, private, | 
| Mel Gorman | 77f1fe6 | 2011-01-13 15:45:57 -0800 | [diff] [blame] | 987 | page, pass > 2, offlining, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 988 | mode); | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 989 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 990 | switch(rc) { | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 991 | case -ENOMEM: | 
|  | 992 | goto out; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 993 | case -EAGAIN: | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 994 | retry++; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 995 | break; | 
|  | 996 | case 0: | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 997 | break; | 
|  | 998 | default: | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 999 | /* Permanent failure */ | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1000 | nr_failed++; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1001 | break; | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1002 | } | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1003 | } | 
|  | 1004 | } | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 1005 | rc = 0; | 
|  | 1006 | out: | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1007 | if (!swapwrite) | 
|  | 1008 | current->flags &= ~PF_SWAPWRITE; | 
|  | 1009 |  | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 1010 | if (rc) | 
|  | 1011 | return rc; | 
|  | 1012 |  | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1013 | return nr_failed + retry; | 
|  | 1014 | } | 
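/*
 * An illustrative caller sketch (not part of this file) for the
 * contract documented above, assuming the pages were already isolated
 * onto @pagelist the way do_move_page_to_node_array() below does it;
 * sketch_migrate_list is a hypothetical name:
 */
static int sketch_migrate_list(struct list_head *pagelist,
			       new_page_t get_new_page, unsigned long private)
{
	int err;

	migrate_prep();			/* drain per-cpu LRU pagevecs */
	err = migrate_pages(pagelist, get_new_page, private,
					false, MIGRATE_SYNC);
	if (err)			/* ret != 0: pages left on the list */
		putback_lru_pages(pagelist);
	return err;
}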
|  | 1015 |  | 
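/*
 * Hugepage counterpart of migrate_pages(). The retry loop below is the
 * same, but PF_SWAPWRITE is not toggled, and hugepages are released
 * with put_page() rather than putback_lru_page() since they do not sit
 * on the LRU lists.
 */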
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1016 | int migrate_huge_pages(struct list_head *from, | 
| Mel Gorman | 7f0f249 | 2011-01-13 15:45:58 -0800 | [diff] [blame] | 1017 | new_page_t get_new_page, unsigned long private, bool offlining, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1018 | enum migrate_mode mode) | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1019 | { | 
|  | 1020 | int retry = 1; | 
|  | 1021 | int nr_failed = 0; | 
|  | 1022 | int pass = 0; | 
|  | 1023 | struct page *page; | 
|  | 1024 | struct page *page2; | 
|  | 1025 | int rc; | 
|  | 1026 |  | 
|  | 1027 | for (pass = 0; pass < 10 && retry; pass++) { | 
|  | 1028 | retry = 0; | 
|  | 1029 |  | 
|  | 1030 | list_for_each_entry_safe(page, page2, from, lru) { | 
|  | 1031 | cond_resched(); | 
|  | 1032 |  | 
|  | 1033 | rc = unmap_and_move_huge_page(get_new_page, | 
| Mel Gorman | 77f1fe6 | 2011-01-13 15:45:57 -0800 | [diff] [blame] | 1034 | private, page, pass > 2, offlining, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1035 | mode); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1036 |  | 
|  | 1037 | switch(rc) { | 
|  | 1038 | case -ENOMEM: | 
|  | 1039 | goto out; | 
|  | 1040 | case -EAGAIN: | 
|  | 1041 | retry++; | 
|  | 1042 | break; | 
|  | 1043 | case 0: | 
|  | 1044 | break; | 
|  | 1045 | default: | 
|  | 1046 | /* Permanent failure */ | 
|  | 1047 | nr_failed++; | 
|  | 1048 | break; | 
|  | 1049 | } | 
|  | 1050 | } | 
|  | 1051 | } | 
|  | 1052 | rc = 0; | 
|  | 1053 | out: | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1054 | if (rc) | 
|  | 1055 | return rc; | 
|  | 1056 |  | 
|  | 1057 | return nr_failed + retry; | 
|  | 1058 | } | 
|  | 1059 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1060 | #ifdef CONFIG_NUMA | 
|  | 1061 | /* | 
|  | 1062 | * Move a list of individual pages | 
|  | 1063 | */ | 
|  | 1064 | struct page_to_node { | 
|  | 1065 | unsigned long addr; | 
|  | 1066 | struct page *page; | 
|  | 1067 | int node; | 
|  | 1068 | int status; | 
|  | 1069 | }; | 
|  | 1070 |  | 
|  | 1071 | static struct page *new_page_node(struct page *p, unsigned long private, | 
|  | 1072 | int **result) | 
|  | 1073 | { | 
|  | 1074 | struct page_to_node *pm = (struct page_to_node *)private; | 
|  | 1075 |  | 
|  | 1076 | while (pm->node != MAX_NUMNODES && pm->page != p) | 
|  | 1077 | pm++; | 
|  | 1078 |  | 
|  | 1079 | if (pm->node == MAX_NUMNODES) | 
|  | 1080 | return NULL; | 
|  | 1081 |  | 
|  | 1082 | *result = &pm->status; | 
|  | 1083 |  | 
| Mel Gorman | 6484eb3 | 2009-06-16 15:31:54 -0700 | [diff] [blame] | 1084 | return alloc_pages_exact_node(pm->node, | 
| Mel Gorman | 769848c | 2007-07-17 04:03:05 -0700 | [diff] [blame] | 1085 | GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1086 | } | 
|  | 1087 |  | 
|  | 1088 | /* | 
|  | 1089 | * Move a set of pages as indicated in the pm array. The addr | 
|  | 1090 | * field must be set to the virtual address of the page to be moved | 
|  | 1091 | * and the node field must contain a valid target node. | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1092 | * The pm array ends with node = MAX_NUMNODES. | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1093 | */ | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1094 | static int do_move_page_to_node_array(struct mm_struct *mm, | 
|  | 1095 | struct page_to_node *pm, | 
|  | 1096 | int migrate_all) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1097 | { | 
|  | 1098 | int err; | 
|  | 1099 | struct page_to_node *pp; | 
|  | 1100 | LIST_HEAD(pagelist); | 
|  | 1101 |  | 
|  | 1102 | down_read(&mm->mmap_sem); | 
|  | 1103 |  | 
|  | 1104 | /* | 
|  | 1105 | * Build a list of pages to migrate | 
|  | 1106 | */ | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1107 | for (pp = pm; pp->node != MAX_NUMNODES; pp++) { | 
|  | 1108 | struct vm_area_struct *vma; | 
|  | 1109 | struct page *page; | 
|  | 1110 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1111 | err = -EFAULT; | 
|  | 1112 | vma = find_vma(mm, pp->addr); | 
| Gleb Natapov | 70384dc | 2010-10-26 14:22:07 -0700 | [diff] [blame] | 1113 | if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1114 | goto set_status; | 
|  | 1115 |  | 
| Andrea Arcangeli | 500d65d | 2011-01-13 15:46:55 -0800 | [diff] [blame] | 1116 | page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT); | 
| Linus Torvalds | 89f5b7d | 2008-06-20 11:18:25 -0700 | [diff] [blame] | 1117 |  | 
|  | 1118 | err = PTR_ERR(page); | 
|  | 1119 | if (IS_ERR(page)) | 
|  | 1120 | goto set_status; | 
|  | 1121 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1122 | err = -ENOENT; | 
|  | 1123 | if (!page) | 
|  | 1124 | goto set_status; | 
|  | 1125 |  | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1126 | /* Use PageReserved to check for zero page */ | 
|  | 1127 | if (PageReserved(page) || PageKsm(page)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1128 | goto put_and_set; | 
|  | 1129 |  | 
|  | 1130 | pp->page = page; | 
|  | 1131 | err = page_to_nid(page); | 
|  | 1132 |  | 
|  | 1133 | if (err == pp->node) | 
|  | 1134 | /* | 
|  | 1135 | * Page is already on the target node | 
|  | 1136 | */ | 
|  | 1137 | goto put_and_set; | 
|  | 1138 |  | 
|  | 1139 | err = -EACCES; | 
|  | 1140 | if (page_mapcount(page) > 1 && | 
|  | 1141 | !migrate_all) | 
|  | 1142 | goto put_and_set; | 
|  | 1143 |  | 
| Nick Piggin | 62695a8 | 2008-10-18 20:26:09 -0700 | [diff] [blame] | 1144 | err = isolate_lru_page(page); | 
| KOSAKI Motohiro | 6d9c285 | 2009-12-14 17:58:11 -0800 | [diff] [blame] | 1145 | if (!err) { | 
| Nick Piggin | 62695a8 | 2008-10-18 20:26:09 -0700 | [diff] [blame] | 1146 | list_add_tail(&page->lru, &pagelist); | 
| KOSAKI Motohiro | 6d9c285 | 2009-12-14 17:58:11 -0800 | [diff] [blame] | 1147 | inc_zone_page_state(page, NR_ISOLATED_ANON + | 
|  | 1148 | page_is_file_cache(page)); | 
|  | 1149 | } | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1150 | put_and_set: | 
|  | 1151 | /* | 
|  | 1152 | * Either remove the duplicate refcount from | 
|  | 1153 | * isolate_lru_page() or drop the page ref if it was | 
|  | 1154 | * not isolated. | 
|  | 1155 | */ | 
|  | 1156 | put_page(page); | 
|  | 1157 | set_status: | 
|  | 1158 | pp->status = err; | 
|  | 1159 | } | 
|  | 1160 |  | 
| Brice Goglin | e78bbfa | 2008-10-18 20:27:15 -0700 | [diff] [blame] | 1161 | err = 0; | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 1162 | if (!list_empty(&pagelist)) { | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1163 | err = migrate_pages(&pagelist, new_page_node, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1164 | (unsigned long)pm, 0, MIGRATE_SYNC); | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 1165 | if (err) | 
|  | 1166 | putback_lru_pages(&pagelist); | 
|  | 1167 | } | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1168 |  | 
|  | 1169 | up_read(&mm->mmap_sem); | 
|  | 1170 | return err; | 
|  | 1171 | } | 
|  | 1172 |  | 
|  | 1173 | /* | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1174 | * Migrate an array of page addresses onto an array of nodes and fill | 
|  | 1175 | * in the corresponding array of status values. | 
|  | 1176 | */ | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1177 | static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1178 | unsigned long nr_pages, | 
|  | 1179 | const void __user * __user *pages, | 
|  | 1180 | const int __user *nodes, | 
|  | 1181 | int __user *status, int flags) | 
|  | 1182 | { | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1183 | struct page_to_node *pm; | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1184 | unsigned long chunk_nr_pages; | 
|  | 1185 | unsigned long chunk_start; | 
|  | 1186 | int err; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1187 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1188 | err = -ENOMEM; | 
|  | 1189 | pm = (struct page_to_node *)__get_free_page(GFP_KERNEL); | 
|  | 1190 | if (!pm) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1191 | goto out; | 
| Brice Goglin | 35282a2 | 2009-06-16 15:32:43 -0700 | [diff] [blame] | 1192 |  | 
|  | 1193 | migrate_prep(); | 
|  | 1194 |  | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1195 | /* | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1196 | * Store a chunk of the page_to_node array in a single page, | 
|  | 1197 | * but reserve the last entry for the end marker | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1198 | */ | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1199 | chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1; | 
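/*
 * Worked example, assuming 4K pages and a 24-byte page_to_node on a
 * 64-bit build: 4096 / 24 = 170 entries fit in one page, so each chunk
 * carries 169 records plus the MAX_NUMNODES end marker stored below.
 */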
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1200 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1201 | for (chunk_start = 0; | 
|  | 1202 | chunk_start < nr_pages; | 
|  | 1203 | chunk_start += chunk_nr_pages) { | 
|  | 1204 | int j; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1205 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1206 | if (chunk_start + chunk_nr_pages > nr_pages) | 
|  | 1207 | chunk_nr_pages = nr_pages - chunk_start; | 
|  | 1208 |  | 
|  | 1209 | /* fill the chunk pm with addrs and nodes from user-space */ | 
|  | 1210 | for (j = 0; j < chunk_nr_pages; j++) { | 
|  | 1211 | const void __user *p; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1212 | int node; | 
|  | 1213 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1214 | err = -EFAULT; | 
|  | 1215 | if (get_user(p, pages + j + chunk_start)) | 
|  | 1216 | goto out_pm; | 
|  | 1217 | pm[j].addr = (unsigned long) p; | 
|  | 1218 |  | 
|  | 1219 | if (get_user(node, nodes + j + chunk_start)) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1220 | goto out_pm; | 
|  | 1221 |  | 
|  | 1222 | err = -ENODEV; | 
| Linus Torvalds | 6f5a55f | 2010-02-05 16:16:50 -0800 | [diff] [blame] | 1223 | if (node < 0 || node >= MAX_NUMNODES) | 
|  | 1224 | goto out_pm; | 
|  | 1225 |  | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1226 | if (!node_state(node, N_HIGH_MEMORY)) | 
|  | 1227 | goto out_pm; | 
|  | 1228 |  | 
|  | 1229 | err = -EACCES; | 
|  | 1230 | if (!node_isset(node, task_nodes)) | 
|  | 1231 | goto out_pm; | 
|  | 1232 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1233 | pm[j].node = node; | 
|  | 1234 | } | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1235 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1236 | /* End marker for this chunk */ | 
|  | 1237 | pm[chunk_nr_pages].node = MAX_NUMNODES; | 
|  | 1238 |  | 
|  | 1239 | /* Migrate this chunk */ | 
|  | 1240 | err = do_move_page_to_node_array(mm, pm, | 
|  | 1241 | flags & MPOL_MF_MOVE_ALL); | 
|  | 1242 | if (err < 0) | 
|  | 1243 | goto out_pm; | 
|  | 1244 |  | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1245 | /* Return status information */ | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1246 | for (j = 0; j < chunk_nr_pages; j++) | 
|  | 1247 | if (put_user(pm[j].status, status + j + chunk_start)) { | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1248 | err = -EFAULT; | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1249 | goto out_pm; | 
|  | 1250 | } | 
|  | 1251 | } | 
|  | 1252 | err = 0; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1253 |  | 
|  | 1254 | out_pm: | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1255 | free_page((unsigned long)pm); | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1256 | out: | 
|  | 1257 | return err; | 
|  | 1258 | } | 
|  | 1259 |  | 
|  | 1260 | /* | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1261 | * Determine the nodes of an array of pages and store them in an array of status values. | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1262 | */ | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1263 | static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, | 
|  | 1264 | const void __user **pages, int *status) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1265 | { | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1266 | unsigned long i; | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1267 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1268 | down_read(&mm->mmap_sem); | 
|  | 1269 |  | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1270 | for (i = 0; i < nr_pages; i++) { | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1271 | unsigned long addr = (unsigned long)(*pages); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1272 | struct vm_area_struct *vma; | 
|  | 1273 | struct page *page; | 
| KOSAKI Motohiro | c095adb | 2008-12-16 16:06:43 +0900 | [diff] [blame] | 1274 | int err = -EFAULT; | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1275 |  | 
|  | 1276 | vma = find_vma(mm, addr); | 
| Gleb Natapov | 70384dc | 2010-10-26 14:22:07 -0700 | [diff] [blame] | 1277 | if (!vma || addr < vma->vm_start) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1278 | goto set_status; | 
|  | 1279 |  | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1280 | page = follow_page(vma, addr, 0); | 
| Linus Torvalds | 89f5b7d | 2008-06-20 11:18:25 -0700 | [diff] [blame] | 1281 |  | 
|  | 1282 | err = PTR_ERR(page); | 
|  | 1283 | if (IS_ERR(page)) | 
|  | 1284 | goto set_status; | 
|  | 1285 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1286 | err = -ENOENT; | 
|  | 1287 | /* Use PageReserved to check for zero page */ | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1288 | if (!page || PageReserved(page) || PageKsm(page)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1289 | goto set_status; | 
|  | 1290 |  | 
|  | 1291 | err = page_to_nid(page); | 
|  | 1292 | set_status: | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1293 | *status = err; | 
|  | 1294 |  | 
|  | 1295 | pages++; | 
|  | 1296 | status++; | 
|  | 1297 | } | 
|  | 1298 |  | 
|  | 1299 | up_read(&mm->mmap_sem); | 
|  | 1300 | } | 
|  | 1301 |  | 
|  | 1302 | /* | 
|  | 1303 | * Determine the nodes of a user array of pages and store them in | 
|  | 1304 | * a user array of status values. | 
|  | 1305 | */ | 
|  | 1306 | static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, | 
|  | 1307 | const void __user * __user *pages, | 
|  | 1308 | int __user *status) | 
|  | 1309 | { | 
|  | 1310 | #define DO_PAGES_STAT_CHUNK_NR 16 | 
|  | 1311 | const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; | 
|  | 1312 | int chunk_status[DO_PAGES_STAT_CHUNK_NR]; | 
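/*
 * Worked size check, assuming 64-bit pointers: 16 * 8 + 16 * 4 = 192
 * bytes of on-stack scratch per chunk, regardless of nr_pages.
 */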
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1313 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1314 | while (nr_pages) { | 
|  | 1315 | unsigned long chunk_nr; | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1316 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1317 | chunk_nr = nr_pages; | 
|  | 1318 | if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) | 
|  | 1319 | chunk_nr = DO_PAGES_STAT_CHUNK_NR; | 
|  | 1320 |  | 
|  | 1321 | if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) | 
|  | 1322 | break; | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1323 |  | 
|  | 1324 | do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); | 
|  | 1325 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1326 | if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) | 
|  | 1327 | break; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1328 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1329 | pages += chunk_nr; | 
|  | 1330 | status += chunk_nr; | 
|  | 1331 | nr_pages -= chunk_nr; | 
|  | 1332 | } | 
|  | 1333 | return nr_pages ? -EFAULT : 0; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1334 | } | 
|  | 1335 |  | 
|  | 1336 | /* | 
|  | 1337 | * Move a list of pages in the address space of the currently executing | 
|  | 1338 | * process. | 
|  | 1339 | */ | 
| Heiko Carstens | 938bb9f | 2009-01-14 14:14:30 +0100 | [diff] [blame] | 1340 | SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, | 
|  | 1341 | const void __user * __user *, pages, | 
|  | 1342 | const int __user *, nodes, | 
|  | 1343 | int __user *, status, int, flags) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1344 | { | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1345 | const struct cred *cred = current_cred(), *tcred; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1346 | struct task_struct *task; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1347 | struct mm_struct *mm; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1348 | int err; | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1349 | nodemask_t task_nodes; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1350 |  | 
|  | 1351 | /* Check flags */ | 
|  | 1352 | if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) | 
|  | 1353 | return -EINVAL; | 
|  | 1354 |  | 
|  | 1355 | if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) | 
|  | 1356 | return -EPERM; | 
|  | 1357 |  | 
|  | 1358 | /* Find the mm_struct */ | 
| Greg Thelen | a879bf5 | 2011-02-25 14:44:13 -0800 | [diff] [blame] | 1359 | rcu_read_lock(); | 
| Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1360 | task = pid ? find_task_by_vpid(pid) : current; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1361 | if (!task) { | 
| Greg Thelen | a879bf5 | 2011-02-25 14:44:13 -0800 | [diff] [blame] | 1362 | rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1363 | return -ESRCH; | 
|  | 1364 | } | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1365 | get_task_struct(task); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1366 |  | 
|  | 1367 | /* | 
|  | 1368 | * Check if this process has the right to modify the specified | 
|  | 1369 | * process. The right exists if the process has administrative | 
|  | 1370 | * capabilities, superuser privileges or the same | 
|  | 1371 | * userid as the target process. | 
|  | 1372 | */ | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1373 | tcred = __task_cred(task); | 
| David Howells | b6dff3e | 2008-11-14 10:39:16 +1100 | [diff] [blame] | 1374 | if (cred->euid != tcred->suid && cred->euid != tcred->uid && | 
|  | 1375 | cred->uid  != tcred->suid && cred->uid  != tcred->uid && | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1376 | !capable(CAP_SYS_NICE)) { | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1377 | rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1378 | err = -EPERM; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1379 | goto out; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1380 | } | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1381 | rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1382 |  | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1383 | err = security_task_movememory(task); | 
|  | 1384 | if (err) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1385 | goto out; | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1386 |  | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1387 | task_nodes = cpuset_mems_allowed(task); | 
|  | 1388 | mm = get_task_mm(task); | 
|  | 1389 | put_task_struct(task); | 
|  | 1390 |  | 
| Sasha Levin | 6e8b09e | 2012-04-25 16:01:53 -0700 | [diff] [blame] | 1391 | if (!mm) | 
|  | 1392 | return -EINVAL; | 
|  | 1393 |  | 
|  | 1394 | if (nodes) | 
|  | 1395 | err = do_pages_move(mm, task_nodes, nr_pages, pages, | 
|  | 1396 | nodes, status, flags); | 
|  | 1397 | else | 
|  | 1398 | err = do_pages_stat(mm, nr_pages, pages, status); | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1399 |  | 
|  | 1400 | mmput(mm); | 
|  | 1401 | return err; | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1402 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1403 | out: | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1404 | put_task_struct(task); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1405 | return err; | 
|  | 1406 | } | 
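/*
 * An illustrative userspace sketch of the syscall above, using the
 * libnuma wrapper from <numaif.h> (assumes node 0 is online and that
 * the page has been faulted in; otherwise status[0] holds -ENOENT):
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		void *pages[1];
 *		int nodes[1] = { 0 };		// target node
 *		int status[1];
 *
 *		pages[0] = malloc(4096);
 *		((char *)pages[0])[0] = 1;	// fault the page in
 *
 *		// pid 0 means the calling process; passing nodes == NULL
 *		// would only query each page's current node.
 *		if (move_pages(0, 1, pages, nodes, status,
 *			       MPOL_MF_MOVE) == 0)
 *			printf("status[0] = %d (node or -errno)\n",
 *			       status[0]);
 *		return 0;
 *	}
 */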
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1407 |  | 
| Christoph Lameter | 7b2259b | 2006-06-25 05:46:48 -0700 | [diff] [blame] | 1408 | /* | 
|  | 1409 | * Call the migration functions in the vm_ops that may prepare | 
|  | 1410 | * memory in a VMA for migration. Migration functions may perform | 
|  | 1411 | * the migration for VMAs that do not have an underlying page struct. | 
|  | 1412 | */ | 
|  | 1413 | int migrate_vmas(struct mm_struct *mm, const nodemask_t *to, | 
|  | 1414 | const nodemask_t *from, unsigned long flags) | 
|  | 1415 | { | 
|  | 1416 | struct vm_area_struct *vma; | 
|  | 1417 | int err = 0; | 
|  | 1418 |  | 
| Daisuke Nishimura | 1001c9f | 2009-02-11 13:04:18 -0800 | [diff] [blame] | 1419 | for (vma = mm->mmap; vma && !err; vma = vma->vm_next) { | 
| Christoph Lameter | 7b2259b | 2006-06-25 05:46:48 -0700 | [diff] [blame] | 1420 | if (vma->vm_ops && vma->vm_ops->migrate) { | 
|  | 1421 | err = vma->vm_ops->migrate(vma, to, from, flags); | 
|  | 1422 | if (err) | 
|  | 1423 | break; | 
|  | 1424 | } | 
|  | 1425 | } | 
|  | 1426 | return err; | 
|  | 1427 | } | 
| Gerald Schaefer | 83d1674 | 2008-07-23 21:28:22 -0700 | [diff] [blame] | 1428 | #endif |