/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful, put it onto
 * the indicated list with an elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 0;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

static inline void move_to_lru(struct page *page)
{
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		move_to_lru(page);
		count++;
	}
	return count;
}
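
/*
 * Illustrative usage sketch (not part of this file): callers isolate
 * pages onto a private list and then hand that list to migrate_pages().
 * Compare do_move_pages() below, which follows the same pattern:
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (isolate_lru_page(page, &pagelist) == 0)
 *		migrate_pages(&pagelist, get_new_page, private);
 *
 * migrate_pages() itself returns every remaining page on the list to
 * the LRU (or frees it), so no separate putback_lru_pages() call is
 * needed on this path; a caller that aborts before migrating calls
 * putback_lru_pages() directly.
 */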

static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
}
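
/*
 * A migration entry is encoded as a swap-style pte using the special
 * migration swap types from <linux/swapops.h>, so any pte that is
 * neither none, present nor a nonlinear file pte may hold one. The
 * caller still has to check is_migration_entry() after decoding.
 */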

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
		struct page *old, struct page *new)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	unsigned long addr = page_address_in_vma(new, vma);

	if (addr == -EFAULT)
		return;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		return;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
		goto out;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, pte);
	lazy_mmu_prot_update(pte);

out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
	struct vm_area_struct *vma;
	struct address_space *mapping = page_mapping(new);
	struct prio_tree_iter iter;
	pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (!mapping)
		return;

	spin_lock(&mapping->i_mmap_lock);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
		remove_migration_pte(vma, old, new);

	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, old, new);

	spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	if (PageAnon(new))
		remove_anon_migration_ptes(old, new);
	else
		remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	get_page(page);
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
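
/*
 * Sketch of the caller side in do_swap_page() (simplified from
 * mm/memory.c for illustration):
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(mm, pmd, address);
 *		goto out;
 *	}
 *
 * Returning from the fault handler here causes the fault to be
 * retried once the page is unlocked, by which time the migration
 * entry has been replaced by a working pte.
 */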

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct page **radix_pointer;

	if (!mapping) {
		/* Anonymous page */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (page_count(page) != 2 + !!PagePrivate(page) ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);
#ifdef CONFIG_SWAP
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}
#endif

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
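
/*
 * Worked example of the reference count check above, for a mapped
 * pagecache page that was isolated with isolate_lru_page(): one
 * reference is held by the radix tree slot, one was taken by
 * isolate_lru_page(), and one more covers the buffers when
 * PagePrivate is set, giving the expected 2 + !!PagePrivate(page).
 * Any additional reference means someone else is using the page and
 * the migration attempt must be retried.
 */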

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

#ifdef CONFIG_SWAP
	ClearPageSwapCache(page);
#endif
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
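
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file):
 * a filesystem whose pages carry no PagePrivate data can wire its
 * address_space_operations straight to migrate_page():
 *
 *	static struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */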

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
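
/*
 * Buffer-backed mappings such as block devices are the expected users
 * of this helper; an aops table for such a mapping would set
 * (illustrative only):
 *
 *	.migratepage	= buffer_migrate_page,
 */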

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);
	if (rc < 0)
		/* I/O Error writing */
		return -EIO;

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_buffers(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (TestSetPageLocked(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (!rc)
		remove_migration_ptes(page, newpage);
	else
		newpage->mapping = NULL;

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1)
		/* page was freed from under us. So we are done. */
		goto move_newpage;

	rc = -EAGAIN;
	if (TestSetPageLocked(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	if (PageWriteback(page)) {
		if (!force)
			goto unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * Establish migration ptes or remove ptes
	 */
	try_to_unmap(page, 1);
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page);

	if (rc)
		remove_migration_ptes(page, page);

unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		move_to_lru(page);
	}

move_newpage:
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	move_to_lru(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a callback that,
 * given a page to be migrated and the private data, determines the
 * target of the move and allocates the new page.
 *
 * The function returns after 10 attempts or when no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}
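
/*
 * Sketch of a minimal new_page_t callback (hypothetical; compare
 * new_page_node() below, which this file uses for sys_move_pages):
 *
 *	static struct page *new_node_page(struct page *p,
 *			unsigned long private, int **result)
 *	{
 *		return alloc_pages_node((int)private, GFP_HIGHUSER, 0);
 *	}
 *
 * A caller would then move an isolated list to node nid with
 * migrate_pages(&pagelist, new_node_page, nid). Leaving *result unset
 * is fine: unmap_and_move() initializes it to NULL and only writes
 * per-page status through it when the callback provides a pointer.
 */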

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_node(pm->node, GFP_HIGHUSER, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 */
static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
				int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	migrate_prep();
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		/*
		 * A valid page pointer that will not match any of the
		 * pages that will be moved.
		 */
		pp->page = ZERO_PAGE(0);

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);
		err = -ENOENT;
		if (!page)
			goto set_status;

		if (PageReserved(page))		/* Check for zero page */
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page, &pagelist);
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm);
	else
		err = -ENOENT;

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Determine the nodes of a list of pages. The addr in the pm array
 * must have been set to the virtual address whose node number we
 * want to determine.
 */
static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
{
	down_read(&mm->mmap_sem);

	for ( ; pm->node != MAX_NUMNODES; pm++) {
		struct vm_area_struct *vma;
		struct page *page;
		int err;

		err = -EFAULT;
		vma = find_vma(mm, pm->addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, pm->addr, 0);
		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		pm->status = err;
	}

	up_read(&mm->mmap_sem);
	return 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
			const void __user * __user *pages,
			const int __user *nodes,
			int __user *status, int flags)
{
	int err = 0;
	int i;
	struct task_struct *task;
	nodemask_t task_nodes;
	struct mm_struct *mm;
	struct page_to_node *pm = NULL;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_pid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out2;
	}

	err = security_task_movememory(task);
	if (err)
		goto out2;

	task_nodes = cpuset_mems_allowed(task);

	/* Limit nr_pages so that the multiplication may not overflow */
	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
		err = -E2BIG;
		goto out2;
	}

	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
	if (!pm) {
		err = -ENOMEM;
		goto out2;
	}

	/*
	 * Get parameters from user space and initialize the pm
	 * array. Return various errors if the user did something wrong.
	 */
	for (i = 0; i < nr_pages; i++) {
		const void *p;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out;

		pm[i].addr = (unsigned long)p;
		if (nodes) {
			int node;

			if (get_user(node, nodes + i))
				goto out;

			err = -ENODEV;
			if (!node_online(node))
				goto out;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out;

			pm[i].node = node;
		}
	}
	/* End marker */
	pm[nr_pages].node = MAX_NUMNODES;

	if (nodes)
		err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
	else
		err = do_pages_stat(mm, pm);

	if (err >= 0)
		/* Return status information */
		for (i = 0; i < nr_pages; i++)
			if (put_user(pm[i].status, status + i))
				err = -EFAULT;

out:
	vfree(pm);
out2:
	mmput(mm);
	return err;
}
#endif
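
/*
 * Userspace view of the system call above (illustrative sketch; the
 * move_pages() wrapper is provided by libnuma's <numaif.h>):
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		printf("page now on node %d\n", status[0]);
 *
 * A pid of 0 targets the calling process, and passing NULL for nodes
 * queries the current node of each page instead of moving it, as the
 * do_pages_stat() branch above shows.
 */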

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
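
/*
 * Illustrative hook (hypothetical driver, not part of this file): a vma
 * backed by special memory without page structs could participate via
 * its vm_operations_struct:
 *
 *	static struct vm_operations_struct example_vm_ops = {
 *		.migrate	= example_migrate,
 *	};
 *
 * where example_migrate(vma, to, from, flags) mirrors the call made in
 * migrate_vmas() above and returns 0 on success.
 */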