/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
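
/*
 * Illustrative sketch, not part of this file: a typical migration caller
 * drains the per-CPU LRU caches, isolates the pages it wants to move onto
 * a private list, and puts back whatever could not be migrated. The
 * new_page_alloc() callback name below is made up; migrate_pages() and
 * its new_page_t callback are the real entry points (migrate_pages() is
 * defined later in the full file).
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page)) {
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_zone_page_state(page, NR_ISOLATED_ANON +
 *				page_is_file_cache(page));
 *	}
 *	err = migrate_pages(&pagelist, new_page_alloc, 0,
 *				false, MIGRATE_SYNC);
 *	if (err)
 *		putback_lru_pages(&pagelist);
 */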

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (pmd_trans_huge(*pmd))
			goto out;
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock?  No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_dcache_page(new);
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}
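
/*
 * Note: rmap_walk() invokes remove_migration_pte() for each vma that might
 * map the page; returning SWAP_AGAIN from the callback tells the walk to
 * continue, so every migration entry still pointing at "old" is rewritten.
 */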

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero, and we don't want to call
	 * wait_on_page_locked() against a page we hold no reference on.
	 * So we use get_page_unless_zero() here; even if it fails, the
	 * page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = &(mm)->page_table_lock;
	__migration_entry_wait(mm, pte, ptl);
}
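
/*
 * Illustrative sketch, not part of this file: the page fault path is the
 * main caller of migration_entry_wait(). Roughly, do_swap_page() does
 * something like the following before treating the entry as real swap:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry)) {
 *			migration_entry_wait(mm, pmd, address);
 *			goto out;
 *		}
 *		...
 *	}
 *
 * so the faulting task sleeps until the page is unlocked by migration and
 * then retries the fault against the new pte.
 */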

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If we moved the
	 * mapping but then failed to lock the buffers, we could not move the
	 * mapping back due to an elevated page count and would have to block
	 * waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_unfreeze_refs(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_unfreeze_refs(page, expected_count - 1);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (!PageSwapCache(page) && PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}
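
/*
 * Worked example of the reference arithmetic above: an isolated pagecache
 * page is pinned by the radix tree and by the migration caller, so
 * expected_count is 2; a page with buffer heads (page_has_private) adds
 * one more, giving 3. page_freeze_refs() succeeds only when nobody else
 * holds a reference, and page_unfreeze_refs(page, expected_count - 1)
 * then releases the old page's radix tree reference, which
 * get_page(newpage) has already transferred to the new page.
 */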

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become
		 * dirty, whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);

	if (rc)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
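
/*
 * Illustrative sketch, not part of this file: a filesystem opts into page
 * migration by pointing its address_space_operations at one of the helpers
 * above ("myfs" is a made-up name):
 *
 *	const struct address_space_operations myfs_aops = {
 *		.writepage	= myfs_writepage,
 *		.migratepage	= buffer_migrate_page,
 *	};
 *
 * Filesystems without buffer-head state can use plain migrate_page(), and
 * mappings that must never move can use fail_migrate_page().
 */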
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				int remap_swapcache, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
		page->mapping = NULL;
	}

	unlock_page(newpage);

	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
			int force, bool offlining, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int remap_swapcache = 1;
	int charge = 0;
	struct mem_cgroup *mem;
	struct anon_vma *anon_vma = NULL;

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page.  The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of the
	 * page lock: file caches may use write_page() or lock_page() in
	 * migration, so only anonymous pages need care here.
	 */
	if (PageAnon(page)) {
		/*
		 * Only page_lock_anon_vma() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_get_anon_vma(page);
		if (anon_vma) {
			/*
			 * Anon page
			 */
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache, mode);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);

uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
	unlock_page(page);
out:
	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, bool offlining,
			enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto out;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto out;

	rc = __unmap_and_move(page, newpage, force, offlining, mode);
out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}
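
/*
 * Note on the conventions above: -EAGAIN leaves the page on the caller's
 * list so that migrate_pages() can retry it on a later pass; any other
 * return value, success included, is final and the page has already been
 * put back on the LRU here.
 */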
 | 891 |  | 
 | 892 | /* | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 893 |  * Counterpart of unmap_and_move_page() for hugepage migration. | 
 | 894 |  * | 
 | 895 |  * This function doesn't wait for the completion of hugepage I/O, | 
 | 896 |  * because there is no race between I/O and migration for hugepages. | 
 | 897 |  * Note that currently hugepage I/O occurs only in direct I/O | 
 | 898 |  * where no lock is held and PG_writeback is irrelevant, | 
 | 899 |  * and the writeback status of all subpages is counted in the reference | 
 | 900 |  * count of the head page (i.e. if all subpages of a 2MB hugepage are | 
 | 901 |  * under direct I/O, the reference count of the head page is 512 and a bit more.) | 
 | 902 |  * This means that when we try to migrate a hugepage whose subpages are | 
 | 903 |  * doing direct I/O, some references remain after try_to_unmap() and | 
 | 904 |  * hugepage migration fails without data corruption. | 
 | 905 |  * | 
 | 906 |  * There is also no race when direct I/O is issued on a page under migration, | 
 | 907 |  * because then the pte is replaced with a migration swap entry and the direct | 
 | 908 |  * I/O code will wait in the page fault for migration to complete. | 
 | 909 |  */ | 
 | 910 | static int unmap_and_move_huge_page(new_page_t get_new_page, | 
 | 911 | 				unsigned long private, struct page *hpage, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 912 | 				int force, bool offlining, | 
 | 913 | 				enum migrate_mode mode) | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 914 | { | 
 | 915 | 	int rc = 0; | 
 | 916 | 	int *result = NULL; | 
 | 917 | 	struct page *new_hpage = get_new_page(hpage, private, &result); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 918 | 	struct anon_vma *anon_vma = NULL; | 
 | 919 |  | 
 | 920 | 	if (!new_hpage) | 
 | 921 | 		return -ENOMEM; | 
 | 922 |  | 
 | 923 | 	rc = -EAGAIN; | 
 | 924 |  | 
 | 925 | 	if (!trylock_page(hpage)) { | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 926 | 		if (!force || mode != MIGRATE_SYNC) | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 927 | 			goto out; | 
 | 928 | 		lock_page(hpage); | 
 | 929 | 	} | 
 | 930 |  | 
| Peter Zijlstra | 746b18d | 2011-05-24 17:12:10 -0700 | [diff] [blame] | 931 | 	if (PageAnon(hpage)) | 
 | 932 | 		anon_vma = page_get_anon_vma(hpage); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 933 |  | 
 | 934 | 	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); | 
 | 935 |  | 
 | 936 | 	if (!page_mapped(hpage)) | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 937 | 		rc = move_to_new_page(new_hpage, hpage, 1, mode); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 938 |  | 
 | 939 | 	if (rc) | 
 | 940 | 		remove_migration_ptes(hpage, hpage); | 
 | 941 |  | 
| Hugh Dickins | fd4a466 | 2011-01-13 15:47:31 -0800 | [diff] [blame] | 942 | 	if (anon_vma) | 
| Peter Zijlstra | 9e60109 | 2011-03-22 16:32:46 -0700 | [diff] [blame] | 943 | 		put_anon_vma(anon_vma); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 944 | 	unlock_page(hpage); | 
 | 945 |  | 
| Hillf Danton | 0976133 | 2011-12-08 14:34:20 -0800 | [diff] [blame] | 946 | out: | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 947 | 	if (rc != -EAGAIN) { | 
 | 948 | 		list_del(&hpage->lru); | 
 | 949 | 		put_page(hpage); | 
 | 950 | 	} | 
 | 951 |  | 
 | 952 | 	put_page(new_hpage); | 
 | 953 |  | 
 | 954 | 	if (result) { | 
 | 955 | 		if (rc) | 
 | 956 | 			*result = rc; | 
 | 957 | 		else | 
 | 958 | 			*result = page_to_nid(new_hpage); | 
 | 959 | 	} | 
 | 960 | 	return rc; | 
 | 961 | } | 
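/*
 * Hypothetical example (not in the tree): a new_page_t callback suited
 * to unmap_and_move_huge_page(), allocating a fresh hugepage of the same
 * hstate on the node passed in @private. As the comment above notes, a
 * 2MB hugepage has 512 subpages, so with every subpage under direct I/O
 * the head page holds 512 extra references and migration backs off
 * harmlessly.
 */
static struct page *example_new_huge_page(struct page *hpage,
					  unsigned long private, int **result)
{
	/* Same hstate as the source hugepage, on the requested node. */
	return alloc_huge_page_node(page_hstate(compound_head(hpage)),
				    (int)private);
}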
 | 962 |  | 
 | 963 | /* | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 964 |  * migrate_pages | 
 | 965 |  * | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 966 |  * The function takes one list of pages to migrate and a callback | 
 | 967 |  * that, given a page to be migrated and the private data, determines | 
 | 968 |  * the target of the move and allocates the new page. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 969 |  * | 
 | 970 |  * The function returns after 10 attempts or if no pages | 
 | 971 |  * are movable anymore because the list has become empty | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 972 |  * or no retryable pages exist anymore. | 
 | 973 |  * The caller should call putback_lru_pages() to return the pages to the | 
| Minchan Kim | 28bd657 | 2011-01-25 15:07:26 -0800 | [diff] [blame] | 974 |  * LRU or free list only if ret != 0. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 975 |  * | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 976 |  * Return: Number of pages not migrated or error code. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 977 |  */ | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 978 | int migrate_pages(struct list_head *from, | 
| Mel Gorman | 7f0f249 | 2011-01-13 15:45:58 -0800 | [diff] [blame] | 979 | 		new_page_t get_new_page, unsigned long private, bool offlining, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 980 | 		enum migrate_mode mode) | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 981 | { | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 982 | 	int retry = 1; | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 983 | 	int nr_failed = 0; | 
 | 984 | 	int pass = 0; | 
 | 985 | 	struct page *page; | 
 | 986 | 	struct page *page2; | 
 | 987 | 	int swapwrite = current->flags & PF_SWAPWRITE; | 
 | 988 | 	int rc; | 
 | 989 |  | 
 | 990 | 	if (!swapwrite) | 
 | 991 | 		current->flags |= PF_SWAPWRITE; | 
 | 992 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 993 | 	for(pass = 0; pass < 10 && retry; pass++) { | 
 | 994 | 		retry = 0; | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 995 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 996 | 		list_for_each_entry_safe(page, page2, from, lru) { | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 997 | 			cond_resched(); | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 998 |  | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 999 | 			rc = unmap_and_move(get_new_page, private, | 
| Mel Gorman | 77f1fe6 | 2011-01-13 15:45:57 -0800 | [diff] [blame] | 1000 | 						page, pass > 2, offlining, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1001 | 						mode); | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1002 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1003 | 			switch(rc) { | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 1004 | 			case -ENOMEM: | 
 | 1005 | 				goto out; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1006 | 			case -EAGAIN: | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1007 | 				retry++; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1008 | 				break; | 
 | 1009 | 			case 0: | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1010 | 				break; | 
 | 1011 | 			default: | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1012 | 				/* Permanent failure */ | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1013 | 				nr_failed++; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1014 | 				break; | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1015 | 			} | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1016 | 		} | 
 | 1017 | 	} | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 1018 | 	rc = 0; | 
 | 1019 | out: | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1020 | 	if (!swapwrite) | 
 | 1021 | 		current->flags &= ~PF_SWAPWRITE; | 
 | 1022 |  | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 1023 | 	if (rc) | 
 | 1024 | 		return rc; | 
 | 1025 |  | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1026 | 	return nr_failed + retry; | 
 | 1027 | } | 
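/*
 * Hypothetical caller sketch: the usual pattern around migrate_pages().
 * example_new_page() is the illustrative callback sketched earlier; the
 * list is assumed to hold pages already isolated with isolate_lru_page()
 * (after migrate_prep(), per the comment at the top of this file).
 */
static int example_migrate_list(struct list_head *pagelist)
{
	int err;

	err = migrate_pages(pagelist, example_new_page, 0 /* private */,
			    false /* offlining */, MIGRATE_SYNC);
	if (err)
		/* Non-zero: put the leftover pages back on the LRU. */
		putback_lru_pages(pagelist);
	return err;
}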
 | 1028 |  | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1029 | int migrate_huge_pages(struct list_head *from, | 
| Mel Gorman | 7f0f249 | 2011-01-13 15:45:58 -0800 | [diff] [blame] | 1030 | 		new_page_t get_new_page, unsigned long private, bool offlining, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1031 | 		enum migrate_mode mode) | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1032 | { | 
 | 1033 | 	int retry = 1; | 
 | 1034 | 	int nr_failed = 0; | 
 | 1035 | 	int pass = 0; | 
 | 1036 | 	struct page *page; | 
 | 1037 | 	struct page *page2; | 
 | 1038 | 	int rc; | 
 | 1039 |  | 
 | 1040 | 	for (pass = 0; pass < 10 && retry; pass++) { | 
 | 1041 | 		retry = 0; | 
 | 1042 |  | 
 | 1043 | 		list_for_each_entry_safe(page, page2, from, lru) { | 
 | 1044 | 			cond_resched(); | 
 | 1045 |  | 
 | 1046 | 			rc = unmap_and_move_huge_page(get_new_page, | 
| Mel Gorman | 77f1fe6 | 2011-01-13 15:45:57 -0800 | [diff] [blame] | 1047 | 					private, page, pass > 2, offlining, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1048 | 					mode); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1049 |  | 
 | 1050 | 			switch(rc) { | 
 | 1051 | 			case -ENOMEM: | 
 | 1052 | 				goto out; | 
 | 1053 | 			case -EAGAIN: | 
 | 1054 | 				retry++; | 
 | 1055 | 				break; | 
 | 1056 | 			case 0: | 
 | 1057 | 				break; | 
 | 1058 | 			default: | 
 | 1059 | 				/* Permanent failure */ | 
 | 1060 | 				nr_failed++; | 
 | 1061 | 				break; | 
 | 1062 | 			} | 
 | 1063 | 		} | 
 | 1064 | 	} | 
 | 1065 | 	rc = 0; | 
 | 1066 | out: | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1067 | 	if (rc) | 
 | 1068 | 		return rc; | 
 | 1069 |  | 
 | 1070 | 	return nr_failed + retry; | 
 | 1071 | } | 
 | 1072 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1073 | #ifdef CONFIG_NUMA | 
 | 1074 | /* | 
 | 1075 |  * Move a list of individual pages | 
 | 1076 |  */ | 
 | 1077 | struct page_to_node { | 
 | 1078 | 	unsigned long addr; | 
 | 1079 | 	struct page *page; | 
 | 1080 | 	int node; | 
 | 1081 | 	int status; | 
 | 1082 | }; | 
 | 1083 |  | 
 | 1084 | static struct page *new_page_node(struct page *p, unsigned long private, | 
 | 1085 | 		int **result) | 
 | 1086 | { | 
 | 1087 | 	struct page_to_node *pm = (struct page_to_node *)private; | 
 | 1088 |  | 
 | 1089 | 	while (pm->node != MAX_NUMNODES && pm->page != p) | 
 | 1090 | 		pm++; | 
 | 1091 |  | 
 | 1092 | 	if (pm->node == MAX_NUMNODES) | 
 | 1093 | 		return NULL; | 
 | 1094 |  | 
 | 1095 | 	*result = &pm->status; | 
 | 1096 |  | 
| Mel Gorman | 6484eb3 | 2009-06-16 15:31:54 -0700 | [diff] [blame] | 1097 | 	return alloc_pages_exact_node(pm->node, | 
| Mel Gorman | 769848c | 2007-07-17 04:03:05 -0700 | [diff] [blame] | 1098 | 				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1099 | } | 
 | 1100 |  | 
 | 1101 | /* | 
 | 1102 |  * Move a set of pages as indicated in the pm array. The addr | 
 | 1103 |  * field must be set to the virtual address of the page to be moved, | 
 | 1104 |  * and the node field must contain a valid target node. | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1105 |  * The pm array ends with node = MAX_NUMNODES. | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1106 |  */ | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1107 | static int do_move_page_to_node_array(struct mm_struct *mm, | 
 | 1108 | 				      struct page_to_node *pm, | 
 | 1109 | 				      int migrate_all) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1110 | { | 
 | 1111 | 	int err; | 
 | 1112 | 	struct page_to_node *pp; | 
 | 1113 | 	LIST_HEAD(pagelist); | 
 | 1114 |  | 
 | 1115 | 	down_read(&mm->mmap_sem); | 
 | 1116 |  | 
 | 1117 | 	/* | 
 | 1118 | 	 * Build a list of pages to migrate | 
 | 1119 | 	 */ | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1120 | 	for (pp = pm; pp->node != MAX_NUMNODES; pp++) { | 
 | 1121 | 		struct vm_area_struct *vma; | 
 | 1122 | 		struct page *page; | 
 | 1123 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1124 | 		err = -EFAULT; | 
 | 1125 | 		vma = find_vma(mm, pp->addr); | 
| Gleb Natapov | 70384dc | 2010-10-26 14:22:07 -0700 | [diff] [blame] | 1126 | 		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1127 | 			goto set_status; | 
 | 1128 |  | 
| Andrea Arcangeli | 500d65d | 2011-01-13 15:46:55 -0800 | [diff] [blame] | 1129 | 		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT); | 
| Linus Torvalds | 89f5b7d | 2008-06-20 11:18:25 -0700 | [diff] [blame] | 1130 |  | 
 | 1131 | 		err = PTR_ERR(page); | 
 | 1132 | 		if (IS_ERR(page)) | 
 | 1133 | 			goto set_status; | 
 | 1134 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1135 | 		err = -ENOENT; | 
 | 1136 | 		if (!page) | 
 | 1137 | 			goto set_status; | 
 | 1138 |  | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1139 | 		/* Use PageReserved to check for zero page */ | 
 | 1140 | 		if (PageReserved(page) || PageKsm(page)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1141 | 			goto put_and_set; | 
 | 1142 |  | 
 | 1143 | 		pp->page = page; | 
 | 1144 | 		err = page_to_nid(page); | 
 | 1145 |  | 
 | 1146 | 		if (err == pp->node) | 
 | 1147 | 			/* | 
 | 1148 | 			 * Page is already on the target node | 
 | 1149 | 			 */ | 
 | 1150 | 			goto put_and_set; | 
 | 1151 |  | 
 | 1152 | 		err = -EACCES; | 
 | 1153 | 		if (page_mapcount(page) > 1 && | 
 | 1154 | 				!migrate_all) | 
 | 1155 | 			goto put_and_set; | 
 | 1156 |  | 
| Nick Piggin | 62695a8 | 2008-10-18 20:26:09 -0700 | [diff] [blame] | 1157 | 		err = isolate_lru_page(page); | 
| KOSAKI Motohiro | 6d9c285 | 2009-12-14 17:58:11 -0800 | [diff] [blame] | 1158 | 		if (!err) { | 
| Nick Piggin | 62695a8 | 2008-10-18 20:26:09 -0700 | [diff] [blame] | 1159 | 			list_add_tail(&page->lru, &pagelist); | 
| KOSAKI Motohiro | 6d9c285 | 2009-12-14 17:58:11 -0800 | [diff] [blame] | 1160 | 			inc_zone_page_state(page, NR_ISOLATED_ANON + | 
 | 1161 | 					    page_is_file_cache(page)); | 
 | 1162 | 		} | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1163 | put_and_set: | 
 | 1164 | 		/* | 
 | 1165 | 		 * Either remove the duplicate refcount from | 
 | 1166 | 		 * isolate_lru_page() or drop the page ref if it was | 
 | 1167 | 		 * not isolated. | 
 | 1168 | 		 */ | 
 | 1169 | 		put_page(page); | 
 | 1170 | set_status: | 
 | 1171 | 		pp->status = err; | 
 | 1172 | 	} | 
 | 1173 |  | 
| Brice Goglin | e78bbfa | 2008-10-18 20:27:15 -0700 | [diff] [blame] | 1174 | 	err = 0; | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 1175 | 	if (!list_empty(&pagelist)) { | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1176 | 		err = migrate_pages(&pagelist, new_page_node, | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1177 | 				(unsigned long)pm, 0, MIGRATE_SYNC); | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 1178 | 		if (err) | 
 | 1179 | 			putback_lru_pages(&pagelist); | 
 | 1180 | 	} | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1181 |  | 
 | 1182 | 	up_read(&mm->mmap_sem); | 
 | 1183 | 	return err; | 
 | 1184 | } | 
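/*
 * Illustrative fragment: how a caller lays out the pm array that
 * do_move_page_to_node_array() consumes. The mm and addresses are
 * placeholders; the one invariant is the MAX_NUMNODES end marker.
 */
static int example_move_two(struct mm_struct *mm,
			    unsigned long addr0, unsigned long addr1)
{
	struct page_to_node pm[3];

	pm[0].addr = addr0;
	pm[0].node = 0;			/* move first page to node 0 */
	pm[1].addr = addr1;
	pm[1].node = 1;			/* move second page to node 1 */
	pm[2].node = MAX_NUMNODES;	/* end-of-array marker */

	return do_move_page_to_node_array(mm, pm, 0 /* !MPOL_MF_MOVE_ALL */);
}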
 | 1185 |  | 
 | 1186 | /* | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1187 |  * Migrate an array of page addresses to an array of nodes and fill | 
 | 1188 |  * in the corresponding array of status values. | 
 | 1189 |  */ | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1190 | static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1191 | 			 unsigned long nr_pages, | 
 | 1192 | 			 const void __user * __user *pages, | 
 | 1193 | 			 const int __user *nodes, | 
 | 1194 | 			 int __user *status, int flags) | 
 | 1195 | { | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1196 | 	struct page_to_node *pm; | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1197 | 	unsigned long chunk_nr_pages; | 
 | 1198 | 	unsigned long chunk_start; | 
 | 1199 | 	int err; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1200 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1201 | 	err = -ENOMEM; | 
 | 1202 | 	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL); | 
 | 1203 | 	if (!pm) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1204 | 		goto out; | 
| Brice Goglin | 35282a2 | 2009-06-16 15:32:43 -0700 | [diff] [blame] | 1205 |  | 
 | 1206 | 	migrate_prep(); | 
 | 1207 |  | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1208 | 	/* | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1209 | 	 * Store a chunk of page_to_node array in a page, | 
 | 1210 | 	 * but keep the last one as a marker | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1211 | 	 */ | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1212 | 	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1213 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1214 | 	for (chunk_start = 0; | 
 | 1215 | 	     chunk_start < nr_pages; | 
 | 1216 | 	     chunk_start += chunk_nr_pages) { | 
 | 1217 | 		int j; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1218 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1219 | 		if (chunk_start + chunk_nr_pages > nr_pages) | 
 | 1220 | 			chunk_nr_pages = nr_pages - chunk_start; | 
 | 1221 |  | 
 | 1222 | 		/* fill the chunk pm with addrs and nodes from user-space */ | 
 | 1223 | 		for (j = 0; j < chunk_nr_pages; j++) { | 
 | 1224 | 			const void __user *p; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1225 | 			int node; | 
 | 1226 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1227 | 			err = -EFAULT; | 
 | 1228 | 			if (get_user(p, pages + j + chunk_start)) | 
 | 1229 | 				goto out_pm; | 
 | 1230 | 			pm[j].addr = (unsigned long) p; | 
 | 1231 |  | 
 | 1232 | 			if (get_user(node, nodes + j + chunk_start)) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1233 | 				goto out_pm; | 
 | 1234 |  | 
 | 1235 | 			err = -ENODEV; | 
| Linus Torvalds | 6f5a55f | 2010-02-05 16:16:50 -0800 | [diff] [blame] | 1236 | 			if (node < 0 || node >= MAX_NUMNODES) | 
 | 1237 | 				goto out_pm; | 
 | 1238 |  | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1239 | 			if (!node_state(node, N_HIGH_MEMORY)) | 
 | 1240 | 				goto out_pm; | 
 | 1241 |  | 
 | 1242 | 			err = -EACCES; | 
 | 1243 | 			if (!node_isset(node, task_nodes)) | 
 | 1244 | 				goto out_pm; | 
 | 1245 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1246 | 			pm[j].node = node; | 
 | 1247 | 		} | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1248 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1249 | 		/* End marker for this chunk */ | 
 | 1250 | 		pm[chunk_nr_pages].node = MAX_NUMNODES; | 
 | 1251 |  | 
 | 1252 | 		/* Migrate this chunk */ | 
 | 1253 | 		err = do_move_page_to_node_array(mm, pm, | 
 | 1254 | 						 flags & MPOL_MF_MOVE_ALL); | 
 | 1255 | 		if (err < 0) | 
 | 1256 | 			goto out_pm; | 
 | 1257 |  | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1258 | 		/* Return status information */ | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1259 | 		for (j = 0; j < chunk_nr_pages; j++) | 
 | 1260 | 			if (put_user(pm[j].status, status + j + chunk_start)) { | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1261 | 				err = -EFAULT; | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1262 | 				goto out_pm; | 
 | 1263 | 			} | 
 | 1264 | 	} | 
 | 1265 | 	err = 0; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1266 |  | 
 | 1267 | out_pm: | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1268 | 	free_page((unsigned long)pm); | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1269 | out: | 
 | 1270 | 	return err; | 
 | 1271 | } | 
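/*
 * Worked example of the chunking above (figures are for a hypothetical
 * 64-bit box with 4KB pages, assuming no struct padding):
 * sizeof(struct page_to_node) is 8 + 8 + 4 + 4 = 24 bytes, so one page
 * holds 4096 / 24 = 170 entries, and chunk_nr_pages = 170 - 1 = 169 once
 * the end marker is reserved. A request for 1000 pages therefore runs in
 * ceil(1000 / 169) = 6 chunks.
 */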
 | 1272 |  | 
 | 1273 | /* | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1274 |  * Determine the nodes of an array of pages and store them in an array of status values. | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1275 |  */ | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1276 | static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, | 
 | 1277 | 				const void __user **pages, int *status) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1278 | { | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1279 | 	unsigned long i; | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1280 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1281 | 	down_read(&mm->mmap_sem); | 
 | 1282 |  | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1283 | 	for (i = 0; i < nr_pages; i++) { | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1284 | 		unsigned long addr = (unsigned long)(*pages); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1285 | 		struct vm_area_struct *vma; | 
 | 1286 | 		struct page *page; | 
| KOSAKI Motohiro | c095adb | 2008-12-16 16:06:43 +0900 | [diff] [blame] | 1287 | 		int err = -EFAULT; | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1288 |  | 
 | 1289 | 		vma = find_vma(mm, addr); | 
| Gleb Natapov | 70384dc | 2010-10-26 14:22:07 -0700 | [diff] [blame] | 1290 | 		if (!vma || addr < vma->vm_start) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1291 | 			goto set_status; | 
 | 1292 |  | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1293 | 		page = follow_page(vma, addr, 0); | 
| Linus Torvalds | 89f5b7d | 2008-06-20 11:18:25 -0700 | [diff] [blame] | 1294 |  | 
 | 1295 | 		err = PTR_ERR(page); | 
 | 1296 | 		if (IS_ERR(page)) | 
 | 1297 | 			goto set_status; | 
 | 1298 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1299 | 		err = -ENOENT; | 
 | 1300 | 		/* Use PageReserved to check for zero page */ | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1301 | 		if (!page || PageReserved(page) || PageKsm(page)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1302 | 			goto set_status; | 
 | 1303 |  | 
 | 1304 | 		err = page_to_nid(page); | 
 | 1305 | set_status: | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1306 | 		*status = err; | 
 | 1307 |  | 
 | 1308 | 		pages++; | 
 | 1309 | 		status++; | 
 | 1310 | 	} | 
 | 1311 |  | 
 | 1312 | 	up_read(&mm->mmap_sem); | 
 | 1313 | } | 
 | 1314 |  | 
 | 1315 | /* | 
 | 1316 |  * Determine the nodes of a user array of pages and store them in | 
 | 1317 |  * a user array of status values. | 
 | 1318 |  */ | 
 | 1319 | static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, | 
 | 1320 | 			 const void __user * __user *pages, | 
 | 1321 | 			 int __user *status) | 
 | 1322 | { | 
 | 1323 | #define DO_PAGES_STAT_CHUNK_NR 16 | 
 | 1324 | 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; | 
 | 1325 | 	int chunk_status[DO_PAGES_STAT_CHUNK_NR]; | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1326 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1327 | 	while (nr_pages) { | 
 | 1328 | 		unsigned long chunk_nr; | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1329 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1330 | 		chunk_nr = nr_pages; | 
 | 1331 | 		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) | 
 | 1332 | 			chunk_nr = DO_PAGES_STAT_CHUNK_NR; | 
 | 1333 |  | 
 | 1334 | 		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) | 
 | 1335 | 			break; | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1336 |  | 
 | 1337 | 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); | 
 | 1338 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1339 | 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) | 
 | 1340 | 			break; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1341 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1342 | 		pages += chunk_nr; | 
 | 1343 | 		status += chunk_nr; | 
 | 1344 | 		nr_pages -= chunk_nr; | 
 | 1345 | 	} | 
 | 1346 | 	return nr_pages ? -EFAULT : 0; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1347 | } | 
 | 1348 |  | 
 | 1349 | /* | 
 | 1350 |  * Move a list of pages in the address space of the currently executing | 
 | 1351 |  * process. | 
 | 1352 |  */ | 
| Heiko Carstens | 938bb9f | 2009-01-14 14:14:30 +0100 | [diff] [blame] | 1353 | SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, | 
 | 1354 | 		const void __user * __user *, pages, | 
 | 1355 | 		const int __user *, nodes, | 
 | 1356 | 		int __user *, status, int, flags) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1357 | { | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1358 | 	const struct cred *cred = current_cred(), *tcred; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1359 | 	struct task_struct *task; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1360 | 	struct mm_struct *mm; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1361 | 	int err; | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1362 | 	nodemask_t task_nodes; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1363 |  | 
 | 1364 | 	/* Check flags */ | 
 | 1365 | 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) | 
 | 1366 | 		return -EINVAL; | 
 | 1367 |  | 
 | 1368 | 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) | 
 | 1369 | 		return -EPERM; | 
 | 1370 |  | 
 | 1371 | 	/* Find the mm_struct */ | 
| Greg Thelen | a879bf5 | 2011-02-25 14:44:13 -0800 | [diff] [blame] | 1372 | 	rcu_read_lock(); | 
| Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1373 | 	task = pid ? find_task_by_vpid(pid) : current; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1374 | 	if (!task) { | 
| Greg Thelen | a879bf5 | 2011-02-25 14:44:13 -0800 | [diff] [blame] | 1375 | 		rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1376 | 		return -ESRCH; | 
 | 1377 | 	} | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1378 | 	get_task_struct(task); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1379 |  | 
 | 1380 | 	/* | 
 | 1381 | 	 * Check if this process has the right to modify the specified | 
 | 1382 | 	 * process. The right exists if the process has administrative | 
 | 1383 | 	 * capabilities, superuser privileges or the same | 
 | 1384 | 	 * userid as the target process. | 
 | 1385 | 	 */ | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1386 | 	tcred = __task_cred(task); | 
| David Howells | b6dff3e | 2008-11-14 10:39:16 +1100 | [diff] [blame] | 1387 | 	if (cred->euid != tcred->suid && cred->euid != tcred->uid && | 
 | 1388 | 	    cred->uid  != tcred->suid && cred->uid  != tcred->uid && | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1389 | 	    !capable(CAP_SYS_NICE)) { | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1390 | 		rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1391 | 		err = -EPERM; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1392 | 		goto out; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1393 | 	} | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1394 | 	rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1395 |  | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1396 | 	err = security_task_movememory(task); | 
 | 1397 | 	if (err) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1398 | 		goto out; | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1399 |  | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1400 | 	task_nodes = cpuset_mems_allowed(task); | 
 | 1401 | 	mm = get_task_mm(task); | 
 | 1402 | 	put_task_struct(task); | 
 | 1403 |  | 
| Sasha Levin | 6e8b09e | 2012-04-25 16:01:53 -0700 | [diff] [blame] | 1404 | 	if (!mm) | 
 | 1405 | 		return -EINVAL; | 
 | 1406 |  | 
 | 1407 | 	if (nodes) | 
 | 1408 | 		err = do_pages_move(mm, task_nodes, nr_pages, pages, | 
 | 1409 | 				    nodes, status, flags); | 
 | 1410 | 	else | 
 | 1411 | 		err = do_pages_stat(mm, nr_pages, pages, status); | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1412 |  | 
 | 1413 | 	mmput(mm); | 
 | 1414 | 	return err; | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1415 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1416 | out: | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1417 | 	put_task_struct(task); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1418 | 	return err; | 
 | 1419 | } | 
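/*
 * Hypothetical user-space counterpart (not kernel code): exercising the
 * syscall above through libnuma's move_pages(3) wrapper from <numaif.h>.
 * Node 1 is an assumption; status[0] reports the page's new node, or a
 * negative errno on per-page failure.
 */
#if 0	/* user-space sketch only, never built with the kernel */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *pages[1];
	int nodes[1] = { 1 };	/* target node (assumed to exist) */
	int status[1];
	long rc;

	pages[0] = malloc(4096);
	*(volatile char *)pages[0] = 0;	/* fault the page in first */

	rc = move_pages(0 /* this process */, 1, pages, nodes, status,
			MPOL_MF_MOVE);
	printf("rc=%ld status[0]=%d\n", rc, status[0]);
	return rc != 0;
}
#endif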
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1420 |  | 
| Christoph Lameter | 7b2259b | 2006-06-25 05:46:48 -0700 | [diff] [blame] | 1421 | /* | 
 | 1422 |  * Call the migration functions in the vm_ops that may prepare | 
 | 1423 |  * memory in a vma for migration. Migration functions may perform | 
 | 1424 |  * the migration for vmas that do not have an underlying page struct. | 
 | 1425 |  */ | 
 | 1426 | int migrate_vmas(struct mm_struct *mm, const nodemask_t *to, | 
 | 1427 | 	const nodemask_t *from, unsigned long flags) | 
 | 1428 | { | 
 | 1429 | 	struct vm_area_struct *vma; | 
 | 1430 | 	int err = 0; | 
 | 1431 |  | 
| Daisuke Nishimura | 1001c9f | 2009-02-11 13:04:18 -0800 | [diff] [blame] | 1432 | 	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) { | 
| Christoph Lameter | 7b2259b | 2006-06-25 05:46:48 -0700 | [diff] [blame] | 1433 | 		if (vma->vm_ops && vma->vm_ops->migrate) { | 
 | 1434 | 			err = vma->vm_ops->migrate(vma, to, from, flags); | 
 | 1435 | 			if (err) | 
 | 1436 | 				break; | 
 | 1437 | 		} | 
 | 1438 | 	} | 
 | 1439 | 	return err; | 
 | 1440 | } | 
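/*
 * Hypothetical driver-side sketch: a vm_operations_struct supplying the
 * ->migrate hook that migrate_vmas() invokes above. The parameter order
 * here simply mirrors the call site (vma, to, from, flags); a real
 * driver would relocate whatever backs the VMA at this point.
 */
static int example_vma_migrate(struct vm_area_struct *vma,
			       const nodemask_t *to, const nodemask_t *from,
			       unsigned long flags)
{
	/* Nothing to relocate in this stub; report success. */
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.migrate = example_vma_migrate,
};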
| Gerald Schaefer | 83d1674 | 2008-07-23 21:28:22 -0700 | [diff] [blame] | 1441 | #endif |