/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep() but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
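
/*
 * Typical calling sequence, shown as a sketch (do_move_page_to_node_array()
 * below is a real caller):
 *
 *	migrate_prep();
 *	...
 *	if (!isolate_lru_page(page)) {
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_zone_page_state(page, NR_ISOLATED_ANON +
 *				    page_is_file_cache(page));
 *	}
 *	...
 *	err = migrate_pages(&pagelist, get_new_page, private, 0);
 */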

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		goto out;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}
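
/*
 * For reference, the migration entry torn down above is installed by
 * try_to_unmap(TTU_MIGRATION), roughly as follows (a sketch; see
 * try_to_unmap_one() in mm/rmap.c for the real code):
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
 *
 * i.e. the pte is made non-present and encodes both the old page and
 * whether the mapping was writable.
 */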

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement of page migration has started,
	 * page_count *must* be zero. And, we don't want to call
	 * wait_on_page_locked() against a page without get_page().
	 * So, we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop the cache reference from the old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}
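
/*
 * Worked example for the check above: a page cache page being migrated
 * is pinned by the radix tree (1) and by the caller that isolated it (1),
 * so expected_count is 2; attached buffer heads add one more private
 * reference, giving 3. Any transient extra reference (e.g. a concurrent
 * lookup) makes page_freeze_refs() fail and the migration is retried
 * via -EAGAIN.
 */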

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
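
/*
 * Filesystems whose pages carry no private data can plug this helper
 * straight into their address_space_operations, along these lines
 * (foo_aops is an illustrative name):
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */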

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
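
/*
 * Block device backed filesystems would typically hook this up the same
 * way (a sketch):
 *
 *	.migratepage	= buffer_migrate_page,
 */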
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
						int remap_swapcache)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page.*/
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
	}

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, int offlining)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int rcu_locked = 0;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page.  The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that the anon_vma is freed while we migrate a page.
	 * This rcu_read_lock() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use write_page() or lock_page() in
	 * migration, so only anon pages need care here.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;

		/* Determine how to safely use anon_vma */
		if (!page_mapped(page)) {
			if (!PageSwapCache(page))
				goto rcu_unlock;

			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes
			 */
			remap_swapcache = 0;
		} else {
			/*
			 * Take a reference count on the anon_vma if the
			 * page is mapped so that it is guaranteed to
			 * exist when the page is remapped later
			 */
			anon_vma = page_anon_vma(page);
			atomic_inc(&anon_vma->external_refcount);
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && page_has_private(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
			goto rcu_unlock;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);
rcu_unlock:

	/* Drop an anon_vma reference if we took one */
	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		anon_vma_unlock(anon_vma);
		if (empty)
			anon_vma_free(anon_vma);
	}

	if (rcu_locked)
		rcu_read_unlock();
uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage);
unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

move_newpage:

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, int offlining)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}
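
/*
 * A minimal get_new_page callback, sketched here with an illustrative
 * name (new_page_node() below is a real example), allocates the target
 * page on a node passed through the private argument:
 *
 *	static struct page *new_page_on_node(struct page *p,
 *			unsigned long private, int **result)
 *	{
 *		return alloc_pages_exact_node((int)private,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 */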
 | 794 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 795 | #ifdef CONFIG_NUMA | 
 | 796 | /* | 
 | 797 |  * Move a list of individual pages | 
 | 798 |  */ | 
 | 799 | struct page_to_node { | 
 | 800 | 	unsigned long addr; | 
 | 801 | 	struct page *page; | 
 | 802 | 	int node; | 
 | 803 | 	int status; | 
 | 804 | }; | 
 | 805 |  | 
 | 806 | static struct page *new_page_node(struct page *p, unsigned long private, | 
 | 807 | 		int **result) | 
 | 808 | { | 
 | 809 | 	struct page_to_node *pm = (struct page_to_node *)private; | 
 | 810 |  | 
 | 811 | 	while (pm->node != MAX_NUMNODES && pm->page != p) | 
 | 812 | 		pm++; | 
 | 813 |  | 
 | 814 | 	if (pm->node == MAX_NUMNODES) | 
 | 815 | 		return NULL; | 
 | 816 |  | 
 | 817 | 	*result = &pm->status; | 
 | 818 |  | 
| Mel Gorman | 6484eb3 | 2009-06-16 15:31:54 -0700 | [diff] [blame] | 819 | 	return alloc_pages_exact_node(pm->node, | 
| Mel Gorman | 769848c | 2007-07-17 04:03:05 -0700 | [diff] [blame] | 820 | 				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 821 | } | 
 | 822 |  | 
 | 823 | /* | 
 | 824 |  * Move a set of pages as indicated in the pm array. The addr | 
 | 825 |  * field must be set to the virtual address of the page to be moved | 
 | 826 |  * and the node number must contain a valid target node. | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 827 |  * The pm array ends with node = MAX_NUMNODES. | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 828 |  */ | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 829 | static int do_move_page_to_node_array(struct mm_struct *mm, | 
 | 830 | 				      struct page_to_node *pm, | 
 | 831 | 				      int migrate_all) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 832 | { | 
 | 833 | 	int err; | 
 | 834 | 	struct page_to_node *pp; | 
 | 835 | 	LIST_HEAD(pagelist); | 
 | 836 |  | 
 | 837 | 	down_read(&mm->mmap_sem); | 
 | 838 |  | 
 | 839 | 	/* | 
 | 840 | 	 * Build a list of pages to migrate | 
 | 841 | 	 */ | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 842 | 	for (pp = pm; pp->node != MAX_NUMNODES; pp++) { | 
 | 843 | 		struct vm_area_struct *vma; | 
 | 844 | 		struct page *page; | 
 | 845 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 846 | 		err = -EFAULT; | 
 | 847 | 		vma = find_vma(mm, pp->addr); | 
| Christoph Lameter | 0dc952d | 2007-03-05 00:30:33 -0800 | [diff] [blame] | 848 | 		if (!vma || !vma_migratable(vma)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 849 | 			goto set_status; | 
 | 850 |  | 
 | 851 | 		page = follow_page(vma, pp->addr, FOLL_GET); | 
| Linus Torvalds | 89f5b7d | 2008-06-20 11:18:25 -0700 | [diff] [blame] | 852 |  | 
 | 853 | 		err = PTR_ERR(page); | 
 | 854 | 		if (IS_ERR(page)) | 
 | 855 | 			goto set_status; | 
 | 856 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 857 | 		err = -ENOENT; | 
 | 858 | 		if (!page) | 
 | 859 | 			goto set_status; | 
 | 860 |  | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 861 | 		/* Use PageReserved to check for zero page */ | 
 | 862 | 		if (PageReserved(page) || PageKsm(page)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 863 | 			goto put_and_set; | 
 | 864 |  | 
 | 865 | 		pp->page = page; | 
 | 866 | 		err = page_to_nid(page); | 
 | 867 |  | 
 | 868 | 		if (err == pp->node) | 
 | 869 | 			/* | 
 | 870 | 			 * Node already in the right place | 
 | 871 | 			 */ | 
 | 872 | 			goto put_and_set; | 
 | 873 |  | 
 | 874 | 		err = -EACCES; | 
 | 875 | 		if (page_mapcount(page) > 1 && | 
 | 876 | 				!migrate_all) | 
 | 877 | 			goto put_and_set; | 
 | 878 |  | 
| Nick Piggin | 62695a8 | 2008-10-18 20:26:09 -0700 | [diff] [blame] | 879 | 		err = isolate_lru_page(page); | 
| KOSAKI Motohiro | 6d9c285 | 2009-12-14 17:58:11 -0800 | [diff] [blame] | 880 | 		if (!err) { | 
| Nick Piggin | 62695a8 | 2008-10-18 20:26:09 -0700 | [diff] [blame] | 881 | 			list_add_tail(&page->lru, &pagelist); | 
| KOSAKI Motohiro | 6d9c285 | 2009-12-14 17:58:11 -0800 | [diff] [blame] | 882 | 			inc_zone_page_state(page, NR_ISOLATED_ANON + | 
 | 883 | 					    page_is_file_cache(page)); | 
 | 884 | 		} | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 885 | put_and_set: | 
 | 886 | 		/* | 
 | 887 | 		 * Either remove the duplicate refcount from | 
 | 888 | 		 * isolate_lru_page() or drop the page ref if it was | 
 | 889 | 		 * not isolated. | 
 | 890 | 		 */ | 
 | 891 | 		put_page(page); | 
 | 892 | set_status: | 
 | 893 | 		pp->status = err; | 
 | 894 | 	} | 
 | 895 |  | 
| Brice Goglin | e78bbfa | 2008-10-18 20:27:15 -0700 | [diff] [blame] | 896 | 	err = 0; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 897 | 	if (!list_empty(&pagelist)) | 
 | 898 | 		err = migrate_pages(&pagelist, new_page_node, | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 899 | 				(unsigned long)pm, 0); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 900 |  | 
 | 901 | 	up_read(&mm->mmap_sem); | 
 | 902 | 	return err; | 
 | 903 | } | 
 | 904 |  | 
 | 905 | /* | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 906 |  * Migrate an array of page address onto an array of nodes and fill | 
 | 907 |  * the corresponding array of status. | 
 | 908 |  */ | 
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of the page_to_node array in a page,
	 * but keep the last slot as an end marker.
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

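/*
 * Worked example of the chunk sizing above (illustrative only; the
 * exact numbers are assumptions that depend on the architecture): on
 * a 64-bit build with 4 KiB pages, struct page_to_node is 24 bytes
 * (addr + page + node + status), so
 *
 *	PAGE_SIZE / sizeof(struct page_to_node) = 4096 / 24 = 170
 *
 * slots fit in the scratch page; one slot is reserved for the
 * MAX_NUMNODES end marker, leaving 169 user entries per call to
 * do_move_page_to_node_array().
 */
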
/*
 * Determine the nodes of an array of pages and store them in an array
 * of status values.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

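/*
 * Illustrative userspace sketch of the query path served by
 * do_pages_stat(): calling move_pages(2) with a NULL nodes array only
 * reports placement, filling status[] with a node id or a negative
 * errno per page.  This is a sketch, not kernel code; it assumes
 * libnuma's <numaif.h> wrapper and is kept inert here under #if 0
 * (build it as a separate program with -lnuma).
 */
#if 0
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *pages[1];
	int status[1];

	pages[0] = malloc(4096);	/* arbitrary example buffer */
	*(char *)pages[0] = 0;		/* touch it so the page exists */

	/* pid 0 means the calling process; nodes == NULL means query */
	if (move_pages(0, 1, pages, NULL, status, 0) == 0)
		printf("page is on node %d\n", status[0]);
	return 0;
}
#endif
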
/*
 * Move a list of pages in the address space of the process identified
 * by @pid (or of the current process if @pid is 0).
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}

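/*
 * Illustrative userspace sketch of the move path above: asking the
 * kernel to migrate one page of the calling process to node 1 with
 * MPOL_MF_MOVE.  A sketch under assumptions (node 1 exists and is
 * allowed by the caller's cpuset); it uses libnuma's <numaif.h>
 * wrapper and is kept inert here under #if 0 (build separately with
 * -lnuma).
 */
#if 0
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *pages[1];
	int nodes[1] = { 1 };		/* destination node: an assumption */
	int status[1];
	long rc;

	pages[0] = malloc(4096);	/* arbitrary example buffer */
	*(char *)pages[0] = 0;		/* populate the page first */

	rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
	if (rc == 0 && status[0] == 1)
		printf("page migrated to node %d\n", status[0]);
	else
		fprintf(stderr, "rc=%ld status=%d\n", rc, status[0]);
	return 0;
}
#endif
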
/*
 * Call the migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
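
/*
 * Illustrative sketch of a vm_ops->migrate handler as invoked above.
 * The hook itself is real; this handler, its name and its body are
 * invented for illustration, with parameters named to match the call
 * site in migrate_vmas().  A driver whose VMAs have no underlying
 * struct page would relocate its private backing store here.  Kept
 * inert under #if 0.
 */
#if 0
static int example_vma_migrate(struct vm_area_struct *vma,
			       const nodemask_t *to, const nodemask_t *from,
			       unsigned long flags)
{
	/* move the driver's backing memory toward the nodes in 'to' */
	return 0;		/* nonzero aborts the loop above */
}

static const struct vm_operations_struct example_vm_ops = {
	.migrate = example_vma_migrate,
};
#endif
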
#endif