/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
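		/*
		 * Re-check PageLRU and take a reference under the zone's
		 * lru_lock: the page may have been freed or isolated by
		 * someone else since the unlocked check above.
		 */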
		if (PageLRU(page) && get_page_unless_zero(page)) {
			ret = 0;
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

static inline void move_to_lru(struct page *page)
{
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
		struct page *old, struct page *new)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	unsigned long addr = page_address_in_vma(new, vma);

	if (addr == -EFAULT)
		return;

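	/*
	 * Walk the page tables by hand; if any level is not present
	 * there can be no migration entry here and nothing to restore.
	 */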
	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		return;
	}

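	/*
	 * Take the pte lock and re-check the entry: it may have changed
	 * between the unlocked peek above and acquiring the lock.
	 */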
	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
		goto out;

	/*
	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
	 * Failure is not an option here: we're now expected to remove every
	 * migration pte, and failing to do so would cause crashes.  Normally
	 * this is not an issue: mem_cgroup_prepare_migration bumped up the
	 * old page_cgroup count for safety, and that count is now attached
	 * to the new page, so this charge should just be another increment
	 * of the count, to keep in balance with rmap.c's
	 * mem_cgroup_uncharging.  But if there's been a force_empty, those
	 * reference counts may no longer be reliable, and this charge can
	 * actually fail: oh well, we don't make the situation any worse by
	 * proceeding as if it had succeeded.
	 */
	mem_cgroup_charge(new, mm, GFP_ATOMIC);

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, pte);

out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings.
 * Nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
	struct vm_area_struct *vma;
	struct address_space *mapping = page_mapping(new);
	struct prio_tree_iter iter;
	pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (!mapping)
		return;

	spin_lock(&mapping->i_mmap_lock);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
		remove_migration_pte(vma, old, new);

	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, old, new);

	spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	if (PageAnon(new))
		remove_anon_migration_ptes(old, new);
	else
		remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero. And we don't want to call
	 * wait_on_page_locked() against a page without holding a reference
	 * from get_page(). So we use get_page_unless_zero() here. Even if
	 * that fails, the page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + !!PagePrivate(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
#ifdef CONFIG_SWAP
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}
#endif

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);

	spin_unlock_irq(&mapping->tree_lock);
	if (!PageSwapCache(newpage))
		mem_cgroup_uncharge_cache_page(page);

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

#ifdef CONFIG_SWAP
	ClearPageSwapCache(page);
#endif
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

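	/*
	 * Take a reference on and lock every buffer head on the page so
	 * that nobody can operate on the buffers while they are switched
	 * over to the new page below.
	 */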
	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

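	/* Re-point each buffer head at the new page, keeping its offset. */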
	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);
	if (rc < 0)
		/* I/O Error writing */
		return -EIO;

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (PagePrivate(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (!rc)
		remove_migration_ptes(page, newpage);
	else
		newpage->mapping = NULL;

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int rcu_locked = 0;
	int charge = 0;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1)
		/* page was freed from under us. So we are done. */
		goto move_newpage;

	charge = mem_cgroup_prepare_migration(page, newpage);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto move_newpage;
	}
	/* prepare cgroup just returns 0 or -ENOMEM */
	BUG_ON(charge);

	rc = -EAGAIN;
	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	if (PageWriteback(page)) {
		if (!force)
			goto unlock;
		wait_on_page_writeback(page);
	}
	/*
	 * try_to_unmap() will drop page->mapcount to 0; once that happens
	 * we can no longer notice if the anon_vma is freed while we are
	 * migrating the page. Holding rcu_read_lock() delays freeing of
	 * the anon_vma until the end of migration. File cache pages are
	 * no problem because they are protected by the page lock (migration
	 * may use writepage() or lock_page() on them), so only anonymous
	 * pages need this care.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && PagePrivate(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
		}
		goto rcu_unlock;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, 1);

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page);

	if (rc)
		remove_migration_ptes(page, page);
rcu_unlock:
	if (rcu_locked)
		rcu_read_unlock();

unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		move_to_lru(page);
	}

move_newpage:
	if (!charge)
		mem_cgroup_end_migration(newpage);
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	move_to_lru(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

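	/*
	 * Allow writing to swap while migrating so that dirty anonymous
	 * pages can be paged out; the caller's flag is restored on exit
	 * if we set it here.
	 */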
	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

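			/*
			 * After the first few passes, pass force != 0 so
			 * that unmap_and_move() blocks on the page lock and
			 * on writeback instead of skipping busy pages.
			 */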
			rc = unmap_and_move(get_new_page, private,
						page, pass > 2);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

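	/*
	 * Scan the page_to_node array, which is terminated by an entry
	 * with node == MAX_NUMNODES, for the entry describing this page.
	 */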
	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 */
static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
				int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	migrate_prep();
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		/*
		 * A valid page pointer that will not match any of the
		 * pages that will be moved.
		 */
		pp->page = ZERO_PAGE(0);

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (PageReserved(page))		/* Check for zero page */
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page, &pagelist);
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

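	/*
	 * Migrate everything we managed to isolate in one batch;
	 * new_page_node() allocates the destination pages on the
	 * requested nodes.
	 */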
	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm);
	else
		err = -ENOENT;

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Determine the nodes of a list of pages. The addr in the pm array
 * must have been set to the virtual address of the page whose node
 * number we want to determine.
 */
static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
{
	down_read(&mm->mmap_sem);

	for ( ; pm->node != MAX_NUMNODES; pm++) {
		struct vm_area_struct *vma;
		struct page *page;
		int err;

		err = -EFAULT;
		vma = find_vma(mm, pm->addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, pm->addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		pm->status = err;
	}

	up_read(&mm->mmap_sem);
	return 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
			const void __user * __user *pages,
			const int __user *nodes,
			int __user *status, int flags)
{
	int err = 0;
	int i;
	struct task_struct *task;
	nodemask_t task_nodes;
	struct mm_struct *mm;
	struct page_to_node *pm = NULL;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out2;
	}

	err = security_task_movememory(task);
	if (err)
		goto out2;

	task_nodes = cpuset_mems_allowed(task);

	/* Limit nr_pages so that the multiplication cannot overflow */
	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
		err = -E2BIG;
		goto out2;
	}

	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
	if (!pm) {
		err = -ENOMEM;
		goto out2;
	}

	/*
	 * Get parameters from user space and initialize the pm
	 * array. Return various errors if the user did something wrong.
	 */
	for (i = 0; i < nr_pages; i++) {
		const void __user *p;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out;

		pm[i].addr = (unsigned long)p;
		if (nodes) {
			int node;

			if (get_user(node, nodes + i))
				goto out;

			err = -ENODEV;
			if (!node_state(node, N_HIGH_MEMORY))
				goto out;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out;

			pm[i].node = node;
		} else
			pm[i].node = 0;	/* anything to not match MAX_NUMNODES */
	}
	/* End marker */
	pm[nr_pages].node = MAX_NUMNODES;

	if (nodes)
		err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
	else
		err = do_pages_stat(mm, pm);

	if (err >= 0)
		/* Return status information */
		for (i = 0; i < nr_pages; i++)
			if (put_user(pm[i].status, status + i))
				err = -EFAULT;

out:
	vfree(pm);
out2:
	mmput(mm);
	return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif