| /* | 
 |  *  linux/mm/memory_hotplug.c | 
 |  * | 
 |  *  Copyright (C) | 
 |  */ | 
 |  | 
 | #include <linux/stddef.h> | 
 | #include <linux/mm.h> | 
 | #include <linux/swap.h> | 
 | #include <linux/interrupt.h> | 
 | #include <linux/pagemap.h> | 
 | #include <linux/bootmem.h> | 
 | #include <linux/compiler.h> | 
 | #include <linux/module.h> | 
 | #include <linux/pagevec.h> | 
 | #include <linux/writeback.h> | 
 | #include <linux/slab.h> | 
 | #include <linux/sysctl.h> | 
 | #include <linux/cpu.h> | 
 | #include <linux/memory.h> | 
 | #include <linux/memory_hotplug.h> | 
 | #include <linux/highmem.h> | 
 | #include <linux/vmalloc.h> | 
 | #include <linux/ioport.h> | 
 | #include <linux/cpuset.h> | 
 | #include <linux/delay.h> | 
 | #include <linux/migrate.h> | 
 | #include <linux/page-isolation.h> | 
 |  | 
 | #include <asm/tlbflush.h> | 
 |  | 
 | #include "internal.h" | 
 |  | 
 | /* add this memory to iomem resource */ | 
 | static struct resource *register_memory_resource(u64 start, u64 size) | 
 | { | 
 | 	struct resource *res; | 
 | 	res = kzalloc(sizeof(struct resource), GFP_KERNEL); | 
 | 	BUG_ON(!res); | 
 |  | 
 | 	res->name = "System RAM"; | 
 | 	res->start = start; | 
 | 	res->end = start + size - 1; | 
 | 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 
 | 	if (request_resource(&iomem_resource, res) < 0) { | 
 | 		printk("System RAM resource %llx - %llx cannot be added\n", | 
 | 		(unsigned long long)res->start, (unsigned long long)res->end); | 
 | 		kfree(res); | 
 | 		res = NULL; | 
 | 	} | 
 | 	return res; | 
 | } | 
 |  | 
 | static void release_memory_resource(struct resource *res) | 
 | { | 
 | 	if (!res) | 
 | 		return; | 
 | 	release_resource(res); | 
 | 	kfree(res); | 
 | } | 
 |  | 
 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE | 
 | #ifndef CONFIG_SPARSEMEM_VMEMMAP | 
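/*
 * Mark a page that backs bootmem-allocated data so that memory
 * hot-remove can recognize it later: the type "magic" goes into
 * page->_mapcount, the section/node number into page->private, and an
 * extra reference is taken so the page stays pinned until
 * put_page_bootmem() drops it.
 */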
static void get_page_bootmem(unsigned long info, struct page *page, int magic)
 | { | 
 | 	atomic_set(&page->_mapcount, magic); | 
 | 	SetPagePrivate(page); | 
 | 	set_page_private(page, info); | 
 | 	atomic_inc(&page->_count); | 
 | } | 
 |  | 
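/*
 * Drop a reference taken by get_page_bootmem(). Once the count falls
 * back to one, clear the bootmem markings and hand the page back to
 * the buddy allocator via __free_pages_bootmem().
 */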
 | void put_page_bootmem(struct page *page) | 
 | { | 
 | 	int magic; | 
 |  | 
 | 	magic = atomic_read(&page->_mapcount); | 
 | 	BUG_ON(magic >= -1); | 
 |  | 
 | 	if (atomic_dec_return(&page->_count) == 1) { | 
 | 		ClearPagePrivate(page); | 
 | 		set_page_private(page, 0); | 
 | 		reset_page_mapcount(page); | 
 | 		__free_pages_bootmem(page, 0); | 
 | 	} | 
 |  | 
 | } | 
 |  | 
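/*
 * Take bootmem references on the pages backing this section's memmap
 * and usemap, so they can be identified and freed when the section is
 * hot-removed.
 */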
 | void register_page_bootmem_info_section(unsigned long start_pfn) | 
 | { | 
 | 	unsigned long *usemap, mapsize, section_nr, i; | 
 | 	struct mem_section *ms; | 
 | 	struct page *page, *memmap; | 
 |  | 
 | 	if (!pfn_valid(start_pfn)) | 
 | 		return; | 
 |  | 
 | 	section_nr = pfn_to_section_nr(start_pfn); | 
 | 	ms = __nr_to_section(section_nr); | 
 |  | 
 | 	/* Get section's memmap address */ | 
 | 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); | 
 |  | 
 | 	/* | 
 | 	 * Get page for the memmap's phys address | 
 | 	 * XXX: need more consideration for sparse_vmemmap... | 
 | 	 */ | 
 | 	page = virt_to_page(memmap); | 
 | 	mapsize = sizeof(struct page) * PAGES_PER_SECTION; | 
 | 	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT; | 
 |  | 
 | 	/* remember memmap's page */ | 
 | 	for (i = 0; i < mapsize; i++, page++) | 
 | 		get_page_bootmem(section_nr, page, SECTION_INFO); | 
 |  | 
 | 	usemap = __nr_to_section(section_nr)->pageblock_flags; | 
 | 	page = virt_to_page(usemap); | 
 |  | 
 | 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; | 
 |  | 
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
 |  | 
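/*
 * Register bootmem info for everything backing this node: the
 * pglist_data itself, the zones' wait tables, and each valid section's
 * memmap and usemap within the node's span.
 */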
 | void register_page_bootmem_info_node(struct pglist_data *pgdat) | 
 | { | 
 | 	unsigned long i, pfn, end_pfn, nr_pages; | 
 | 	int node = pgdat->node_id; | 
 | 	struct page *page; | 
 | 	struct zone *zone; | 
 |  | 
 | 	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; | 
 | 	page = virt_to_page(pgdat); | 
 |  | 
 | 	for (i = 0; i < nr_pages; i++, page++) | 
 | 		get_page_bootmem(node, page, NODE_INFO); | 
 |  | 
 | 	zone = &pgdat->node_zones[0]; | 
 | 	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) { | 
 | 		if (zone->wait_table) { | 
 | 			nr_pages = zone->wait_table_hash_nr_entries | 
 | 				* sizeof(wait_queue_head_t); | 
 | 			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT; | 
 | 			page = virt_to_page(zone->wait_table); | 
 |  | 
 | 			for (i = 0; i < nr_pages; i++, page++) | 
 | 				get_page_bootmem(node, page, NODE_INFO); | 
 | 		} | 
 | 	} | 
 |  | 
 | 	pfn = pgdat->node_start_pfn; | 
 | 	end_pfn = pfn + pgdat->node_spanned_pages; | 
 |  | 
	/* register section info */
 | 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) | 
		register_page_bootmem_info_section(pfn);
}
 | #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ | 
 |  | 
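/*
 * Extend the zone's span to cover [start_pfn, end_pfn). Called with
 * the pgdat resize lock held; the zone span seqlock is taken here so
 * readers see a consistent zone_start_pfn/spanned_pages pair.
 */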
 | static void grow_zone_span(struct zone *zone, unsigned long start_pfn, | 
 | 			   unsigned long end_pfn) | 
 | { | 
 | 	unsigned long old_zone_end_pfn; | 
 |  | 
 | 	zone_span_writelock(zone); | 
 |  | 
 | 	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; | 
 | 	if (start_pfn < zone->zone_start_pfn) | 
 | 		zone->zone_start_pfn = start_pfn; | 
 |  | 
 | 	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) - | 
 | 				zone->zone_start_pfn; | 
 |  | 
 | 	zone_span_writeunlock(zone); | 
 | } | 
 |  | 
 | static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn, | 
 | 			    unsigned long end_pfn) | 
 | { | 
 | 	unsigned long old_pgdat_end_pfn = | 
 | 		pgdat->node_start_pfn + pgdat->node_spanned_pages; | 
 |  | 
 | 	if (start_pfn < pgdat->node_start_pfn) | 
 | 		pgdat->node_start_pfn = start_pfn; | 
 |  | 
 | 	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) - | 
 | 					pgdat->node_start_pfn; | 
 | } | 
 |  | 
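/*
 * Hook one new section into @zone: initialize the zone if it is still
 * empty, extend the zone and pgdat spans, and init the new section's
 * memmap entries.
 */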
 | static int __add_zone(struct zone *zone, unsigned long phys_start_pfn) | 
 | { | 
 | 	struct pglist_data *pgdat = zone->zone_pgdat; | 
 | 	int nr_pages = PAGES_PER_SECTION; | 
 | 	int nid = pgdat->node_id; | 
 | 	int zone_type; | 
 | 	unsigned long flags; | 
 |  | 
 | 	zone_type = zone - pgdat->node_zones; | 
 | 	if (!zone->wait_table) { | 
 | 		int ret; | 
 |  | 
 | 		ret = init_currently_empty_zone(zone, phys_start_pfn, | 
 | 						nr_pages, MEMMAP_HOTPLUG); | 
 | 		if (ret) | 
 | 			return ret; | 
 | 	} | 
 | 	pgdat_resize_lock(zone->zone_pgdat, &flags); | 
 | 	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages); | 
 | 	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn, | 
 | 			phys_start_pfn + nr_pages); | 
 | 	pgdat_resize_unlock(zone->zone_pgdat, &flags); | 
 | 	memmap_init_zone(nr_pages, nid, zone_type, | 
 | 			 phys_start_pfn, MEMMAP_HOTPLUG); | 
 | 	return 0; | 
 | } | 
 |  | 
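/*
 * Add one sparsemem section: allocate and wire up its memmap, extend
 * the zone, and register the new memory block with sysfs. Returns
 * -EEXIST if the section's first pfn is already valid.
 */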
 | static int __add_section(struct zone *zone, unsigned long phys_start_pfn) | 
 | { | 
 | 	int nr_pages = PAGES_PER_SECTION; | 
 | 	int ret; | 
 |  | 
 | 	if (pfn_valid(phys_start_pfn)) | 
 | 		return -EEXIST; | 
 |  | 
 | 	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); | 
 |  | 
 | 	if (ret < 0) | 
 | 		return ret; | 
 |  | 
 | 	ret = __add_zone(zone, phys_start_pfn); | 
 |  | 
 | 	if (ret < 0) | 
 | 		return ret; | 
 |  | 
 | 	return register_new_memory(__pfn_to_section(phys_start_pfn)); | 
 | } | 
 |  | 
 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 
 | static int __remove_section(struct zone *zone, struct mem_section *ms) | 
 | { | 
 | 	/* | 
	 * XXX: Freeing memmap with vmemmap is not implemented yet.
 | 	 *      This should be removed later. | 
 | 	 */ | 
 | 	return -EBUSY; | 
 | } | 
 | #else | 
 | static int __remove_section(struct zone *zone, struct mem_section *ms) | 
 | { | 
 | 	unsigned long flags; | 
 | 	struct pglist_data *pgdat = zone->zone_pgdat; | 
 | 	int ret = -EINVAL; | 
 |  | 
 | 	if (!valid_section(ms)) | 
 | 		return ret; | 
 |  | 
 | 	ret = unregister_memory_section(ms); | 
 | 	if (ret) | 
 | 		return ret; | 
 |  | 
 | 	pgdat_resize_lock(pgdat, &flags); | 
 | 	sparse_remove_one_section(zone, ms); | 
 | 	pgdat_resize_unlock(pgdat, &flags); | 
 | 	return 0; | 
 | } | 
 | #endif | 
 |  | 
 | /* | 
 |  * Reasonably generic function for adding memory.  It is | 
 |  * expected that archs that support memory hotplug will | 
 |  * call this function after deciding the zone to which to | 
 |  * add the new pages. | 
 |  */ | 
 | int __add_pages(struct zone *zone, unsigned long phys_start_pfn, | 
 | 		 unsigned long nr_pages) | 
 | { | 
 | 	unsigned long i; | 
 | 	int err = 0; | 
 | 	int start_sec, end_sec; | 
	/* during mem_map init, align the hot-added range to section boundaries */
 | 	start_sec = pfn_to_section_nr(phys_start_pfn); | 
 | 	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); | 
 |  | 
 | 	for (i = start_sec; i <= end_sec; i++) { | 
 | 		err = __add_section(zone, i << PFN_SECTION_SHIFT); | 
 |  | 
 | 		/* | 
 | 		 * EEXIST is finally dealt with by ioresource collision | 
 | 		 * check. see add_memory() => register_memory_resource() | 
 | 		 * Warning will be printed if there is collision. | 
 | 		 */ | 
 | 		if (err && (err != -EEXIST)) | 
 | 			break; | 
 | 		err = 0; | 
 | 	} | 
 |  | 
 | 	return err; | 
 | } | 
 | EXPORT_SYMBOL_GPL(__add_pages); | 
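
/*
 * As a sketch only (not part of this file): an architecture's
 * arch_add_memory() is typically a thin wrapper that establishes the
 * kernel mapping for the range and then calls __add_pages() on the
 * zone it picked, roughly:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *
 *		(set up the direct mapping for [start, start + size) here)
 *
 *		return __add_pages(zone, start >> PAGE_SHIFT,
 *				   size >> PAGE_SHIFT);
 *	}
 *
 * The zone choice (ZONE_NORMAL here) varies by architecture and
 * configuration; consult the arch's mm init code.
 */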
 |  | 
 | /** | 
 |  * __remove_pages() - remove sections of pages from a zone | 
 |  * @zone: zone from which pages need to be removed | 
 |  * @phys_start_pfn: starting pageframe (must be aligned to start of a section) | 
 |  * @nr_pages: number of pages to remove (must be multiple of section size) | 
 |  * | 
 |  * Generic helper function to remove section mappings and sysfs entries | 
 |  * for the section of the memory we are removing. Caller needs to make | 
 * sure that pages are marked reserved and zones are adjusted properly by
 |  * calling offline_pages(). | 
 |  */ | 
 | int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, | 
 | 		 unsigned long nr_pages) | 
 | { | 
	unsigned long i;
	int sections_to_remove;
	int ret = 0;
 |  | 
 | 	/* | 
 | 	 * We can only remove entire sections | 
 | 	 */ | 
 | 	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK); | 
 | 	BUG_ON(nr_pages % PAGES_PER_SECTION); | 
 |  | 
 | 	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE); | 
 |  | 
 | 	sections_to_remove = nr_pages / PAGES_PER_SECTION; | 
 | 	for (i = 0; i < sections_to_remove; i++) { | 
 | 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION; | 
 | 		ret = __remove_section(zone, __pfn_to_section(pfn)); | 
 | 		if (ret) | 
 | 			break; | 
 | 	} | 
 | 	return ret; | 
 | } | 
 | EXPORT_SYMBOL_GPL(__remove_pages); | 
 |  | 
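/*
 * Hand one hot-added page to the buddy allocator: bump the global page
 * accounting, clear PG_reserved, and free it.
 */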
 | void online_page(struct page *page) | 
 | { | 
 | 	totalram_pages++; | 
 | 	num_physpages++; | 
 |  | 
 | #ifdef CONFIG_HIGHMEM | 
 | 	if (PageHighMem(page)) | 
 | 		totalhigh_pages++; | 
 | #endif | 
 |  | 
 | #ifdef CONFIG_FLATMEM | 
 | 	max_mapnr = max(page_to_pfn(page), max_mapnr); | 
 | #endif | 
 |  | 
 | 	ClearPageReserved(page); | 
 | 	init_page_count(page); | 
 | 	__free_page(page); | 
 | } | 
 |  | 
 | static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages, | 
 | 			void *arg) | 
 | { | 
 | 	unsigned long i; | 
 | 	unsigned long onlined_pages = *(unsigned long *)arg; | 
 | 	struct page *page; | 
 | 	if (PageReserved(pfn_to_page(start_pfn))) | 
 | 		for (i = 0; i < nr_pages; i++) { | 
 | 			page = pfn_to_page(start_pfn + i); | 
 | 			online_page(page); | 
 | 			onlined_pages++; | 
 | 		} | 
 | 	*(unsigned long *)arg = onlined_pages; | 
 | 	return 0; | 
 | } | 
 |  | 
 |  | 
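/*
 * Bring [pfn, pfn + nr_pages) online: notify MEM_GOING_ONLINE
 * listeners, free the pages into the buddy allocator, update the zone
 * and node counters, start kswapd on the node if pages were actually
 * onlined, and rebuild the zonelists if the zone was unpopulated.
 */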
 | int online_pages(unsigned long pfn, unsigned long nr_pages) | 
 | { | 
 | 	unsigned long onlined_pages = 0; | 
 | 	struct zone *zone; | 
 | 	int need_zonelists_rebuild = 0; | 
 | 	int nid; | 
 | 	int ret; | 
 | 	struct memory_notify arg; | 
 |  | 
 | 	arg.start_pfn = pfn; | 
 | 	arg.nr_pages = nr_pages; | 
 | 	arg.status_change_nid = -1; | 
 |  | 
 | 	nid = page_to_nid(pfn_to_page(pfn)); | 
 | 	if (node_present_pages(nid) == 0) | 
 | 		arg.status_change_nid = nid; | 
 |  | 
 | 	ret = memory_notify(MEM_GOING_ONLINE, &arg); | 
 | 	ret = notifier_to_errno(ret); | 
 | 	if (ret) { | 
 | 		memory_notify(MEM_CANCEL_ONLINE, &arg); | 
 | 		return ret; | 
 | 	} | 
 | 	/* | 
 | 	 * This doesn't need a lock to do pfn_to_page(). | 
 | 	 * The section can't be removed here because of the | 
 | 	 * memory_block->state_mutex. | 
 | 	 */ | 
 | 	zone = page_zone(pfn_to_page(pfn)); | 
 | 	/* | 
 | 	 * If this zone is not populated, then it is not in zonelist. | 
 | 	 * This means the page allocator ignores this zone. | 
 | 	 * So, zonelist must be updated after online. | 
 | 	 */ | 
 | 	if (!populated_zone(zone)) | 
 | 		need_zonelists_rebuild = 1; | 
 |  | 
 | 	ret = walk_memory_resource(pfn, nr_pages, &onlined_pages, | 
 | 		online_pages_range); | 
 | 	if (ret) { | 
 | 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n", | 
 | 			nr_pages, pfn); | 
 | 		memory_notify(MEM_CANCEL_ONLINE, &arg); | 
 | 		return ret; | 
 | 	} | 
 |  | 
 | 	zone->present_pages += onlined_pages; | 
 | 	zone->zone_pgdat->node_present_pages += onlined_pages; | 
 |  | 
 | 	setup_per_zone_pages_min(); | 
 | 	if (onlined_pages) { | 
 | 		kswapd_run(zone_to_nid(zone)); | 
 | 		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY); | 
 | 	} | 
 |  | 
 | 	if (need_zonelists_rebuild) | 
 | 		build_all_zonelists(); | 
 | 	vm_total_pages = nr_free_pagecache_pages(); | 
 | 	writeback_set_ratelimit(); | 
 |  | 
 | 	if (onlined_pages) | 
 | 		memory_notify(MEM_ONLINE, &arg); | 
 |  | 
 | 	return 0; | 
 | } | 
 | #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ | 
 |  | 
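/*
 * Allocate and minimally initialize a pglist_data for a node coming
 * online for the first time; all of its zones start out empty.
 */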
 | static pg_data_t *hotadd_new_pgdat(int nid, u64 start) | 
 | { | 
 | 	struct pglist_data *pgdat; | 
 | 	unsigned long zones_size[MAX_NR_ZONES] = {0}; | 
 | 	unsigned long zholes_size[MAX_NR_ZONES] = {0}; | 
 | 	unsigned long start_pfn = start >> PAGE_SHIFT; | 
 |  | 
 | 	pgdat = arch_alloc_nodedata(nid); | 
 | 	if (!pgdat) | 
 | 		return NULL; | 
 |  | 
 | 	arch_refresh_nodedata(nid, pgdat); | 
 |  | 
 | 	/* we can use NODE_DATA(nid) from here */ | 
 |  | 
	/* init the node's zones as empty zones; we don't have any present pages */
 | 	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size); | 
 |  | 
 | 	return pgdat; | 
 | } | 
 |  | 
 | static void rollback_node_hotadd(int nid, pg_data_t *pgdat) | 
 | { | 
 | 	arch_refresh_nodedata(nid, NULL); | 
 | 	arch_free_nodedata(pgdat); | 
 | } | 
 |  | 
 |  | 
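/*
 * Hot-add [start, start + size) as System RAM: reserve the iomem
 * resource, allocate a pgdat if the node was offline, let the
 * architecture create the mappings and sections, then mark the node
 * online and register it with sysfs.
 */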
 | int add_memory(int nid, u64 start, u64 size) | 
 | { | 
 | 	pg_data_t *pgdat = NULL; | 
 | 	int new_pgdat = 0; | 
 | 	struct resource *res; | 
 | 	int ret; | 
 |  | 
 | 	res = register_memory_resource(start, size); | 
 | 	if (!res) | 
 | 		return -EEXIST; | 
 |  | 
 | 	if (!node_online(nid)) { | 
 | 		pgdat = hotadd_new_pgdat(nid, start); | 
 | 		if (!pgdat) | 
 | 			return -ENOMEM; | 
 | 		new_pgdat = 1; | 
 | 	} | 
 |  | 
 | 	/* call arch's memory hotadd */ | 
 | 	ret = arch_add_memory(nid, start, size); | 
 |  | 
 | 	if (ret < 0) | 
 | 		goto error; | 
 |  | 
	/* we online the node here; we can't roll back from this point */
 | 	node_set_online(nid); | 
 |  | 
 | 	cpuset_track_online_nodes(); | 
 |  | 
 | 	if (new_pgdat) { | 
 | 		ret = register_one_node(nid); | 
 | 		/* | 
 | 		 * If sysfs file of new node can't create, cpu on the node | 
 | 		 * can't be hot-added. There is no rollback way now. | 
 | 		 * So, check by BUG_ON() to catch it reluctantly.. | 
 | 		 */ | 
 | 		BUG_ON(ret); | 
 | 	} | 
 |  | 
 | 	return ret; | 
 | error: | 
 | 	/* rollback pgdat allocation and others */ | 
 | 	if (new_pgdat) | 
 | 		rollback_node_hotadd(nid, pgdat); | 
 | 	if (res) | 
 | 		release_memory_resource(res); | 
 |  | 
 | 	return ret; | 
 | } | 
 | EXPORT_SYMBOL_GPL(add_memory); | 
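
/*
 * For illustration only: a platform hotplug driver (the ACPI memory
 * driver, for instance) ends up calling add_memory() with the range it
 * discovered from firmware, along the lines of:
 *
 *	u64 start = ...;	(base address reported by firmware)
 *	u64 size = ...;		(length reported by firmware)
 *	int nid = ...;		(node derived from the proximity domain)
 *	int err = add_memory(nid, start, size);
 *
 * The new pages still have to be onlined afterwards, normally through
 * the memory block's sysfs "state" file, which reaches online_pages().
 */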
 |  | 
 | #ifdef CONFIG_MEMORY_HOTREMOVE | 
 | /* | 
 |  * Confirm all pages in a range [start, end) is belongs to the same zone. | 
 |  */ | 
 | static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) | 
 | { | 
 | 	unsigned long pfn; | 
 | 	struct zone *zone = NULL; | 
 | 	struct page *page; | 
 | 	int i; | 
 | 	for (pfn = start_pfn; | 
 | 	     pfn < end_pfn; | 
 | 	     pfn += MAX_ORDER_NR_PAGES) { | 
 | 		i = 0; | 
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
 | 		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i)) | 
 | 			i++; | 
 | 		if (i == MAX_ORDER_NR_PAGES) | 
 | 			continue; | 
 | 		page = pfn_to_page(pfn + i); | 
 | 		if (zone && page_zone(page) != zone) | 
 | 			return 0; | 
 | 		zone = page_zone(page); | 
 | 	} | 
 | 	return 1; | 
 | } | 
 |  | 
 | /* | 
 |  * Scanning pfn is much easier than scanning lru list. | 
 |  * Scan pfn from start to end and Find LRU page. | 
 |  */ | 
 | int scan_lru_pages(unsigned long start, unsigned long end) | 
 | { | 
 | 	unsigned long pfn; | 
 | 	struct page *page; | 
 | 	for (pfn = start; pfn < end; pfn++) { | 
 | 		if (pfn_valid(pfn)) { | 
 | 			page = pfn_to_page(pfn); | 
 | 			if (PageLRU(page)) | 
 | 				return pfn; | 
 | 		} | 
 | 	} | 
 | 	return 0; | 
 | } | 
 |  | 
 | static struct page * | 
 | hotremove_migrate_alloc(struct page *page, | 
 | 			unsigned long private, | 
 | 			int **x) | 
 | { | 
	/* XXX: this allocation policy really should be improved */
 | 	return alloc_page(GFP_HIGHUSER_PAGECACHE); | 
 | } | 
 |  | 
 |  | 
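/*
 * Try to migrate up to NR_OFFLINE_AT_ONCE_PAGES LRU pages out of
 * [start_pfn, end_pfn). Pages that are still in use but cannot be
 * isolated from the LRU make the range unremovable for now and fail
 * the call with -EBUSY.
 */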
 | #define NR_OFFLINE_AT_ONCE_PAGES	(256) | 
 | static int | 
 | do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | 
 | { | 
 | 	unsigned long pfn; | 
 | 	struct page *page; | 
 | 	int move_pages = NR_OFFLINE_AT_ONCE_PAGES; | 
 | 	int not_managed = 0; | 
 | 	int ret = 0; | 
 | 	LIST_HEAD(source); | 
 |  | 
 | 	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) { | 
 | 		if (!pfn_valid(pfn)) | 
 | 			continue; | 
 | 		page = pfn_to_page(pfn); | 
 | 		if (!page_count(page)) | 
 | 			continue; | 
 | 		/* | 
 | 		 * We can skip free pages. And we can only deal with pages on | 
 | 		 * LRU. | 
 | 		 */ | 
 | 		ret = isolate_lru_page(page, &source); | 
 | 		if (!ret) { /* Success */ | 
 | 			move_pages--; | 
 | 		} else { | 
			/* Because we don't have a big zone->lock, we
			   should check this again here. */
 | 			if (page_count(page)) | 
 | 				not_managed++; | 
 | #ifdef CONFIG_DEBUG_VM | 
 | 			printk(KERN_INFO "removing from LRU failed" | 
 | 					 " %lx/%d/%lx\n", | 
 | 				pfn, page_count(page), page->flags); | 
 | #endif | 
 | 		} | 
 | 	} | 
 | 	ret = -EBUSY; | 
 | 	if (not_managed) { | 
 | 		if (!list_empty(&source)) | 
 | 			putback_lru_pages(&source); | 
 | 		goto out; | 
 | 	} | 
 | 	ret = 0; | 
 | 	if (list_empty(&source)) | 
 | 		goto out; | 
 | 	/* this function returns # of failed pages */ | 
 | 	ret = migrate_pages(&source, hotremove_migrate_alloc, 0); | 
 |  | 
 | out: | 
 | 	return ret; | 
 | } | 
 |  | 
 | /* | 
 |  * remove from free_area[] and mark all as Reserved. | 
 |  */ | 
 | static int | 
 | offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, | 
 | 			void *data) | 
 | { | 
 | 	__offline_isolated_pages(start, start + nr_pages); | 
 | 	return 0; | 
 | } | 
 |  | 
 | static void | 
 | offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) | 
 | { | 
 | 	walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL, | 
 | 				offline_isolated_pages_cb); | 
 | } | 
 |  | 
 | /* | 
 |  * Check all pages in range, recoreded as memory resource, are isolated. | 
 |  */ | 
 | static int | 
 | check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, | 
 | 			void *data) | 
 | { | 
 | 	int ret; | 
	long offlined = nr_pages;

	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
 | 	if (!ret) | 
 | 		*(long *)data += offlined; | 
 | 	return ret; | 
 | } | 
 |  | 
 | static long | 
 | check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) | 
 | { | 
 | 	long offlined = 0; | 
 | 	int ret; | 
 |  | 
 | 	ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined, | 
 | 			check_pages_isolated_cb); | 
 | 	if (ret < 0) | 
 | 		offlined = (long)ret; | 
 | 	return offlined; | 
 | } | 
 |  | 
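/*
 * Offline [start_pfn, end_pfn): isolate the range, notify
 * MEM_GOING_OFFLINE listeners, migrate LRU pages away (retrying until
 * the timeout expires), then pull the now-free pages out of the buddy
 * allocator and fix up the zone and node accounting.
 */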
 | int offline_pages(unsigned long start_pfn, | 
 | 		  unsigned long end_pfn, unsigned long timeout) | 
 | { | 
 | 	unsigned long pfn, nr_pages, expire; | 
 | 	long offlined_pages; | 
 | 	int ret, drain, retry_max, node; | 
 | 	struct zone *zone; | 
 | 	struct memory_notify arg; | 
 |  | 
 | 	BUG_ON(start_pfn >= end_pfn); | 
	/* at least, the range must be pageblock-aligned */
 | 	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages)) | 
 | 		return -EINVAL; | 
 | 	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages)) | 
 | 		return -EINVAL; | 
	/* This makes hotplug much easier...and readable.
	   We assume this for now. */
 | 	if (!test_pages_in_a_zone(start_pfn, end_pfn)) | 
 | 		return -EINVAL; | 
 |  | 
 | 	zone = page_zone(pfn_to_page(start_pfn)); | 
 | 	node = zone_to_nid(zone); | 
 | 	nr_pages = end_pfn - start_pfn; | 
 |  | 
 | 	/* set above range as isolated */ | 
 | 	ret = start_isolate_page_range(start_pfn, end_pfn); | 
 | 	if (ret) | 
 | 		return ret; | 
 |  | 
 | 	arg.start_pfn = start_pfn; | 
 | 	arg.nr_pages = nr_pages; | 
 | 	arg.status_change_nid = -1; | 
 | 	if (nr_pages >= node_present_pages(node)) | 
 | 		arg.status_change_nid = node; | 
 |  | 
 | 	ret = memory_notify(MEM_GOING_OFFLINE, &arg); | 
 | 	ret = notifier_to_errno(ret); | 
 | 	if (ret) | 
 | 		goto failed_removal; | 
 |  | 
 | 	pfn = start_pfn; | 
 | 	expire = jiffies + timeout; | 
 | 	drain = 0; | 
 | 	retry_max = 5; | 
 | repeat: | 
 | 	/* start memory hot removal */ | 
 | 	ret = -EAGAIN; | 
 | 	if (time_after(jiffies, expire)) | 
 | 		goto failed_removal; | 
 | 	ret = -EINTR; | 
 | 	if (signal_pending(current)) | 
 | 		goto failed_removal; | 
 | 	ret = 0; | 
 | 	if (drain) { | 
 | 		lru_add_drain_all(); | 
 | 		flush_scheduled_work(); | 
 | 		cond_resched(); | 
 | 		drain_all_pages(); | 
 | 	} | 
 |  | 
 | 	pfn = scan_lru_pages(start_pfn, end_pfn); | 
 | 	if (pfn) { /* We have page on LRU */ | 
 | 		ret = do_migrate_range(pfn, end_pfn); | 
 | 		if (!ret) { | 
 | 			drain = 1; | 
 | 			goto repeat; | 
 | 		} else { | 
 | 			if (ret < 0) | 
 | 				if (--retry_max == 0) | 
 | 					goto failed_removal; | 
 | 			yield(); | 
 | 			drain = 1; | 
 | 			goto repeat; | 
 | 		} | 
 | 	} | 
	/* drain all zones' lru pagevecs; this is asynchronous... */
 | 	lru_add_drain_all(); | 
 | 	flush_scheduled_work(); | 
 | 	yield(); | 
	/* drain pcp pages; this is synchronous */
 | 	drain_all_pages(); | 
 | 	/* check again */ | 
 | 	offlined_pages = check_pages_isolated(start_pfn, end_pfn); | 
 | 	if (offlined_pages < 0) { | 
 | 		ret = -EBUSY; | 
 | 		goto failed_removal; | 
 | 	} | 
 | 	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages); | 
	/* Ok, all of our target is isolated.
	   We cannot do rollback at this point. */
 | 	offline_isolated_pages(start_pfn, end_pfn); | 
	/* reset pagetype flags; make the migratetype MOVABLE again */
 | 	undo_isolate_page_range(start_pfn, end_pfn); | 
 | 	/* removal success */ | 
 | 	zone->present_pages -= offlined_pages; | 
 | 	zone->zone_pgdat->node_present_pages -= offlined_pages; | 
 | 	totalram_pages -= offlined_pages; | 
 | 	num_physpages -= offlined_pages; | 
 |  | 
 | 	vm_total_pages = nr_free_pagecache_pages(); | 
 | 	writeback_set_ratelimit(); | 
 |  | 
 | 	memory_notify(MEM_OFFLINE, &arg); | 
 | 	return 0; | 
 |  | 
 | failed_removal: | 
 | 	printk(KERN_INFO "memory offlining %lx to %lx failed\n", | 
 | 		start_pfn, end_pfn); | 
 | 	memory_notify(MEM_CANCEL_OFFLINE, &arg); | 
 | 	/* pushback to free area */ | 
 | 	undo_isolate_page_range(start_pfn, end_pfn); | 
 |  | 
 | 	return ret; | 
 | } | 
 | #else | 
 | int remove_memory(u64 start, u64 size) | 
 | { | 
 | 	return -EINVAL; | 
 | } | 
 | EXPORT_SYMBOL_GPL(remove_memory); | 
 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |