/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
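/*
 * Allocate one root's worth of mem_section structures for node @nid,
 * from the slab once it is available and from bootmem before that.
 */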
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
	}

	return section;
}

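/*
 * Install the mem_section array for @section_nr's root, allocating it
 * on @nid.  Returns -EEXIST if the root has already been set up.
 */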
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

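/*
 * Size in bytes of one section's pageblock-flags bitmap, rounded up so
 * it can be manipulated as an array of unsigned longs.
 */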
unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
					  SMP_CACHE_BYTES, goal, limit);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

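/*
 * Warn when a section's usemap could not be placed in the same section
 * as its node's pgdat: the two allocations then pin each other's
 * sections, complicating hot-remove.
 */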
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before remove section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section numbers here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return alloc_bootmem_node_nopanic(pgdat, size);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

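/*
 * Allocate the usemaps for every present section in [pnum_begin, pnum_end)
 * on @nodeid with a single allocation, then parcel it out per section.
 */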
static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		printk(KERN_WARNING "%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
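/*
 * Allocate one section's mem_map: try the architecture's remap pool
 * first, then fall back to bootmem on the section's node.
 */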
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	return map;
}
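
/*
 * Batch variant: back every present section in [pnum_begin, pnum_end)
 * on @nodeid from one contiguous allocation, falling back to
 * per-section allocations if that fails.
 */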
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;
	unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	unsigned long map_count;
	int size2;
	struct page **map_map;
#endif

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * The map is backed by big pages (2M on 64-bit x86) while a
	 * usemap is far smaller than a page (about 24 bytes).  Allocating
	 * a 2M-aligned map and then a usemap in turn would push each
	 * following map into the next 2M region, leaving a lot of holes
	 * on big systems, so try to allocate the 2M map pages
	 * contiguously instead.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	usemap_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			usemap_count++;
			continue;
		}
		/* ok, we need to take care of pnum_begin through pnum - 1 */
		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
						 usemap_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		usemap_count = 1;
	}
	/* ok, last chunk */
	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
					 usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = alloc_bootmem(size2);
	if (!map_map)
		panic("can not allocate map_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of pnum_begin through pnum - 1 */
		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
						 map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
					 map_count, nodeid_begin);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	free_bootmem(__pa(map_map), size2);
#endif
	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
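/*
 * Without vmemmap, get a section's worth of struct pages from the page
 * allocator, falling back to vmalloc if the high-order allocation fails.
 */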
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->lru.next;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * has already been logically offlined, so all of its pages
		 * are isolated from the page allocator. If the section's
		 * memmap is placed in that same section, it must not be
		 * freed here: the page allocator could hand it out again
		 * even though the backing memory is about to be removed
		 * physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if the allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed with other usemaps
	 * in the section that held the pgdat at boot time, so just leave
	 * it as is for now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() does its own
	 * locking, and it may kmalloc().
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

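/*
 * Tear down one section: clear its mem_map encoding and pageblock
 * flags, then free the memmap and usemap when they came from hot-add.
 */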
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif