/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

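/* Record the node of a section so that page_to_nid() can look it up later. */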
static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
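/*
 * Allocate one root's worth of mem_section structures for node @nid:
 * from the slab once it is available, otherwise from node-local bootmem.
 */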
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

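/*
 * Install the root array entry covering @section_nr, allocating it if
 * necessary.  Returns 0 on success, -EEXIST if the root already exists.
 */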
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two concurrent callers from installing
	 * two different allocations for the same root index.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we subtract the section's first pfn from its mem_map pointer,
 * so that "page - section_mem_map" yields the actual physical page
 * frame number for any page in the section.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
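
/*
 * Note the roundtrip: sparse_decode_mem_map(sparse_encode_mem_map(map, pnum),
 * pnum) == map.  The coded value behaves like a mem_map array indexed by
 * absolute pfn, which is exactly what pfn_to_page() relies on.
 */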

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

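/*
 * Each section's usemap holds SECTION_BLOCKFLAGS_BITS of pageblock flags,
 * rounded up to whole unsigned longs.
 */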
unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
	unsigned long section_nr;

	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	return alloc_bootmem_section(usemap_size(), section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before remove section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency. Some platforms allow
	 * un-removable sections because they will just gather other
	 * removable sections for dynamic partitioning. Just report
	 * the un-removable sections' numbers here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
	return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

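/*
 * Allocate the usemap for section @pnum: first try the section holding the
 * node's pgdat (to keep hot-remove dependencies local), then fall back to
 * ordinary node bootmem.
 */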
static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
	if (usemap)
		return usemap;

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap) {
		check_usemap_section_nr(nid, usemap);
		return usemap;
	}

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
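/*
 * Classic (non-vmemmap) sparsemem: allocate the section's mem_map via
 * alloc_remap() when the architecture provides it, otherwise from
 * node-local bootmem.
 */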
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
		"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}

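/*
 * Weak hook: architectures that batch their vmemmap allocation printouts
 * (e.g. x86-64) override this to flush the final summary line.
 */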
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * Each mem_map uses a big page (2M on 64-bit x86) while each
	 * usemap is well under one page (around 24 bytes).  Allocating
	 * a 2M-aligned 2M block and then 24 bytes, in turn, would push
	 * every following 2M block one more 2M along, leaving a big
	 * system riddled with holes.  So do all the usemap allocations
	 * first, letting the 2M mem_maps be allocated contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section() right after each
	 * sparse_early_mem_map_alloc(), so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
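/*
 * With vmemmap, hot-added sections reuse the boot-time populate path;
 * freeing the vmemmap backing is not implemented yet, so the kfree and
 * bootmem-free variants below are stubs.
 */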
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
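/*
 * Classic sparsemem hot-add: try to grab the memmap as physically
 * contiguous pages from the page allocator, and fall back to vmalloc()
 * when no high-order block is available.
 */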
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

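/*
 * Drop the bootmem references on the pages backing a section's memmap,
 * except for pages that sit inside the very section being removed (see
 * the comment in the loop below).
 */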
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state: all of its pages have
		 * been isolated from the page allocator. If the removing
		 * section's memmap is placed within that same section, it
		 * must not be freed here; otherwise the page allocator
		 * could hand it out just before the memory is physically
		 * removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed with other usemaps
	 * on the section that held the pgdat at boot time, so just keep
	 * it as is for now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				     int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking around sparse_index_init(): it does its own
	 * locking, and it may sleep in kmalloc(), so it must run
	 * before we take the resize lock.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

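/*
 * Tear down one section: decode and detach its memmap and usemap, then
 * free them according to how they were originally allocated.
 */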
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif