/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
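/*
 * Allocate one root's worth of mem_section structures (SECTIONS_PER_ROOT
 * entries) on the given node: from the slab once it is available, or from
 * the bootmem allocator during early boot.
 */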
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

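/*
 * Install the root array that covers section_nr, allocating it if
 * necessary.  Returns 0 on success, -EEXIST if the root is already
 * populated (possibly by a racing caller) and -ENOMEM on allocation
 * failure.
 */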
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different callers from installing two
	 * different allocations for the same root index.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						  unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}
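
/*
 * Illustrative example (values are hypothetical; PAGES_PER_SECTION is
 * per-architecture): if PAGES_PER_SECTION were 0x8000, section 3 would
 * cover pfns 0x18000-0x1ffff.  Encoding its mem_map M stores
 * M - 0x18000 (in units of struct page), so adding any pfn in that
 * range back to the stored value yields the struct page for that pfn,
 * and page-to-pfn translation reduces to plain pointer arithmetic.
 */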

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

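/*
 * Size, in bytes, of one section's pageblock-flags bitmap: enough bits
 * for SECTION_BLOCKFLAGS_BITS, rounded up to whole unsigned longs.
 */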
static unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

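/*
 * Allocate the boot-time usemap for section pnum from bootmem, on the
 * node recorded for that section by memory_present().
 */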
static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
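/*
 * Default (non-vmemmap) way to back one section's mem_map at boot:
 * try any architecture-provided remapped area first, then fall back
 * to a plain bootmem allocation on the section's node.
 */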
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

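/*
 * Allocate the mem_map for section pnum on its early node; on failure,
 * clear section_mem_map (dropping the present bit) so the section is
 * skipped from then on.
 */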
struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
		"some memory will not be available.\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		usemap = sparse_early_usemap_alloc(pnum);
		if (!usemap)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
#else
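/*
 * Hotplug-time mem_map allocation: prefer physically contiguous pages
 * from the page allocator, falling back to vmalloc if that fails.
 * Either way the map is zeroed before it is returned.
 */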
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

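/*
 * __kmalloc_section_memmap() may have returned either vmalloc or page
 * allocator memory; tell which, so the matching free routine is used.
 */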
static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, the passed-in map was not consumed and
 * must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking is needed around sparse_index_init(): it takes its
	 * own lock, and it may sleep (it can kmalloc), so it must not be
	 * called under the pgdat resize lock taken below.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
#endif