#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32-bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere; there is no need to reserve space in the first
 * chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Remap allocator
 *
 * This allocator uses a PMD page as the unit.  A PMD page is
 * allocated for each cpu and each is remapped into the vmalloc area
 * using a PMD mapping.  As a PMD page is quite large, only part of it
 * is used for the first chunk; the unused part is returned to the
 * bootmem allocator.
 *
 * So, the PMD pages are mapped twice - once into the physical mapping
 * and once into the vmalloc area for the first percpu chunk.  The
 * double mapping adds some PMD TLB pressure, but it is still much
 * better than using only 4k mappings while remaining NUMA friendly.
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
static size_t pcpur_size __initdata;
static void **pcpur_ptrs __initdata;

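/*
 * First-chunk page callback for the remap allocator: return the page
 * backing page @pageno of @cpu's area, or NULL once the offset runs
 * past the pcpur_size bytes kept from the PMD page.
 */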
static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpur_size)
		return NULL;

	return virt_to_page(pcpur_ptrs[cpu] + off);
}

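/*
 * Set up the first percpu chunk from one PMD page per cpu (node-local
 * when possible), remapped into the vmalloc area with large-page
 * mappings.  Returns the unit size on success, -errno on failure.
 */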
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	static struct vm_struct vm;
	size_t ptrs_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 */
	if (!cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently supports only single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpur_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
	pcpur_ptrs = alloc_bootmem(ptrs_size);

	for_each_possible_cpu(cpu) {
		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
		if (!pcpur_ptrs[cpu])
			goto enomem;

		/*
		 * Only use pcpur_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
			     PMD_SIZE - pcpur_size);

		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
	}

	/* allocate address and map */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd;

		pmd = populate_extra_pmd((unsigned long)vm.addr
					 + cpu * PMD_SIZE);
		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
				     PAGE_KERNEL_LARGE));
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, vm.addr, NULL);
	goto out_free_ar;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpur_ptrs[cpu])
			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
	return ret;
}
#else
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus the
 * module and dynamic reserves, and is embedded into the linear
 * physical mapping so that it can use PMD mappings without additional
 * TLB pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page and most of initialization is done by the generic
 * setup function.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

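/*
 * First-chunk page callback for the 4k allocator: return the bootmem
 * page backing @pageno of @cpu's static area, or NULL beyond the
 * static pages.
 */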
static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

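/*
 * PTE-population callback: make sure page tables exist down to the
 * pte level for @addr in the first chunk's mapping.
 */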
static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

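/*
 * Set up the first percpu chunk by allocating the static area one 4k
 * page at a time and handing the pages to the generic setup code.
 * Returns the unit size on success, -ENOMEM on failure.
 */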
static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

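/*
 * On 32-bit, percpu data is reached through a segment register, so
 * install a GDT entry whose base is this cpu's percpu offset.  This
 * is a no-op on 64-bit.
 */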
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = setup_pcpu_remap(static_size);
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}