#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

/*
 * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
 * voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA

#ifdef CONFIG_X86_64
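/*
 * Note: until setup_per_cpu_areas() runs, the boot CPU accesses its
 * per-cpu data through the initial per-cpu section, so slot 0 starts
 * out pointing at __per_cpu_load (see the CPU0 %gs reload below).
 */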
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size;
	char *ptr;
	int cpu;

	/* Copy section for each CPU (we discard the original) */
	size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

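	/*
	 * Allocate each CPU's area from node-local memory where the node is
	 * known and online, falling back to any bootmem otherwise, then seed
	 * it with a copy of the initial per-cpu section.
	 */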
	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		/*
		 * Copy data used in early init routines from the initial arrays to the
		 * per cpu data areas. These arrays then become expendable and the
		 * *_early_ptr's are zeroed indicating that the static arrays are gone.
		 */
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef CONFIG_X86_64
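		/*
		 * Set up this CPU's IRQ stack pointer: it starts 64 bytes
		 * below the top of the per-cpu irq_stack (the gap is
		 * presumably kept as scratch space).
		 */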
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
		/*
		 * Up to this point, CPU0 has been using .data.init
		 * area. Reload %gs offset for CPU0.
		 */
		if (cpu == 0)
			load_gs_base(cpu);
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif
