#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
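/*
 * The EARLY_PER_CPU machinery gives each of these maps two lives: a
 * static NR_CPUS-sized array that is usable before the per-cpu areas
 * exist, and the real per-cpu variable that setup_per_cpu_maps()
 * below copies the array into.
 */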

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
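/*
 * Note: the early_per_cpu() accessor (see this tree's asm/percpu.h)
 * reads through the early pointer while it is non-NULL and falls back
 * to per_cpu() afterwards, so callers need no awareness of the switch
 * performed above.
 */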

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}
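	/*
	 * Layout of the single bootmem allocation, roughly:
	 *
	 *   new_cpu_pda[0..nr_cpu_ids-1]    pointer table (tsize)
	 *   pda slots for cpus 1..n-1       cache-line sized (asize)
	 *
	 * Only nr_cpu_ids - 1 pda slots are reserved because the boot
	 * cpu keeps its statically allocated pda (see the loop below).
	 */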

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);
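	/*
	 * PERCPU_ENOUGH_ROOM (linux/percpu.h) is the static percpu
	 * section plus reserve space (e.g. for percpu data of modules);
	 * rounding up to at least PAGE_SIZE keeps each cpu's copy
	 * page-aligned.
	 */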

	printk(KERN_INFO
		"NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
		size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				      __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);

		if (!node_online(node) || !NODE_DATA(node)) {
			/* fall back to any-node bootmem allocation */
			ptr = __alloc_bootmem(size, align,
					      __pa(MAX_DMA_ADDRESS));
			printk(KERN_INFO
				"cpu %d has no node %d or node-local memory\n",
				cpu, node);
			if (ptr)
				printk(KERN_DEBUG
					"per cpu data for cpu%d at %016lx\n",
					cpu, __pa(ptr));
		} else {
			/* preferred case: allocate on the cpu's own node */
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						   __pa(MAX_DMA_ADDRESS));
			if (ptr)
				printk(KERN_DEBUG
					"per cpu data for cpu%d on node%d "
					"at %016lx\n",
					cpu, node, __pa(ptr));
		}
#endif
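		/*
		 * per_cpu_offset(cpu) is the delta added to a percpu
		 * symbol's link-time address to reach this cpu's private
		 * copy; seed that copy from the reference section.
		 */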
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();
}
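/*
 * setup_per_cpu_areas() runs once during early boot (from start_kernel()
 * in this tree, which uses this arch version when
 * CONFIG_HAVE_SETUP_PER_CPU_AREA is set) and must precede any use of
 * non-boot-cpu percpu data.
 */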

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}
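	/*
	 * for_each_node_mask() walks set bits in ascending order, so num
	 * ends up as the highest possible node id.  Example: with nodes
	 * { 0, 2 } possible, nr_node_ids becomes 3; the hole for node 1
	 * wastes one cpumask_t slot but keeps node -> mask indexing
	 * direct.
	 */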

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
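/*
 * Record a cpu -> node mapping in whichever map is live at this point
 * in boot: the early static array while it still exists, the percpu
 * variable once the percpu areas are set up, else there is nowhere to
 * store it yet.
 */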
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		pr_debug("Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}
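/*
 * cpulist_scnprintf() renders the mask as ranges, so with the debug
 * option enabled the log carries lines like (illustrative):
 *
 *   numa_add_cpu cpu 5 node 1: mask now 4-7
 */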

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);
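/*
 * Without CONFIG_DEBUG_PER_CPU_MAPS, cpu_to_node() is a cheap inline
 * (see this tree's asm/topology.h); this out-of-line version trades
 * speed for the too-early-usage warning above.
 */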

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack,
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);
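/*
 * Concretely: sizeof(cpumask_t) is NR_CPUS/8 bytes, so at NR_CPUS=4096
 * each node_to_cpumask() call copies a 512-byte mask by value.
 */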

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */