#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
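
/*
 * Illustrative sketch (never compiled; a hypothetical userspace
 * analogue, not part of this file): the handoff pattern used by
 * setup_per_cpu_maps() in miniature.  A static boot-time array is read
 * through an "early" pointer; once the real per-object storage exists,
 * the data is copied out and the early pointer is cleared so later
 * readers can tell that the static array is gone.
 */
#if 0
#include <stddef.h>	/* for NULL */

#define MAX_OBJS 8

static int early_map[MAX_OBJS];		/* compile-time scratch array */
static int *early_ptr = early_map;	/* NULL once the handoff is done */
static int final_map[MAX_OBJS];		/* stands in for per-cpu storage */

static void handoff(void)
{
	int i;

	for (i = 0; i < MAX_OBJS; i++)
		final_map[i] = early_ptr[i];
	early_ptr = NULL;	/* the scratch array may now be reclaimed */
}

static int lookup(int i)
{
	/* works both before and after the handoff */
	return early_ptr ? early_ptr[i] : final_map[i];
}
#endif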

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
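
/*
 * Illustrative sketch (never compiled; a hypothetical userspace
 * analogue using malloc in place of alloc_bootmem): the single-block
 * layout built by setup_cpu_pda_map() above.  One allocation holds the
 * pointer table followed by (n - 1) cache-line-padded objects; slot 0
 * keeps its statically allocated object and slots 1..n-1 are carved
 * out of the block's tail.
 */
#if 0
#include <stdlib.h>

struct pda { long data; };

static struct pda boot_pda;		/* static object for index 0 */

static struct pda **alloc_pda_table(unsigned int n, unsigned long line)
{
	unsigned long size  = ((sizeof(struct pda) + line - 1) / line) * line;
	unsigned long tsize = ((n * sizeof(void *) + line - 1) / line) * line;
	struct pda **tbl;
	char *p;
	unsigned int i;

	tbl = calloc(1, tsize + size * (n - 1));
	if (!tbl)
		return NULL;
	p = (char *)tbl + tsize;	/* objects start right after the table */

	tbl[0] = &boot_pda;		/* leave the "boot cpu" object in place */
	for (i = 1; i < n; i++, p += size)
		tbl[i] = (struct pda *)p;
	return tbl;
}
#endif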
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size = PERCPU_ENOUGH_ROOM;
	char *ptr;
	int cpu;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
	       size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
			       cpu, node);
			if (ptr)
				printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
				       cpu, __pa(ptr));
		} else {
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
			if (ptr)
				printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
				       cpu, node, __pa(ptr));
		}
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids: %d\n",
		NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}
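
/*
 * Illustrative sketch (never compiled; a hypothetical userspace
 * analogue): what the offset stored above is for.  Each CPU receives a
 * private copy of the per-cpu "section"; a per-cpu variable is then
 * reached by adding that CPU's offset to the variable's
 * section-relative address, which is essentially what per_cpu() does.
 */
#if 0
#include <stdlib.h>
#include <string.h>

static char section[64];	/* stands in for __per_cpu_start..__per_cpu_end */
static long cpu_offset[4];	/* stands in for per_cpu_offset(cpu) */

static void replicate(int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		char *copy = malloc(sizeof(section));	/* lives forever */

		memcpy(copy, section, sizeof(section));
		cpu_offset[cpu] = copy - section;	/* ptr - __per_cpu_start */
	}
}

/* &var + cpu_offset[cpu] locates that CPU's private instance */
#define my_per_cpu(var, cpu) \
	(*(typeof(var) *)((char *)&(var) + cpu_offset[cpu]))
#endif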

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
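
/*
 * Worked example (assumed values): the sizing loop above makes
 * nr_node_ids "highest possible node number + 1", not a population
 * count, so a sparse node_possible_map still leaves room for the
 * largest index:
 *
 *	node_possible_map = {0, 2, 5}	(bits 0b100101)
 *	num ends at 5  ->  nr_node_ids = 6
 *	map gets 6 * sizeof(cpumask_t) bytes, indexable by any
 *	possible node
 */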

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		pr_debug("Setting node for non-present cpu %d\n", cpu);
}
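
/*
 * Note on the fallback chain above: numa_set_node() can be called at
 * any boot stage because it tries three backing stores in order --
 * the early static array (before setup_per_cpu_areas()), the per-cpu
 * variable (once per_cpu_offset(cpu) is valid), and finally only a
 * debug message when the cpu is not present.  The early pointer being
 * NULLed in setup_per_cpu_maps() is what moves callers from the first
 * store to the second.
 */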

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack,
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */