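/*
 * Per-cpu area setup for x86: allocates the per cpu data areas (and, on
 * 64-bit SMP, the cpu_pda pointer table) and maintains the cpu -> apicid
 * and cpu <-> node maps.
 */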
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

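/*
 * The EARLY_PER_CPU variables below carry a static boot-time array that
 * is consulted until the real per cpu areas exist; setup_per_cpu_maps()
 * then copies the values over and clears the early pointers.
 */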
/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas. These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);
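		/*
		 * Only nr_cpu_ids - 1 pda's are allocated here: the boot
		 * cpu keeps its statically allocated one (see below).
		 */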

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size = PERCPU_ENOUGH_ROOM;
	char *ptr;
	int cpu;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
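		/*
		 * Prefer node-local memory; fall back to any-node bootmem
		 * when the cpu's node is offline or has no memory.
		 */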
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
				cpu, node);
			if (ptr)
				printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
					 cpu, __pa(ptr));
		} else {
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
			if (ptr)
				printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
					 cpu, node, __pa(ptr));
		}
#endif
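		/*
		 * Record this cpu's offset and seed its area from the
		 * reference per cpu section.
		 */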
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
		NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}
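	/* nr_node_ids is now the highest possible node number + 1 */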

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;

	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;

	else
		pr_debug("Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

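/*
 * Debug version of cpu_to_node(): warns and falls back to the early map
 * if it is used before the per cpu copies have been populated.
 */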
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);
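
/*
 * Typical use is via the node_to_cpumask_ptr() convenience macro (from
 * the asm-x86 topology header in this tree), which declares the pointer
 * locally; a minimal sketch, assuming that macro:
 *
 *	node_to_cpumask_ptr(mask, node);
 *	for_each_cpu_mask(cpu, *mask)
 *		do_something(cpu);
 */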

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack,
 * so with a high NR_CPUS count, excessive stack space is used. The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */
Bernhard Walle1ecd2762008-06-20 15:38:22 +0200385