#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

/*
 * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
 * voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
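
/*
 * Note: the "early" per-cpu maps above (and x86_cpu_to_node_map below)
 * are backed by static arrays until the real per-cpu areas exist;
 * setup_per_cpu_maps() copies them into per-cpu memory and then clears
 * the early pointers.
 */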

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

/*
 * Define load_pda_offset() and per-cpu __pda for x86_64.
 * load_pda_offset() is responsible for loading the offset of pda into
 * %gs.
 *
 * On SMP, the pda offset also doubles as the percpu base address and thus
 * it should be at the start of the per-cpu area. To achieve this, it's
 * preallocated in vmlinux_64.lds.S directly instead of using
 * DEFINE_PER_CPU().
 */
#ifdef CONFIG_X86_64
void __cpuinit load_pda_offset(int cpu)
{
	/* Memory clobbers used to order pda/percpu accesses */
	mb();
	wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
	mb();
}
#ifndef CONFIG_SMP
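/* UP: the pda need not sit at the start of a per-cpu area, so an ordinary per-cpu variable suffices */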
DEFINE_PER_CPU(struct x8664_pda, __pda);
#endif
EXPORT_PER_CPU_SYMBOL(__pda);
#endif	/* CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
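	/*
	 * With CONFIG_CPUMASK_OFFSTACK these allocate the cpumasks from
	 * bootmem; otherwise they are no-ops.
	 */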
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas. These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);
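	/* size is PERCPU_ENOUGH_ROOM rounded up to a whole number of pages */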

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
					__pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					__pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				cpu, node, __pa(ptr));
		}
#endif

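		/*
		 * Copy the static per-cpu init data (__per_cpu_load) into
		 * this CPU's area and record the CPU's offset.
		 */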
		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
#ifdef CONFIG_X86_64
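		/* irq_stack_ptr points 64 bytes below the top of this CPU's IRQ stack */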
		per_cpu(irq_stack_ptr, cpu) =
			(char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64;
		/*
		 * CPU0 modified pda in the init data area, reload pda
		 * offset for CPU0 and clear the area for others.
		 */
		if (cpu == 0)
			load_pda_offset(0);
		else
			memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
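		/* find the highest node number in node_possible_map */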
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

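/* plain versions of the numa cpumask helpers (no sanity checks) */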
void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}


/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used. The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */