/* Common code for 32 and 64-bit NUMA */
#include <linux/topology.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/numa.h>
#include <asm/acpi.h>

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

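/* Parse the early "numa=" boot parameter ("off", "fake=", "noacpi"). */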
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

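/* Return the node of @cpu's local APIC, or NUMA_NO_NODE if it is not known. */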
int __cpuinit numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

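/* Record the cpu -> node mapping, either in the early map or the per_cpu map. */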
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round-robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

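/* Return the online node with the smallest node_distance() to @node. */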
static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner in numa_init_array(),
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

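/*
 * Debug version of cpu_to_node(): warn and fall back to the early map if it
 * is called before the per_cpu areas are set up.
 */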
int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

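/* Set or clear @cpu in @node's cpumask and log the resulting mask. */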
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
        struct cpumask *mask;
        char buf[64];

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, buf);
        return;
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (node_to_cpumask_map[node] == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */