/*
 * Some of the code in this file has been gleaned from the 64 bit
 * discontigmem support code base.
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to Pat Gaughen <gone@us.ibm.com>
 */
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
#include <linux/nodemask.h>
#include <asm/srat.h>
#include <asm/topology.h>

/*
 * proximity macros and definitions
 */
#define NODE_ARRAY_INDEX(x)	((x) / 8)	/* 8 bits/char */
#define NODE_ARRAY_OFFSET(x)	((x) % 8)	/* 8 bits/char */
#define BMAP_SET(bmap, bit)	((bmap)[NODE_ARRAY_INDEX(bit)] |= 1 << NODE_ARRAY_OFFSET(bit))
#define BMAP_TEST(bmap, bit)	((bmap)[NODE_ARRAY_INDEX(bit)] & (1 << NODE_ARRAY_OFFSET(bit)))
/* bitmap length; _PXM is at most 255 */
#define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8)
static u8 pxm_bitmap[PXM_BITMAP_LEN];	/* bitmap of proximity domains */
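/*
 * Example (illustrative): proximity domain 0x2A (42) gives
 * NODE_ARRAY_INDEX(42) == 5 and NODE_ARRAY_OFFSET(42) == 2, so
 * BMAP_SET(pxm_bitmap, 42) sets bit 2 of pxm_bitmap[5] and
 * BMAP_TEST(pxm_bitmap, 42) reads that same bit back.
 */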

#define MAX_CHUNKS_PER_NODE	3
#define MAXCHUNKS	(MAX_CHUNKS_PER_NODE * MAX_NUMNODES)
struct node_memory_chunk_s {
	unsigned long	start_pfn;
	unsigned long	end_pfn;
	u8	pxm;		// proximity domain of node
	u8	nid;		// which cnode contains this chunk?
	u8	bank;		// which mem bank on this node
};
static struct node_memory_chunk_s node_memory_chunk[MAXCHUNKS];

static int num_memory_chunks;		/* total number of memory chunks */
static int zholes_size_init;
static unsigned long zholes_size[MAX_NUMNODES * MAX_NR_ZONES];

extern void * boot_ioremap(unsigned long, unsigned long);

/* Identify CPU proximity domains */
static void __init parse_cpu_affinity_structure(char *p)
{
	struct acpi_table_processor_affinity *cpu_affinity =
				(struct acpi_table_processor_affinity *) p;

	if (!cpu_affinity->flags.enabled)
		return;		/* empty entry */

	/* mark this node as "seen" in node bitmap */
	BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain);

	printk("CPU 0x%02X in proximity domain 0x%02X\n",
		cpu_affinity->apic_id, cpu_affinity->proximity_domain);
}

/*
 * Identify memory proximity domains and hot-remove capabilities.
 * Fill node memory chunk list structure.
 */
static void __init parse_memory_affinity_structure (char *sratp)
{
	unsigned long long paddr, size;
	unsigned long start_pfn, end_pfn;
	u8 pxm;
	struct node_memory_chunk_s *p, *q, *pend;
	struct acpi_table_memory_affinity *memory_affinity =
			(struct acpi_table_memory_affinity *) sratp;

	if (!memory_affinity->flags.enabled)
		return;		/* empty entry */

	/* mark this node as "seen" in node bitmap */
	BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain);

	/* calculate info for memory chunk structure */
	paddr = memory_affinity->base_addr_hi;
	paddr = (paddr << 32) | memory_affinity->base_addr_lo;
	size = memory_affinity->length_hi;
	size = (size << 32) | memory_affinity->length_lo;

	start_pfn = paddr >> PAGE_SHIFT;
	end_pfn = (paddr + size) >> PAGE_SHIFT;
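	/*
	 * Example (illustrative): base_addr_hi == 0x1 and
	 * base_addr_lo == 0x40000000 give paddr == 0x140000000; with 4 KB
	 * pages (PAGE_SHIFT == 12) that is start_pfn == 0x140000.
	 */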

	pxm = memory_affinity->proximity_domain;

	if (num_memory_chunks >= MAXCHUNKS) {
		printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
			size/(1024*1024), paddr);
		return;
	}

	/* Insertion sort based on base address */
	pend = &node_memory_chunk[num_memory_chunks];
	for (p = &node_memory_chunk[0]; p < pend; p++) {
		if (start_pfn < p->start_pfn)
			break;
	}
	if (p < pend) {
		/* Shift the tail up one slot, starting from the last
		 * valid entry (pend - 1) so we never write past the
		 * end of the array. */
		for (q = pend - 1; q >= p; q--)
			*(q + 1) = *q;
	}
	p->start_pfn = start_pfn;
	p->end_pfn = end_pfn;
	p->pxm = pxm;

	num_memory_chunks++;

	printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
		start_pfn, end_pfn,
		memory_affinity->memory_type,
		memory_affinity->proximity_domain,
		(memory_affinity->flags.hot_pluggable ?
		 "enabled and removable" : "enabled" ) );
}

/* Take a chunk of pages from page frame cstart to cend and count the number
 * of pages in each zone, returned via zones[].
 */
static __init void chunk_to_zones(unsigned long cstart, unsigned long cend,
		unsigned long *zones)
{
	unsigned long max_dma;
	extern unsigned long max_low_pfn;

	int z;
	unsigned long rend;

	/* FIXME: MAX_DMA_ADDRESS and max_low_pfn are trying to provide
	 * similarly scoped information and should be handled in a consistent
	 * manner.
	 */
	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	/* Split the hole into the zones in which it falls.  Repeatedly
	 * take the segment in which the remaining hole starts, round it
	 * to the end of that zone.
	 */
	memset(zones, 0, MAX_NR_ZONES * sizeof(long));
	while (cstart < cend) {
		if (cstart < max_dma) {
			z = ZONE_DMA;
			rend = (cend < max_dma)? cend : max_dma;

		} else if (cstart < max_low_pfn) {
			z = ZONE_NORMAL;
			rend = (cend < max_low_pfn)? cend : max_low_pfn;

		} else {
			z = ZONE_HIGHMEM;
			rend = cend;
		}
		zones[z] += rend - cstart;
		cstart = rend;
	}
}
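/*
 * Example (illustrative): with max_dma == 0x1000 (the 16 MB ISA DMA limit
 * expressed in page frames) and max_low_pfn well above it, a hole spanning
 * pfns 0x0f00 to 0x1100 is split into 0x100 pages charged to ZONE_DMA and
 * 0x100 pages charged to ZONE_NORMAL.
 */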

/*
 * The node memory chunk array is kept sorted by start address (see the
 * insertion sort in parse_memory_affinity_structure), so we can assume
 * that the first "start" address we see for a node is the real start of
 * that node, and that each "end" address is at or after the previous one.
 */
static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
{
	/*
	 * Only add present memory as told by the e820.
	 * There is no guarantee from the SRAT that the memory it
	 * enumerates is present at boot time because it represents
	 * *possible* memory hotplug areas the same as normal RAM.
	 */
	if (memory_chunk->start_pfn >= max_pfn) {
		printk (KERN_INFO "Ignoring SRAT pfns: 0x%08lx -> %08lx\n",
			memory_chunk->start_pfn, memory_chunk->end_pfn);
		return;
	}
	if (memory_chunk->nid != nid)
		return;

	if (!node_has_online_mem(nid))
		node_start_pfn[nid] = memory_chunk->start_pfn;

	if (node_start_pfn[nid] > memory_chunk->start_pfn)
		node_start_pfn[nid] = memory_chunk->start_pfn;

	if (node_end_pfn[nid] < memory_chunk->end_pfn)
		node_end_pfn[nid] = memory_chunk->end_pfn;
}
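/*
 * Example (illustrative): chunks [0x100, 0x200) and [0x300, 0x400) on
 * node 0 leave node_start_pfn[0] == 0x100 and node_end_pfn[0] == 0x400;
 * the gap between them is accounted separately by get_zholes_init().
 */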

/* Parse the ACPI Static Resource Affinity Table */
static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
{
	u8 *start, *end, *p;
	int i, j, nid;

	start = (u8 *)(&(sratp->reserved) + 1);	/* skip header */
	p = start;
	end = (u8 *)sratp + sratp->header.length;

	memset(pxm_bitmap, 0, sizeof(pxm_bitmap));	/* init proximity domain bitmap */
	memset(node_memory_chunk, 0, sizeof(node_memory_chunk));
	memset(zholes_size, 0, sizeof(zholes_size));

	num_memory_chunks = 0;
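	/*
	 * Walk the SRAT sub-tables.  Each entry starts with a one-byte
	 * type (p[0]) followed by a one-byte length (p[1]); the loop
	 * below dispatches on the type and then advances by the length
	 * to reach the next entry.
	 */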
	while (p < end) {
		switch (*p) {
		case ACPI_SRAT_PROCESSOR_AFFINITY:
			parse_cpu_affinity_structure(p);
			break;
		case ACPI_SRAT_MEMORY_AFFINITY:
			parse_memory_affinity_structure(p);
			break;
		default:
			printk("ACPI 2.0 SRAT: unknown entry skipped: type=0x%02X, len=%d\n", p[0], p[1]);
			break;
		}
		p += p[1];
		if (p[1] == 0) {
			printk("acpi20_parse_srat: Entry length value is zero;"
				" can't parse any further!\n");
			break;
		}
	}

	if (num_memory_chunks == 0) {
		printk("could not find any ACPI SRAT memory areas.\n");
		goto out_fail;
	}

	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */
	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers can go as high as 255, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);

	printk("pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk("%02X ", pxm_bitmap[i]);
	}
	printk("\n");
	printk("Number of logical nodes in system = %d\n", num_online_nodes());
	printk("Number of memory chunks in system = %d\n", num_memory_chunks);

	for (j = 0; j < num_memory_chunks; j++){
		struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
		printk("chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
		       j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		node_read_chunk(chunk->nid, chunk);
	}

	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = node_end_pfn[nid];

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	return 0;
}

int __init get_memcfg_from_srat(void)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_rsdp *rsdp = NULL;
	struct acpi_table_rsdt *rsdt = NULL;
	struct acpi_pointer rsdp_address;
	struct acpi_table_rsdt saved_rsdt;
	int tables = 0;
	int i = 0;

	if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING,
						&rsdp_address))) {
		printk("%s: System description tables not found\n",
		       __FUNCTION__);
		goto out_err;
	}

	if (rsdp_address.pointer_type == ACPI_PHYSICAL_POINTER) {
		printk("%s: assigning address to rsdp\n", __FUNCTION__);
		rsdp = (struct acpi_table_rsdp *)
			(u32)rsdp_address.pointer.physical;
	} else {
		printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
		goto out_err;
	}
	if (!rsdp) {
		printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
		goto out_err;
	}

	printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
	       rsdp->oem_id);

	if (strncmp(rsdp->signature, RSDP_SIG, strlen(RSDP_SIG))) {
		printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
		goto out_err;
	}

	rsdt = (struct acpi_table_rsdt *)
		boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt));

	if (!rsdt) {
		printk(KERN_WARNING
		       "%s: ACPI: Invalid root system description tables (RSDT)\n",
		       __FUNCTION__);
		goto out_err;
	}

	header = &rsdt->header;

	if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) {
		printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
		goto out_err;
	}

	/*
	 * The number of tables is computed by taking the total size of
	 * the RSDT minus the size of its header, divided by the size of
	 * each entry (4-byte table pointers).
	 */
	tables = (header->length - sizeof(struct acpi_table_header)) / 4;
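	/*
	 * Example (illustrative): with the 36-byte ACPI table header, an
	 * RSDT whose header.length is 52 carries (52 - 36) / 4 == 4 table
	 * pointers.
	 */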

	if (!tables)
		goto out_err;

	memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));

	if (saved_rsdt.header.length > sizeof(saved_rsdt)) {
		printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
		       saved_rsdt.header.length);
		goto out_err;
	}

	printk("Begin SRAT table scan....\n");

	for (i = 0; i < tables; i++) {
		/* Map in header, then map in full table length. */
		header = (struct acpi_table_header *)
			boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header));
		if (!header)
			break;
		header = (struct acpi_table_header *)
			boot_ioremap(saved_rsdt.entry[i], header->length);
		if (!header)
			break;

		if (strncmp((char *) &header->signature, "SRAT", 4))
			continue;

		/* we've found the srat table. don't need to look at any more tables */
		return acpi20_parse_srat((struct acpi_table_srat *)header);
	}
out_err:
	printk("failed to get NUMA memory information from SRAT table\n");
	return 0;
}

/* For each node run the memory list to determine whether there are
 * any memory holes.  For each hole determine which ZONE they fall
 * into.
 *
 * NOTE#1: this requires knowledge of the zone boundaries and so
 * _cannot_ be performed before those are calculated in setup_memory.
 *
 * NOTE#2: we rely on the fact that the memory chunks are ordered by
 * start pfn number during setup.
 */
static void __init get_zholes_init(void)
{
	int nid;
	int c;
	int first;
	unsigned long end = 0;

	for_each_online_node(nid) {
		first = 1;
		for (c = 0; c < num_memory_chunks; c++){
			if (node_memory_chunk[c].nid == nid) {
				if (first) {
					end = node_memory_chunk[c].end_pfn;
					first = 0;

				} else {
					/* Record any gap between this chunk
					 * and the previous chunk on this node
					 * against the zones it spans.
					 */
					chunk_to_zones(end,
						node_memory_chunk[c].start_pfn,
						&zholes_size[nid * MAX_NR_ZONES]);
					/* track the end of the previous chunk
					 * so the next gap is measured from it */
					end = node_memory_chunk[c].end_pfn;
				}
			}
		}
	}
}
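/*
 * Example (illustrative): node 0 with chunks [0x100, 0x200) and
 * [0x300, 0x400) has the gap [0x200, 0x300) passed to chunk_to_zones()
 * and charged against zholes_size for node 0.
 */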

unsigned long * __init get_zholes_size(int nid)
{
	if (!zholes_size_init) {
		zholes_size_init++;
		get_zholes_init();
	}
	if (nid >= MAX_NUMNODES || !node_online(nid))
		printk("%s: nid = %d is invalid/offline. num_online_nodes = %d\n",
		       __FUNCTION__, nid, num_online_nodes());
	return &zholes_size[nid * MAX_NR_ZONES];
}