/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 *
 * Getting sanitize_e820_map() in sync with i386 version by applying change:
 * - Provisions for empty E820 memory regions (reported by certain BIOSes).
 *   Alex Achenbach <xela@slit.de>, December 2002.
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pfn.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/setup.h>

struct e820map e820;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif

/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.
 */
int
e820_any_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(e820_any_mapped);
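
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * caller that wants to know whether any usable RAM falls inside a candidate
 * MMIO window could probe it before handing the window to a device:
 *
 *	if (e820_any_mapped(0xfe000000ULL, 0xfe100000ULL, E820_RAM))
 *		return -EBUSY;
 *
 * Passing type == 0 makes the check match an entry of any type.
 */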

/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case.
 */
int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		/* does the region (partially) overlap <start,end>? */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;

		/*
		 * If the region starts at or before <start>, move start up
		 * to the end of the region: the range is covered that far.
		 */
		if (ei->addr <= start)
			start = ei->addr + ei->size;
		/*
		 * If start is now at or beyond end, we're done: full
		 * coverage.
		 */
		if (start >= end)
			return 1;
	}
	return 0;
}
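
/*
 * Illustrative sketch only (not part of the original file): early setup
 * code that wants to be sure an entire candidate region is usable RAM,
 * say the first 16MB it plans to hand to a DMA-limited allocator, could
 * check it like this:
 *
 *	if (!e820_all_mapped(0, 16ULL << 20, E820_RAM))
 *		printk(KERN_WARNING "low 16MB is not all usable RAM\n");
 */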

/*
 * Add a memory region to the kernel e820 map.
 */
void __init add_memory_region(u64 start, u64 size, int type)
{
	int x = e820.nr_map;

	if (x == E820MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
}
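
/*
 * Illustrative sketch only (not part of the original file): platform code
 * that discovers memory by some firmware-specific means (the values below
 * are made up) would feed it into the e820 map one region at a time:
 *
 *	add_memory_region(0x0ULL, 0x9f000ULL, E820_RAM);
 *	add_memory_region(0x100000ULL, 0x3ff00000ULL, E820_RAM);
 *
 * Entries are appended as-is; sanitize_e820_map() below is what later
 * sorts them and resolves any overlaps.
 */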

void __init e820_print_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
		       (unsigned long long) e820.map[i].addr,
		       (unsigned long long)
		       (e820.map[i].addr + e820.map[i].size));
		switch (e820.map[i].type) {
		case E820_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case E820_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		case E820_ACPI:
			printk(KERN_CONT "(ACPI data)\n");
			break;
		case E820_NVS:
			printk(KERN_CONT "(ACPI NVS)\n");
			break;
		default:
			printk(KERN_CONT "type %u\n", e820.map[i].type);
			break;
		}
	}
}
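
/*
 * Illustrative sketch only (not part of the original file): with
 * "BIOS-e820" passed as 'who', the output looks roughly like this
 * (addresses made up; the end address printed is exclusive):
 *
 *	 BIOS-e820: 0000000000000000 - 000000000009f000 (usable)
 *	 BIOS-e820: 00000000000f0000 - 0000000000100000 (reserved)
 *	 BIOS-e820: 0000000000100000 - 000000003fff0000 (usable)
 */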

/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps.
 *
 */
int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820MAX] __initdata;
	static struct change_member *change_point[2*E820MAX] __initdata;
	static struct e820entry *overlap_list[E820MAX] __initdata;
	static struct e820entry new_bios[E820MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
	   Visually we're performing the following
	   (1,2,3,4 = memory types)...

	   Sample memory map (w/overlaps):
	      ____22__________________
	      ______________________4_
	      ____1111________________
	      _44_____________________
	      11111111________________
	      ____________________33__
	      ___________44___________
	      __________33333_________
	      ______________22________
	      ___________________2222_
	      _________111111111______
	      _____________________11_
	      _________________4______

	   Sanitized equivalent (no overlap):
	      1_______________________
	      _44_____________________
	      ___1____________________
	      ____22__________________
	      ______11________________
	      _________1______________
	      __________3_____________
	      ___________44___________
	      _____________33_________
	      _______________2________
	      ________________1_______
	      _________________4______
	      ___________________2____
	      ____________________33__
	      ______________________4_
	   */

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2 * old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr +
				biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			unsigned long long curaddr, lastaddr;
			unsigned long long curpbaddr, lastpbaddr;

			curaddr = change_point[i]->addr;
			lastaddr = change_point[i - 1]->addr;
			curpbaddr = change_point[i]->pbios->addr;
			lastpbaddr = change_point[i - 1]->pbios->addr;

			/*
			 * swap entries when they are out of order:
			 *
			 * curaddr < lastaddr, or
			 * curaddr == lastaddr and curaddr == curpbaddr and
			 * lastaddr != lastpbaddr
			 */
			if (curaddr < lastaddr ||
			    (curaddr == lastaddr && curaddr == curpbaddr &&
			     lastaddr != lastpbaddr)) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_bios_entry = 0;	/* index for creating new bios map entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */

	/* loop through change-points, determining effect on the new bios map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr ==
		    change_point[chgidx]->pbios->addr) {
			/*
			 * add map entry to overlap list (> 1 entry
			 * implies an overlap)
			 */
			overlap_list[overlap_entries++] =
				change_point[chgidx]->pbios;
		} else {
			/*
			 * remove entry from list (order independent,
			 * so swap with last)
			 */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] ==
				    change_point[chgidx]->pbios)
					overlap_list[i] =
						overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * if there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/*
		 * continue building up new bios map based on this
		 * information
		 */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/*
				 * move forward only if the new size
				 * was non-zero
				 */
				if (new_bios[new_bios_entry].size != 0)
					/*
					 * no more space left for new
					 * bios entries ?
					 */
					if (++new_bios_entry >= E820MAX)
						break;
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr =
					change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	/* retain count for new bios entries */
	new_nr = new_bios_entry;

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
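
/*
 * Illustrative sketch only (not part of the original file): given the two
 * made-up overlapping input entries
 *
 *	0x00000000 - 0x00100000  E820_RAM
 *	0x000e0000 - 0x00110000  E820_RESERVED
 *
 * the higher type value wins over the overlapping part, so the sanitized
 * map comes out as
 *
 *	0x00000000 - 0x000e0000  E820_RAM
 *	0x000e0000 - 0x00110000  E820_RESERVED
 */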

/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory. If we aren't, we'll fake a memory map.
 */
int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	do {
		u64 start = biosmap->addr;
		u64 size = biosmap->size;
		u64 end = start + size;
		u32 type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		add_memory_region(start, size, type);
	} while (biosmap++, --nr_map);
	return 0;
}
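
/*
 * Illustrative sketch only (not part of the original file); the names
 * 'boot_map', 'boot_nr' and 'fall_back_to_default_map()' are made up.
 * A caller holding the raw firmware table would typically sanitize it
 * first and then copy it in, building a fallback map if that fails:
 *
 *	sanitize_e820_map(boot_map, &boot_nr);
 *	if (copy_e820_map(boot_map, boot_nr) < 0)
 *		fall_back_to_default_map();
 *
 * A negative return means the table had fewer than two entries or
 * contained a region whose end wraps around 64 bits.
 */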

u64 __init update_memory_range(u64 start, u64 size, unsigned old_type,
			       unsigned new_type)
{
	int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;
		if (ei->type != old_type)
			continue;
		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			ei->type = new_type;
			real_updated_size += ei->size;
			continue;
		}
		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
		add_memory_region(final_start, final_end - final_start,
				  new_type);
		real_updated_size += final_end - final_start;
	}
	return real_updated_size;
}
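
/*
 * Illustrative sketch only (not part of the original file): a caller that
 * wants to mark a firmware-claimed window (addresses made up) as reserved
 * could do
 *
 *	u64 done = update_memory_range(0x7f000000ULL, 0x1000000ULL,
 *				       E820_RAM, E820_RESERVED);
 *
 * and then call update_e820(), so the partially-covered case, which only
 * appends a new overlapping entry, gets folded back into a clean,
 * non-overlapping map.
 */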

void __init update_e820(void)
{
	u8 nr_map;

	nr_map = e820.nr_map;
	if (sanitize_e820_map(e820.map, &nr_map))
		return;
	e820.nr_map = nr_map;
	printk(KERN_INFO "modified physical RAM map:\n");
	e820_print_map("modified");
}

/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space. We pass this space to the PCI subsystem, so it can
 * assign MMIO resources to hotplug or unconfigured devices there.
 * Hopefully the BIOS left enough space for that.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize, round;
	unsigned long long last;
	int i;
	int found = 0;

	last = 0x100000000ull;
	gapstart = 0x10000000;
	gapsize = 0x400000;
	i = e820.nr_map;
	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap > gapsize) {
				gapsize = gap;
				gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}

#ifdef CONFIG_X86_64
	if (!found) {
		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit "
		       "address range\n"
		       KERN_ERR "PCI: Unassigned devices with 32bit resource "
		       "registers may break!\n");
	}
#endif

	/*
	 * See how much we want to round up: start off with
	 * rounding to the next 1MB area.
	 */
	round = 0x100000;
	while ((gapsize >> 4) > round)
		round += round;
	/* Fun with two's complement */
	pci_mem_start = (gapstart + round) & -round;

	printk(KERN_INFO
	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
	       pci_mem_start, gapstart, gapsize);
}
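
/*
 * Illustrative sketch only (not part of the original file), with made-up
 * numbers: for a 256MB gap starting at 0xc0000000, the loop above doubles
 * 'round' from 1MB until it reaches gapsize/16, here 16MB, and then
 *
 *	pci_mem_start = (0xc0000000 + 0x1000000) & ~(0x1000000 - 1)
 *		      = 0xc1000000
 *
 * so PCI allocations start 16MB into the gap, aligned to a 16MB boundary.
 */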