/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *	Felix Koop	:	NR_CPUS used properly
 *	Jose Renau	:	Handle single CPU case.
 *	Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *	Greg Wright	:	Fix for kernel stacks panic.
 *	Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler:	Changes for 2.1 kernel map.
 *	Michel Lespinasse:	Changes for 2.1 kernel map.
 *	Michael Chastain:	Change trampoline.S to gnu as.
 *	Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	:	Added APIC timers, based on code
 *				from Jose Renau
 *	Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *	Martin J. Bligh	:	Added support for multi-quad systems
 *	Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>

/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
#ifdef CONFIG_X86_HT
EXPORT_SYMBOL(smp_num_siblings);
#endif
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
EXPORT_SYMBOL(phys_proc_id);
int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
EXPORT_SYMBOL(cpu_core_id);

/* bitmap of online cpus */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

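/*
 * Physical APIC ID of each logical CPU; slots start out as 0xff
 * ("no CPU") and are filled in by do_boot_cpu() as APs come up.
 */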
u8 x86_cpu_to_apicid[NR_CPUS] =
			{ [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);

/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data [];
extern unsigned char trampoline_end  [];
static unsigned char *trampoline_base;
static int trampoline_exec;

static void map_cpu_to_logical_apicid(void);

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __init setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}

/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
	/*
	 * Make the SMP trampoline executable:
	 */
	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	if (id != 0)
		identify_cpu(c);
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		/* Athlon 660/661 is valid. */
		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model==7) && (c->x86_mask==0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
		 * capability bit. It's worth noting that the A5 stepping
		 * (662) of some Athlon XPs have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000
		 * for more.
		 */
		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, it's not a certified SMP capable AMD system. */
		tainted |= TAINT_UNSAFE_SMP;
	}

valid_k7:
	;
}

/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSC's synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];

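/*
 * Number of BP/AP rendezvous passes: the early iterations prime the
 * instruction cache, and only the final pass actually zeroes the TSCs.
 */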
#define NR_LOOPS 5

static void __init synchronize_tsc_bp (void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	unsigned int one_usec;
	int buggy = 0;

	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc_start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * all APs synchronize but they loop on '== num_cpus'
		 */
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_stop, 0);
		wmb();
		/*
		 * this lets the APs save their current TSC:
		 */
		atomic_inc(&tsc_count_start);

		rdtscll(tsc_values[smp_processor_id()]);
		/*
		 * We clear the TSC in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/*
		 * Wait for all APs to leave the synchronization point:
		 */
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_start, 0);
		wmb();
		atomic_inc(&tsc_count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc_values[i];
			sum += t0;
		}
	}
	avg = sum;
	do_div(avg, num_booting_cpus());

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;
		delta = tsc_values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/*
		 * We report clock differences bigger than 2 microseconds.
		 */
		if (delta > 2*one_usec) {
			long realdelta;
			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = delta;
			do_div(realdelta, one_usec);
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
		}

		sum += delta;
	}
	if (!buggy)
		printk("passed.\n");
}

static void __init synchronize_tsc_ap (void)
{
	int i;

	/*
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	 */
	while (!atomic_read(&tsc_start_flag)) mb();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus())
			mb();

		rdtscll(tsc_values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
	}
}
#undef NR_LOOPS

extern void calibrate_delay(void);

static atomic_t init_deasserted;

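/*
 * AP call-in handshake: wait for any pending INIT deassert, wait for
 * the BP to mark us in cpu_callout_map, set up the local APIC,
 * calibrate the delay loop, then signal readiness via cpu_callin_map.
 */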
static void __init smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",
			cpuid);
		BUG();
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	disable_APIC_timer();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);

	/*
	 * Synchronize the TSC with the BP
	 */
	if (cpu_has_tsc && cpu_khz)
		synchronize_tsc_ap();
}

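/* Number of APs started so far; the BP itself is not counted. */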
static int cpucount;

/*
 * Activate a secondary processor.
 */
static void __init start_secondary(void *unused)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting
	 * is so fragile that we want to limit the things done
	 * here to the bare minimum.
	 */
	cpu_init();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	setup_secondary_APIC_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	enable_APIC_timer();
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();
	cpu_set(smp_processor_id(), cpu_online_map);

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();
	cpu_idle();
}

/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure.
 * This function must not return.
 */
void __init initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */

	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"r" (current->thread.esp),"r" (current->thread.eip));
}

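/*
 * Initial stack for a booting AP, consumed by the startup code in
 * head.S; do_boot_cpu() points .esp at the idle task's stack before
 * waking the target CPU.
 */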
extern struct {
	void * esp;
	unsigned short ss;
} stack_start;

#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);

/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
	printk("Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = node;
}

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
	int node;

	printk("Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = 0;
}
#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})

#endif /* CONFIG_NUMA */

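/* Logical APIC ID of each logical CPU; BAD_APICID marks an unused slot. */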
u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, apicid_to_node(apicid));
}

static void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}

#if APIC_DEBUG
static inline void __inquire_remote_apic(int apicid)
{
	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk("Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
#endif

#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __init
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int timeout, maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */

#ifdef WAKE_SECONDARY_VIA_INIT
static int __init
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */

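/*
 * Only one wakeup_secondary_cpu() variant is compiled in; the subarch
 * header <mach_wakecpu.h> selects WAKE_SECONDARY_VIA_NMI (logical
 * delivery, as on NUMA-Q) or WAKE_SECONDARY_VIA_INIT (the standard
 * INIT/STARTUP sequence).
 */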
extern cpumask_t cpu_initialized;

static int __init do_boot_cpu(int apicid)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread since we must not
	 * reschedule the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	idle->thread.eip = (unsigned long) start_secondary;
	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();

	/* So we see what's up */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;

	irq_ctx_init(cpu);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_eip);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	x86_cpu_to_apicid[cpu] = apicid;
	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}

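/*
 * Note: the cachesize/bandwidth figures computed below are not used
 * beyond this function in this version; the estimation logic appears
 * to be vestigial.
 */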
static void smp_tune_scheduling (void)
{
	unsigned long cachesize;       /* kB   */
	unsigned long bandwidth = 350; /* MB/s */
	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 * CPU almost always at wakeup time (this is due to the small
	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
	 * the cache size)
	 */

	if (!cpu_khz) {
		/*
		 * this basically disables processor-affinity
		 * scheduling on SMP without a TSC.
		 */
		return;
	} else {
		cachesize = boot_cpu_data.x86_cache_size;
		if (cachesize == -1) {
			cachesize = 16;	/* Pentiums, 2x8kB cache */
			bandwidth = 100;
		}
	}
}

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
#ifdef CONFIG_X86_HT
EXPORT_SYMBOL(cpu_sibling_map);
#endif
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);

static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	cpus_clear(cpu_sibling_map[0]);
	cpu_set(0, cpu_sibling_map[0]);

	cpus_clear(cpu_core_map[0]);
	cpu_set(0, cpu_core_map[0]);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (do_boot_cpu(apicid))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct cpuinfo_x86 *c = cpu_data + cpu;
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (cpu_core_id[cpu] == cpu_core_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings) {
			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
			smp_num_siblings = siblings;
		}

		if (c->x86_num_cores > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					cpu_set(i, cpu_core_map[cpu]);
				}
			}
		} else {
			cpu_core_map[cpu] = cpu_sibling_map[cpu];
		}
	}

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}

/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_commenced_mask = cpumask_of_cpu(0);
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	smp_boot_cpus(max_cpus);
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
}

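/*
 * CPU hotplug support: __cpu_disable() runs on the CPU being offlined
 * and migrates its interrupts away; __cpu_die() is the survivor-side
 * wait for the per-CPU cpu_state to reach CPU_DEAD, which the dying
 * CPU sets from play_dead() in its idle loop.
 */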
#ifdef CONFIG_HOTPLUG_CPU

/* must be called with the cpucontrol mutex held */
static int __devinit cpu_enable(unsigned int cpu)
{
	/* get the target out of its holding state */
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	wmb();

	/* wait for the processor to ack it. timeout? */
	while (!cpu_online(cpu))
		cpu_relax();

	fixup_irqs(cpu_online_map);
	/* counter the disable in fixup_irqs() */
	local_irq_enable();
	return 0;
}

int __cpu_disable(void)
{
	cpumask_t map = cpu_online_map;
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	/* We enable the timer again on the exit path of the death loop */
	disable_APIC_timer();
	/* Allow any queued timer interrupts to get serviced */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	cpu_clear(cpu, map);
	fixup_irqs(map);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

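/*
 * Bring one CPU online: release it from smp_commenced_mask and spin
 * until start_secondary() marks it in cpu_online_map.
 */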
int __devinit __cpu_up(unsigned int cpu)
{
	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
		local_irq_enable();
		return -EIO;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Already up, and in cpu_quiescent now? */
	if (cpu_isset(cpu, smp_commenced_mask)) {
		cpu_enable(cpu);
		return 0;
	}
#endif

	local_irq_enable();
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
}

void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}