/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Matthias Sattler:	Changes for 2.1 kernel map.
 *		Michel Lespinasse:	Changes for 2.1 kernel map.
 *		Michael Chastain:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *		Andi Kleen	:	Changed for SMP boot into long mode.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 */

#include <linux/config.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(phys_proc_id);

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map;

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;

/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __init setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
}

/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSCs synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];

#define NR_LOOPS 5

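/* Not referenced in this file; apparently a leftover from the i386 version. */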
extern unsigned int fast_gettimeoffset_quotient;

static void __init synchronize_tsc_bp (void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	long one_usec;
	int buggy = 0;

	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	one_usec = cpu_khz;

	atomic_set(&tsc_start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * all APs synchronize but they loop on '== num_cpus'
		 */
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1) mb();
		atomic_set(&tsc_count_stop, 0);
		wmb();
		/*
		 * this lets the APs save their current TSC:
		 */
		atomic_inc(&tsc_count_start);

		sync_core();
		rdtscll(tsc_values[smp_processor_id()]);
		/*
		 * We clear the TSC in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/*
		 * Wait for all APs to leave the synchronization point:
		 */
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1) mb();
		atomic_set(&tsc_count_start, 0);
		wmb();
		atomic_inc(&tsc_count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc_values[i];
			sum += t0;
		}
	}
	avg = sum / num_booting_cpus();

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;

		delta = tsc_values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/*
		 * Report clock differences bigger than 2 microseconds.
		 */
		if (delta > 2*one_usec) {
			long realdelta;
			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = delta / one_usec;
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
				i, realdelta);
		}

		sum += delta;
	}
	if (!buggy)
		printk("passed.\n");
}

static void __init synchronize_tsc_ap (void)
{
	int i;

	/*
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	 */
	while (!atomic_read(&tsc_start_flag)) mb();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus()) mb();

		sync_core();
		rdtscll(tsc_values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
	}
}
#undef NR_LOOPS

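/*
 * Set by the BSP in wakeup_secondary_via_INIT() once the INIT-deassert
 * IPI has been sent; smp_callin() spins on it before touching the local
 * APIC (see the 82489DX note there).
 */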
static atomic_t init_deasserted;

static void __init smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted));

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second; this overestimates, by a factor of
	 * two, the time the boot CPU spends sending the up to 2
	 * STARTUP IPIs. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (This is probably redundant on most
	 * boards.)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	disable_APIC_timer();

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);

	/*
	 * Synchronize the TSC with the BP
	 */
	if (cpu_has_tsc)
		synchronize_tsc_ap();
}

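/* Number of APs successfully started so far (the boot CPU is not counted) */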
static int cpucount;

/*
 * Activate a secondary processor.
 */
void __init start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting
	 * is so fragile that we want to limit the things done
	 * here to the bare minimum.
	 */
	cpu_init();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
	barrier();

	Dprintk("cpu %d: waiting for commence\n", smp_processor_id());
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	Dprintk("cpu %d: setting cpu_online_map\n", smp_processor_id());
	cpu_set(smp_processor_id(), cpu_online_map);
	wmb();

	cpu_idle();
}

extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);

#if APIC_DEBUG
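/*
 * Debug helper: uses remote-read IPIs to dump a few registers (ID,
 * version, spurious vector) of an AP that failed to come up.
 */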
static inline void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
#endif

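/*
 * Wake up an AP with the classic INIT / INIT-deassert / STARTUP sequence.
 * start_rip must be the physical address of the 4K-aligned trampoline
 * page; only its page number is sent in the STARTUP IPI vector field.
 */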
static int __init wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
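		/*
		 * The STARTUP vector field carries the page number of the
		 * 4K-aligned trampoline, i.e. start_rip >> 12.
		 */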
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_rip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}

static void __init do_boot_cpu (int apicid)
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_rip;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread() since we must avoid
	 * rescheduling the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	x86_cpu_to_apicid[cpu] = apicid;

	cpu_pda[cpu].pcurrent = idle;

	start_rip = setup_trampoline();

	init_rsp = idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(idle->thread_info, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
	       start_rip, init_rsp);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

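	/*
	 * CMOS shutdown status byte 0xA requests a warm start through the
	 * 40:67 reset vector; the segment:offset stored at 0x469/0x467
	 * below makes the BIOS jump to the trampoline after the INIT.
	 */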
	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	Dprintk("1.\n");
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	Dprintk("2.\n");
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
	Dprintk("3.\n");

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
#if APIC_DEBUG
			inquire_remote_apic(apicid);
#endif
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		cpucount--;
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
	}
}

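/*
 * Note: the cache size / bandwidth estimates computed below are not used
 * anywhere yet; this function is effectively a placeholder.
 */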
static void smp_tune_scheduling (void)
{
	int cachesize;			/* kB */
	unsigned long bandwidth = 1000;	/* MB/s */
	/*
	 * Rough estimation for SMP scheduling: this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 * CPU almost always at wakeup time (this is due to the small
	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
	 * the cache size)
	 */

	if (!cpu_khz) {
		return;
	} else {
		cachesize = boot_cpu_data.x86_cache_size;
		if (cachesize == -1) {
			cachesize = 16;	/* Pentiums, 2x8kB cache */
			bandwidth = 100;
		}
	}
}

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static void __init smp_boot_cpus(unsigned int max_cpus)
{
	unsigned apicid, cpu, bit, kicked;

	nmi_watchdog_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		goto smp_done;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
		       boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		disable_apic = 1;
		goto smp_done;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		disable_apic = 1;
		goto smp_done;
	}

	connect_bsp_APIC();
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id)
		BUG();

	x86_cpu_to_apicid[0] = boot_cpu_id;

	/*
	 * Now scan the CPU present map and fire up the other CPUs.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

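	/* "kicked" counts CPUs we have tried to wake up, the BSP included */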
	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if (apicid == boot_cpu_id || (apicid == BAD_APICID))
			continue;

		if (!physid_isset(apicid, phys_cpu_present_map))
			continue;
		/* max_cpus is unsigned, so a "max_cpus >= 0" check would be vacuous */
		if (max_cpus <= cpucount+1)
			continue;

		do_boot_cpu(apicid);
		++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	{
		/*
		 * Install writable page 0 entry to set BIOS data area.
		 */
		local_flush_tlb();

		/*
		 * Paranoid: Set warm reset code and vector here back
		 * to default values.
		 */
		CMOS_WRITE(0, 0xf);

		*((volatile int *) phys_to_virt(0x467)) = 0;
	}

	/*
	 * Allow the user to impress friends.
	 */

	Dprintk("Before bogomips.\n");
	if (!cpucount) {
		printk(KERN_INFO "Only one processor found.\n");
	} else {
		unsigned long bogosum = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_isset(cpu, cpu_callout_map))
				bogosum += cpu_data[cpu].loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			bogosum/(500000/HZ),
			(bogosum/(5000/HZ))%100);
		Dprintk("Before bogocount - setting activated=1.\n");
	}

	/*
	 * Construct cpu_sibling_map[], so that we can look up
	 * sibling CPUs efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings) {
			printk(KERN_WARNING
			       "WARNING: %d siblings found for CPU%d, should be %d\n",
			       siblings, cpu, smp_num_siblings);
			smp_num_siblings = siblings;
		}
	}

	Dprintk("Boot done.\n");

	/*
	 * Here we can be sure that there is an IO-APIC in the system. Let's
	 * go and set it up:
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount)
		synchronize_tsc_bp();

 smp_done:
	time_init_smp();
}

/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_boot_cpus(max_cpus);
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
}

int __devinit __cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86. See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask)) {
		local_irq_enable();
		return -ENOSYS;
	}

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		local_irq_enable();
		return -EIO;
	}
	local_irq_enable();

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
}
934