/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Matthias Sattler:	Changes for 2.1 kernel map.
 *		Michel Lespinasse:	Changes for 2.1 kernel map.
 *		Michael Chastain:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *		Andi Kleen	:	Changed for SMP boot into long mode.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *	Probably mostly hotplug CPU ready now.
 */
#include <linux/config.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/nmi.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(phys_proc_id);
EXPORT_SYMBOL(cpu_core_id);
/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
	print_cpu_info(c);
}
/*
 * New Funky TSC sync algorithm borrowed from IA64.
 * Main advantage is that it doesn't reset the TSCs fully and
 * in general looks more robust and it works better than my earlier
 * attempts. I believe it was written by David Mosberger. Some minor
 * adjustments for x86-64 by me -AK
 *
 * Original comment reproduced below.
 *
 * Synchronize TSC of the current (slave) CPU with the TSC of the
 * MASTER CPU (normally the time-keeper CPU). We use a closed loop to
 * eliminate the possibility of unaccounted-for errors (such as
 * getting a machine check in the middle of a calibration step). The
 * basic idea is for the slave to ask the master what itc value it has
 * and to read its own itc before and after the master responds. Each
 * iteration gives us three timestamps:
 *
 *	t0: the slave's TSC just before sending the request,
 *	tm: the master's TSC when it responds,
 *	t1: the slave's TSC just after receiving the response.
 *
 * The goal is to adjust the slave's TSC such that tm falls exactly
 * half-way between t0 and t1. If we achieve this, the clocks are
 * synchronized provided the interconnect between the slave and the
 * master is symmetric. Even if the interconnect were asymmetric, we
 * would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us
 * synchronize the TSC to within one or two cycles. However, we can
 * only *guarantee* that the synchronization is accurate to within a
 * round-trip time, which is typically in the range of several hundred
 * cycles (e.g., ~500 cycles). In practice, this means that the TSCs
 * are usually almost perfectly synchronized, but we shouldn't assume
 * that the accuracy is much better than half a microsecond or so.
 *
 * [there are other errors like the latency of RDTSC and of the
 * WRMSR. These can also amount to hundreds of cycles. So it's
 * probably worse. It claims 153 cycles error on a dual Opteron,
 * but I suspect the numbers are actually somewhat worse -AK]
 */
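
/*
 * Illustrative sketch (not part of the boot path) of the arithmetic
 * described above, once an iteration has produced the three
 * timestamps t0, tm and t1:
 *
 *	long tcenter = t0/2 + t1/2;	-- midpoint of the slave's window
 *	long delta   = tcenter - tm;	-- >0: slave's TSC is ahead
 *
 * Writing (current TSC - delta) back with WRMSR moves tm to the
 * midpoint; whatever error remains is bounded by the roundtrip
 * time rt = t1 - t0, as argued above.
 */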
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/8)
/* Intentionally don't use cpu_relax() during TSC synchronization
   because we don't want to go into funky power save modes or cause
   hypervisors to schedule us away. Going to sleep would likely affect
   latency and low latency is the primary objective here. -AK */
#define no_cpu_relax()	barrier()
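
/*
 * For comparison, a normal spin loop would use cpu_relax(), which on
 * x86 is essentially:
 *
 *	asm volatile("rep; nop" ::: "memory");	-- the PAUSE instruction
 *
 * PAUSE hints the core to throttle the loop (and can invite a
 * hypervisor to deschedule the vCPU), which is exactly what must not
 * happen between the timestamp reads, hence the plain barrier().
 */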
static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
static volatile __cpuinitdata unsigned long go[SLAVE + 1];
static int notscsync __cpuinitdata;

#undef DEBUG_TSC_SYNC

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
/* Callback on boot CPU */
static __cpuinit void sync_master(void *arg)
{
	unsigned long flags, i;

	if (smp_processor_id() != boot_cpu_id)
		return;

	go[MASTER] = 0;

	local_irq_save(flags);
	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
		while (!go[MASTER])
			no_cpu_relax();
		go[MASTER] = 0;
		rdtscll(go[SLAVE]);
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our tsc differs from the tsc
 * on the master (time-keeper) CPU. A positive number indicates our
 * tsc is ahead of the master, negative that it is behind.
 */
static inline long
get_delta(long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	int i;

	for (i = 0; i < NUM_ITERS; ++i) {
		rdtscll(t0);
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			no_cpu_relax();
		go[SLAVE] = 0;
		rdtscll(t1);

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
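
/*
 * Why tcenter is computed in halves: near the top of the unsigned
 * range the naive average would wrap. A minimal demonstration:
 *
 *	unsigned long a = ~0UL - 1, b = ~0UL;
 *	(a + b)/2;			-- wraps, yields ~0UL/2 - 1
 *	a/2 + b/2 + (a % 2 + b % 2)/2;	-- correct, yields ~0UL - 1
 */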
static __cpuinit void sync_tsc(void)
{
	int i, done = 0;
	long delta, adj, adjust_latency = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#ifdef DEBUG_TSC_SYNC
	static struct syncdebug {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of tsc adjustment latency */
	} t[NUM_ROUNDS] __cpuinitdata;
#endif

	go[MASTER] = 1;

	smp_call_function(sync_master, NULL, 1, 0);

	while (go[MASTER])	/* wait for master to be ready */
		no_cpu_relax();

	spin_lock_irqsave(&tsc_sync_lock, flags);
	for (i = 0; i < NUM_ROUNDS; ++i) {
		delta = get_delta(&rt, &master_time_stamp);
		if (delta == 0) {
			done = 1;	/* let's lock on to this... */
			bound = rt;
		}

		if (!done) {
			unsigned long tsc;
			if (i > 0) {
				adjust_latency += -delta;
				adj = -delta + adjust_latency/4;
			} else
				adj = -delta;

			rdtscll(tsc);
			wrmsrl(MSR_IA32_TSC, tsc + adj);
		}
#ifdef DEBUG_TSC_SYNC
		t[i].rt = rt;
		t[i].master = master_time_stamp;
		t[i].diff = delta;
		t[i].lat = adjust_latency/4;
#endif
	}
	spin_unlock_irqrestore(&tsc_sync_lock, flags);

#ifdef DEBUG_TSC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO
	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n",
	       smp_processor_id(), boot_cpu_id, delta, rt);
}
static void __cpuinit tsc_sync_wait(void)
{
	if (notscsync || !cpu_has_tsc)
		return;
	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
			boot_cpu_id);
	sync_tsc();
}

static __init int notscsync_setup(char *s)
{
	notscsync = 1;
	return 0;
}
__setup("notscsync", notscsync_setup);
static atomic_t init_deasserted __cpuinitdata;
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n",&cpuid);

	disable_APIC_timer();

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	mb();

	/* Wait for TSC sync here so nothing gets scheduled before it is done.
	   We still process interrupts, which could see an inconsistent
	   time in that window unfortunately. */
	tsc_sync_wait();

	cpu_idle();
}
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n",j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_rip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);

		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
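
/*
 * Note on the STARTUP vector used above: the low byte of the SIPI
 * message is (start_rip >> 12), so the trampoline must sit on a 4K
 * boundary below 1MB. A sketch of the constraint (hypothetical
 * helper, not used by the boot path):
 *
 *	static inline int valid_sipi_rip(unsigned long rip)
 *	{
 *		return rip < 0x100000 && !(rip & 0xfff);
 *	}
 *
 * The woken AP starts in real mode at CS:IP = (start_rip >> 4):0.
 */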
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;

	/*
	 * We can't use kernel_thread since we must avoid
	 * rescheduling the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(idle);
	}

	cpu_pda[cpu].pcurrent = idle;

	start_rip = setup_trampoline();

	init_rsp = idle->thread.rsp;
	per_cpu(init_tss,cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(idle->thread_info, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
	       start_rip, init_rsp);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
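
	/*
	 * What the two stores above encode: on a warm reset with
	 * shutdown code 0xA the BIOS jumps far through the vector at
	 * 40:67, i.e. to physical address
	 *
	 *	(seg << 4) + off = ((start_rip >> 4) << 4) + (start_rip & 0xf)
	 *			 = start_rip
	 *
	 * 0x469 holds the segment, 0x467 the offset. (Illustrative
	 * arithmetic only; the far jump itself happens in the BIOS.)
	 */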
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
		return -EIO;
	}

	return 0;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
/*
 * Construct cpu_sibling_map[], so that we can tell the sibling CPU
 * on SMT systems efficiently.
 */
static __cpuinit void detect_siblings(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	for_each_online_cpu (cpu) {
		struct cpuinfo_x86 *c = cpu_data + cpu;
		int siblings = 0;
		int i;
		if (smp_num_siblings > 1) {
			for_each_online_cpu (i) {
				if (cpu_core_id[cpu] == cpu_core_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings) {
			printk(KERN_WARNING
			       "WARNING: %d siblings found for CPU%d, should be %d\n",
			       siblings, cpu, smp_num_siblings);
			smp_num_siblings = siblings;
		}
		if (c->x86_num_cores > 1) {
			for_each_online_cpu(i) {
				if (phys_proc_id[cpu] == phys_proc_id[i])
					cpu_set(i, cpu_core_map[cpu]);
			}
		} else
			cpu_core_map[cpu] = cpu_sibling_map[cpu];
	}
}
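
/*
 * Example of the maps detect_siblings() builds, for a hypothetical
 * single-package box with 2 cores and 2 HT siblings per core
 * (smp_num_siblings == 2, x86_num_cores == 2):
 *
 *	cpu  phys_proc_id  cpu_core_id  cpu_sibling_map  cpu_core_map
 *	 0        0             0           {0,1}         {0,1,2,3}
 *	 1        0             0           {0,1}         {0,1,2,3}
 *	 2        0             1           {2,3}         {0,1,2,3}
 *	 3        0             1           {2,3}         {0,1,2,3}
 */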
/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Free pages reserved for SMP bootup.
	 * When you add hotplug CPU support later remove this
	 * Note there is more work to be done for later CPU bootup.
	 */

	free_page((unsigned long) __va(PAGE_SIZE));
	free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
#endif
}
/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
}
/*
 * Handle user cpus=... parameter.
 */
static __init void enforce_max_cpus(unsigned max_cpus)
{
	int i, k;
	k = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		if (++k > max_cpus) {
			cpu_clear(i, cpu_possible_map);
			cpu_clear(i, cpu_present_map);
		}
	}
}
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
		       boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
		       boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		return -1;
	}

	return 0;
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */

	enforce_max_cpus(max_cpus);

	/*
	 * Fill in cpu_present_mask
	 */
	for (i = 0; i < NR_CPUS; i++) {
		int apicid = cpu_present_to_apicid(i);
		if (physid_isset(apicid, phys_cpu_present_map)) {
			cpu_set(i, cpu_present_map);
			/* possible map would be different if we supported real
			   CPU hotplug. */
			cpu_set(i, cpu_possible_map);
		}
	}

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	connect_bsp_APIC();
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/*
	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/*
	 * Set up local APIC timer on boot CPU.
	 */
	setup_boot_APIC_clock();
}
/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
}
/*
 * Entry point to boot a CPU.
 *
 * This is all __cpuinit, not __devinit for now because we don't support
 * CPU hotplug (yet).
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int err;
	int apicid = cpu_present_to_apicid(cpu);

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();
	return 0;
}
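
/*
 * Usage sketch: __cpu_up() is the architecture hook behind the
 * generic cpu_up() path; at boot the generic code effectively does
 * (assuming this kernel's hotplug core):
 *
 *	for_each_present_cpu(cpu)
 *		if (!cpu_online(cpu))
 *			cpu_up(cpu);	-- ends up calling __cpu_up()
 */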
/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_cleanup_boot();

#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif

	detect_siblings();
	check_nmi_watchdog();
}