#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif
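/*
 * Usage sketch (see do_boot_cpu() below): on first bringup we fork a
 * fresh idle task and cache it with set_idle_for_cpu(); if the CPU is
 * later unplugged and re-plugged, get_idle_for_cpu() hands back the
 * cached thread instead of forking a new one.
 */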
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

static atomic_t init_deasserted;
/*
 * Statically set up for x86_64; harmless on x86, where smp_alloc_memory()
 * overwrites it with a freshly allocated low page.
 */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)

/* which logical CPUs are on which nodes */
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})
#endif
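/*
 * Worked example for the helpers above: map_cpu_to_node(2, 1) sets bit 2
 * in node_to_cpumask_map[1] and records cpu_to_node_map[2] = 1;
 * unmap_cpu_to_node(2) clears bit 2 in every node's mask again.
 */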
#ifdef CONFIG_X86_32
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
			{ [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define unmap_cpu_to_logical_apicid(cpu) do {} while (0)
#define map_cpu_to_logical_apicid() do {} while (0)
#endif
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
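/*
 * The callin/callout handshake in sketch form:
 *
 *	BSP (do_boot_cpu)			AP (smp_callin)
 *	cpu_set(cpu, cpu_callout_map)	--->	spins on cpu_callout_map
 *	polls cpu_callin_map		<---	cpu_set(cpuid, cpu_callin_map)
 *
 * The AP panics if the callout never arrives; the BSP treats a missing
 * callin as a boot error (see do_boot_cpu() below).
 */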
/*
 * Activate a secondary processor.
 */
void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
#ifdef CONFIG_VMI
	vmi_bringup();
#endif
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0();
		enable_8259A_irq(0);
	}

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();

	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());
	/*
	 * Allow the master to continue.
	 */
	spin_unlock(&vector_lock);

	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	setup_secondary_clock();

	wmb();
	cpu_idle();
}
#ifdef CONFIG_X86_32
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the ip.
	 */
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"m" (current->thread.sp), "m" (current->thread.ip));
}
#endif
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;
	}

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
		 * capability bit. It's worth noting that the A5 stepping
		 * (662) of some Athlon XPs have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000
		 * for more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		    (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
#endif
}
void smp_checks(void)
{
	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (num_online_cpus())
			printk(KERN_INFO "WARNING: This combination of AMD "
				"processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
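/*
 * Rough example of the maps built above, for one package with two cores
 * of two HT threads each (CPUs 0-3, CPUs 0/1 sharing a core):
 *
 *	cpu_sibling_map[0] = { 0, 1 }		(same core)
 *	cpu_core_map[0]    = { 0, 1, 2, 3 }	(same package)
 *	booted_cores       = 2
 *
 * llc_shared_map depends on how the last level cache is shared, as
 * recorded in cpu_llc_id.
 */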
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
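/*
 * Note: the trampoline must live in low memory because the AP starts in
 * real mode, and the STARTUP IPI can only point it at a page-aligned
 * address below 1MB (the vector is start_ip >> 12, see
 * wakeup_secondary_cpu() below).
 */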
#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}
#endif
void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");
}
static inline void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */
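/*
 * The NMI wakeup above is the alternative to the INIT/STARTUP sequence
 * below; which of the two gets compiled in is presumably selected by the
 * WAKE_SECONDARY_VIA_* macros from <mach_wakecpu.h>. The NMI simply
 * yanks the halted CPU awake via its logical APIC ID and, as the comment
 * above warns, leaves the APIC cleanup for later.
 */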
#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
#ifdef CONFIG_X86_64
			 (unsigned long)init_rsp);
#else
			 (unsigned long)stack_start.sp);
#endif

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/* STARTUP IPI */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/* Give the other CPU some time to accept the IPI. */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/* Give the other CPU some time to accept the IPI. */
		udelay(200);
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */
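/*
 * In sketch form, the sequence above is the classic MP-spec
 * INIT-SIPI-SIPI dance:
 *
 *	1. assert INIT (level triggered), then deassert it;
 *	2. for integrated APICs, send up to two STARTUP IPIs whose
 *	   vector field is the page number of the trampoline
 *	   (start_eip >> 12);
 *	3. after each step, poll the ICR and the ESR so that delivery
 *	   failures show up in send_status/accept_status.
 */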
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
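/*
 * do_fork_idle() exists so the fork can be pushed through keventd:
 * presumably this keeps the new idle thread from inheriting context from
 * whatever user task triggered the hotplug. do_boot_cpu() calls it
 * directly only when keventd is unavailable (early boot) or when we
 * already are keventd.
 */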
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	unsigned long boot_error = 0;
	int timeout;
	unsigned long start_ip;
	unsigned short nmi_high = 0, nmi_low = 0;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);
#ifdef CONFIG_X86_64
	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof(struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}
#endif

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
	per_cpu(current_task, cpu) = c_idle.idle;
	init_gdt(cpu);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	c_idle.idle->thread.ip = (unsigned long) start_secondary;
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.sp = (void *) c_idle.idle->thread.sp;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = c_idle.idle;
	init_rsp = c_idle.idle->thread.sp;
	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
	initial_code = (unsigned long)start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif

	/* start_ip had better be page-aligned! */
	start_ip = setup_trampoline();

	/* So we see what's up */
	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
	       cpu, apicid, start_ip);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_ip);

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_ip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk(KERN_INFO "CPU%d: ", cpu);
			print_cpu_info(&cpu_data(cpu));
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk(KERN_ERR "Stuck ??\n");
			else
				/* trampoline code not run */
				printk(KERN_ERR "Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
#ifdef CONFIG_X86_64
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
#endif
		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_possible_map);
		cpu_clear(cpu, cpu_present_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
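/*
 * do_boot_cpu() in outline: reuse or fork the idle task, point the
 * trampoline at start_secondary(), set the warm reset vector, kick the
 * AP via wakeup_secondary_cpu(), then poll cpu_callin_map for up to 5s.
 * On failure, every map touched during bringup is rolled back.
 */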
int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
#endif

	err = do_boot_cpu(apicid, cpu);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}
/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
#ifdef CONFIG_X86_32
	init_gdt(me);
	switch_to_new_gdt();
#endif
	/* already set me in cpu_online_map in boot_cpu_init() */
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}
void __init native_smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	Dprintk("Boot done.\n");

	impress_friends();
	smp_checks();
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	check_nmi_watchdog();
#ifdef CONFIG_X86_32
	zap_low_mappings();
#endif
}
#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
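/*
 * Example: booting with "additional_cpus=2" reserves two extra possible
 * CPUs for hotplug beyond those enumerated at boot (see
 * prefill_possible_map() below).
 */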
/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we fall back to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
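/*
 * Example: with num_processors = 2 and disabled_cpus = 2 in the BIOS
 * tables, and no additional_cpus= override, possible becomes 4 and the
 * kernel prints "SMP: Allowing 4 CPUs, 2 hotplug CPUs".
 */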
static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
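/*
 * The teardown handshake mirrors bringup: the dying CPU's idle task sets
 * its cpu_state to CPU_DEAD from play_dead(), and __cpu_die() on the
 * surviving side polls for that mark for at most 10 x 100ms before
 * giving up.
 */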
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);