#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
+#include <linux/tboot.h>
+#include <linux/stackprotector.h>
+#include <linux/gfp.h>
#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
-#include <asm/genapic.h>
#include <asm/smpboot_hooks.h>
+#include <asm/i8259.h>
#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
/* representing HT siblings of each logical CPU */
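+/* now cpumask_var_t: storage is allocated in native_smp_prepare_cpus() */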
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
-static atomic_t init_deasserted;
-
-
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
+atomic_t init_deasserted;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
- { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-EXPORT_SYMBOL(node_to_cpumask_map);
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);
static void map_cpu_to_node(int cpu, int node)
{
printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
- cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
+ cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
cpu_to_node_map[cpu] = node;
}
printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
for (node = 0; node < MAX_NUMNODES; node++)
- cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
map_cpu_to_logical_apicid();
notify_cpu_starting(cpuid);
+
+ /*
+ * Need to set up vector mappings before we enable interrupts.
+ */
+ setup_vector_irq(smp_processor_id());
/*
* Get our bogomips.
*
cpumask_set_cpu(cpuid, cpu_callin_mask);
}
-static int __cpuinitdata unsafe_smp;
-
/*
* Activate a secondary processor.
*/
check_tsc_sync_target();
if (nmi_watchdog == NMI_IO_APIC) {
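+ /* mask/unmask IRQ0 through the legacy_pic abstraction instead of the 8259A helpers */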
- disable_8259A_irq(0);
+ legacy_pic->chip->mask(0);
enable_NMI_through_LVT0();
- enable_8259A_irq(0);
+ legacy_pic->chip->unmask(0);
}
#ifdef CONFIG_X86_32
__flush_tlb_all();
#endif
- /* This must be done before setting cpu_online_map */
+ /* This must be done before setting cpu_online_mask */
set_cpu_sibling_map(raw_smp_processor_id());
wmb();
*/
ipi_call_lock();
lock_vector_lock();
- __setup_vector_irq(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
ipi_call_unlock();
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
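+ /* platform hook for per-CPU NMI setup (assumed: the NMI watchdog on bare metal) */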
+ x86_platform.nmi_init();
/* enable local interrupts */
local_irq_enable();
- setup_secondary_clock();
+ /* to prevent fake stack check failure in clock setup */
+ boot_init_stack_canary();
+
+ x86_cpuinit.setup_percpu_clockev();
wmb();
cpu_idle();
}
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* In this case, llc_shared_map is a pointer to a cpumask. */
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+ const struct cpuinfo_x86 *src)
{
- /*
- * Mask B, Pentium, but not Pentium MMX
- */
- if (c->x86_vendor == X86_VENDOR_INTEL &&
- c->x86 == 5 &&
- c->x86_mask >= 1 && c->x86_mask <= 4 &&
- c->x86_model <= 3)
- /*
- * Remember we have B step Pentia with bugs
- */
- smp_b_stepping = 1;
-
- /*
- * Certain Athlons might work (for various values of 'work') in SMP
- * but they are not certified as MP capable.
- */
- if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
- if (num_possible_cpus() == 1)
- goto valid_k7;
-
- /* Athlon 660/661 is valid. */
- if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
- (c->x86_mask == 1)))
- goto valid_k7;
-
- /* Duron 670 is valid */
- if ((c->x86_model == 7) && (c->x86_mask == 0))
- goto valid_k7;
-
- /*
- * Athlon 662, Duron 671, and Athlon >model 7 have capability
- * bit. It's worth noting that the A5 stepping (662) of some
- * Athlon XP's have the MP bit set.
- * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
- * more.
- */
- if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
- ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
- (c->x86_model > 7))
- if (cpu_has_mp)
- goto valid_k7;
-
- /* If we get here, not a certified SMP capable AMD system. */
- unsafe_smp = 1;
- }
-
-valid_k7:
- ;
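+ /* preserve dst's mask pointer across the struct assignment */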
+ struct cpumask *llc = dst->llc_shared_map;
+ *dst = *src;
+ dst->llc_shared_map = llc;
}
-
-static void __cpuinit smp_checks(void)
+#else
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+ const struct cpuinfo_x86 *src)
{
- if (smp_b_stepping)
- printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
- "with B stepping processors.\n");
-
- /*
- * Don't taint if we are running SMP kernel on a single non-MP
- * approved Athlon
- */
- if (unsafe_smp && num_online_cpus() > 1) {
- printk(KERN_INFO "WARNING: This combination of AMD"
- "processors is not suitable for SMP.\n");
- add_taint(TAINT_UNSAFE_SMP);
- }
+ *dst = *src;
}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
/*
* The bootstrap kernel entry code has set these up. Save them for
{
struct cpuinfo_x86 *c = &cpu_data(id);
- *c = boot_cpu_data;
+ copy_cpuinfo_x86(c, &boot_cpu_data);
c->cpu_index = id;
if (id != 0)
identify_secondary_cpu(c);
- smp_apply_quirks(c);
}
cpumask_set_cpu(cpu, cpu_sibling_mask(i));
cpumask_set_cpu(i, cpu_core_mask(cpu));
cpumask_set_cpu(cpu, cpu_core_mask(i));
- cpumask_set_cpu(i, &c->llc_shared_map);
- cpumask_set_cpu(cpu, &o->llc_shared_map);
+ cpumask_set_cpu(i, c->llc_shared_map);
+ cpumask_set_cpu(cpu, o->llc_shared_map);
}
}
} else {
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
}
- cpumask_set_cpu(cpu, &c->llc_shared_map);
+ cpumask_set_cpu(cpu, c->llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) {
cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
for_each_cpu(i, cpu_sibling_setup_mask) {
if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
- cpumask_set_cpu(i, &c->llc_shared_map);
- cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
+ cpumask_set_cpu(i, c->llc_shared_map);
+ cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
}
if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
cpumask_set_cpu(i, cpu_core_mask(cpu));
* For perf, we return last level cache shared map.
* And for power savings, we return cpu_core_map
*/
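+ /*
+ * Exception: AMD multi-node ("DCM") processors have one last level
+ * cache per internal node, so cpu_core_map would span two caches
+ * there; use llc_shared_map for them instead.
+ */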
- if (sched_mc_power_savings || sched_smt_power_savings)
+ if ((sched_mc_power_savings || sched_smt_power_savings) &&
+ !(cpu_has(c, X86_FEATURE_AMD_DCM)))
return cpu_core_mask(cpu);
else
- return &c->llc_shared_map;
-}
-
-cpumask_t cpu_coregroup_map(int cpu)
-{
- return *cpu_coregroup_mask(cpu);
+ return c->llc_shared_map;
}
static void impress_friends(void)
* INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
* won't ... remember to clear down the APIC, etc later.
*/
-int __devinit
+int __cpuinit
wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
{
unsigned long send_status, accept_status = 0;
return (send_status | accept_status);
}
-int __devinit
+static int __cpuinit
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
unsigned long send_status, accept_status = 0;
int maxlvt, num_starts, j;
- if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
- send_status = uv_wakeup_secondary(phys_apicid, start_eip);
- atomic_set(&init_deasserted, 1);
- return send_status;
- }
-
maxlvt = lapic_get_maxlvt();
/*
complete(&c_idle->done);
}
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
+/* reduce the number of lines printed when booting a large cpu count system */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+ static int current_node = -1;
+ int node = cpu_to_node(cpu);
+
+ if (system_state == SYSTEM_BOOTING) {
+ if (node != current_node) {
+ if (current_node > (-1))
+ pr_cont(" Ok.\n");
+ current_node = node;
+ pr_info("Booting Node %3d, Processors ", node);
+ }
+ pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+ return;
+ } else
+ pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+ node, cpu, apicid);
+}
+
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from ->wakeup_cpu.
+ * Returns zero if CPU booted OK, else error code from
+ * ->wakeup_secondary_cpu.
*/
+static int __cpuinit do_boot_cpu(int apicid, int cpu)
{
unsigned long boot_error = 0;
- int timeout;
unsigned long start_ip;
- unsigned short nmi_high = 0, nmi_low = 0;
+ int timeout;
struct create_idle c_idle = {
- .cpu = cpu,
- .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+ .cpu = cpu,
+ .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
- INIT_WORK(&c_idle.work, do_fork_idle);
+
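+ /* on-stack work item: needs the _ON_STACK/destroy pair for object debugging */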
+ INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
alternatives_smp_switch(1);
if (IS_ERR(c_idle.idle)) {
printk("failed fork for CPU %d\n", cpu);
+ destroy_work_on_stack(&c_idle.work);
return PTR_ERR(c_idle.idle);
}
/* start_ip had better be page-aligned! */
start_ip = setup_trampoline();
- /* So we see what's up */
- printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
- cpu, apicid, start_ip);
+ /* So we see what's up */
+ announce_cpu(cpu, apicid);
/*
* This grunge runs the startup process for
pr_debug("Setting warm reset code and vector.\n");
- if (apic->store_NMI_vector)
- apic->store_NMI_vector(&nmi_high, &nmi_low);
-
smpboot_setup_warm_reset_vector(start_ip);
/*
* Be paranoid about clearing APIC errors.
}
/*
- * Starting actual IPI sequence...
+ * Kick the secondary CPU. Use the method in the APIC driver
+ * if it's defined - or use an INIT boot APIC message otherwise:
*/
- boot_error = apic->wakeup_cpu(apicid, start_ip);
+ if (apic->wakeup_secondary_cpu)
+ boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
+ else
+ boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
if (!boot_error) {
/*
udelay(100);
}
- if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
- /* number CPUs logically, starting from 1 (BSP is 0) */
- pr_debug("OK.\n");
- printk(KERN_INFO "CPU%d: ", cpu);
- print_cpu_info(&cpu_data(cpu));
- pr_debug("CPU has booted.\n");
- } else {
+ if (cpumask_test_cpu(cpu, cpu_callin_mask))
+ pr_debug("CPU%d: has booted.\n", cpu);
+ else {
boot_error = 1;
if (*((volatile unsigned char *)trampoline_base)
== 0xA5)
/* trampoline started but...? */
- printk(KERN_ERR "Stuck ??\n");
+ pr_err("CPU%d: Stuck ??\n", cpu);
else
/* trampoline code not run */
- printk(KERN_ERR "Not responding.\n");
+ pr_err("CPU%d: Not responding.\n", cpu);
if (apic->inquire_remote_apic)
apic->inquire_remote_apic(apicid);
}
/* mark "stuck" area as not stuck */
*((volatile unsigned long *)trampoline_base) = 0;
- /*
- * Cleanup possible dangling ends...
- */
- smpboot_restore_warm_reset_vector();
+ if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
+ /*
+ * Cleanup possible dangling ends...
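+ * (not done on UV, which wakes secondaries via its own method)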
+ */
+ smpboot_restore_warm_reset_vector();
+ }
+ destroy_work_on_stack(&c_idle.work);
return boot_error;
}
err = do_boot_cpu(apicid, cpu);
- zap_low_mappings();
+ zap_low_mappings(false);
low_mappings = 0;
#else
err = do_boot_cpu(apicid, cpu);
*/
static __init void disable_smp(void)
{
- /* use the read/write pointers to the present and possible maps */
- cpumask_copy(&cpu_present_map, cpumask_of(0));
- cpumask_copy(&cpu_possible_map, cpumask_of(0));
+ init_cpu_present(cpumask_of(0));
+ init_cpu_possible(cpumask_of(0));
smpboot_clear_io_apic_irqs();
if (smp_found_config)
*/
if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
!cpu_has_apic) {
- printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
- boot_cpu_physical_apicid);
- printk(KERN_ERR "... forcing use of dummy APIC emulation."
+ if (!disable_apic) {
+ pr_err("BIOS bug, local APIC #%d not detected!...\n",
+ boot_cpu_physical_apicid);
+ pr_err("... forcing use of dummy APIC emulation."
"(tell your hw vendor)\n");
+ }
smpboot_clear_io_apic();
- disable_ioapic_setup();
+ arch_disable_smp_support();
return -1;
}
*/
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
+ unsigned int i;
+
preempt_disable();
smp_cpu_index_default();
current_cpu_data = boot_cpu_data;
boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
current_thread_info()->cpu = 0; /* needed? */
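+ /* allocate zeroed storage for the per-cpu masks (heap-backed with CONFIG_CPUMASK_OFFSTACK) */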
+ for_each_possible_cpu(i) {
+ zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+ }
set_cpu_sibling_map(0);
-#ifdef CONFIG_X86_64
enable_IR_x2apic();
default_setup_apic_routing();
-#endif
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");
*/
setup_local_APIC();
-#ifdef CONFIG_X86_64
/*
* Enable IO APIC before setting up error vector
*/
if (!skip_ioapic_setup && nr_ioapics)
enable_IO_APIC();
-#endif
+
end_local_APIC_setup();
map_cpu_to_logical_apicid();
printk(KERN_INFO "CPU%d: ", 0);
print_cpu_info(&cpu_data(0));
- setup_boot_clock();
+ x86_init.timers.setup_percpu_clockev();
if (is_uv_system())
uv_system_init();
+
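+ /* defer MTRR/PAT programming on the APs until they have all come up */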
+ set_mtrr_aps_delayed_init();
out:
preempt_enable();
}
+
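+/*
+ * Called by the generic hotplug code around re-onlining all non-boot
+ * CPUs (e.g. on resume), so AP MTRR setup is batched as at boot.
+ */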
+void arch_enable_nonboot_cpus_begin(void)
+{
+ set_mtrr_aps_delayed_init();
+}
+
+void arch_enable_nonboot_cpus_end(void)
+{
+ mtrr_aps_init();
+}
+
/*
* Early setup to make printk work.
*/
void __init native_smp_prepare_boot_cpu(void)
{
int me = smp_processor_id();
- switch_to_new_gdt();
+ switch_to_new_gdt(me);
/* already set me in cpu_online_mask in boot_cpu_init() */
cpumask_set_cpu(me, cpu_callout_mask);
per_cpu(cpu_state, me) = CPU_ONLINE;
pr_debug("Boot done.\n");
impress_friends();
- smp_checks();
#ifdef CONFIG_X86_IO_APIC
setup_ioapic_dest();
#endif
check_nmi_watchdog();
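+ /* all APs are up: perform the deferred MTRR/PAT initialization */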
+ mtrr_aps_init();
}
static int __initdata setup_possible_cpus = -1;
/*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as CPUs
* are onlined, or offlined. The reason is per-cpu data-structures
* are allocated by some modules at init time, and dont expect to
* do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
* In case when cpu_hotplug is not compiled, then we resort to current
* behaviour, which is cpu_possible == cpu_present.
* - Ashok Raj
total_cpus = max_t(int, possible, num_processors + disabled_cpus);
- if (possible > CONFIG_NR_CPUS) {
+ /* nr_cpu_ids could be reduced via nr_cpus= */
+ if (possible > nr_cpu_ids) {
printk(KERN_WARNING
"%d Processors exceeds NR_CPUS limit of %d\n",
- possible, CONFIG_NR_CPUS);
- possible = CONFIG_NR_CPUS;
+ possible, nr_cpu_ids);
+ possible = nr_cpu_ids;
}
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
void cpu_disable_common(void)
{
int cpu = smp_processor_id();
- /*
- * HACK:
- * Allow any queued timer interrupts to get serviced
- * This is only a temporary solution until we cleanup
- * fixup_irqs as we do for IA64.
- */
- local_irq_enable();
- mdelay(1);
- local_irq_disable();
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
for (i = 0; i < 10; i++) {
/* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
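+ /* stay quiet during suspend; only log in the normal running state */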
- printk(KERN_INFO "CPU %d is now offline\n", cpu);
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+
if (1 == num_online_cpus())
alternatives_smp_switch(0);
return;
}
msleep(100);
}
- printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+ pr_err("CPU %u didn't die...\n", cpu);
}
void play_dead_common(void)
void native_play_dead(void)
{
play_dead_common();
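+ /* tell tboot (if active) that this CPU is entering wait-for-SIPI */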
+ tboot_shutdown(TB_SHUTDOWN_WFS);
wbinvd_halt();
}