x86: initialize per-cpu GDT segment in per-cpu setup
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d8faf79..7735e3d 100644
  * useful topology information for the kernel to make use of.  As a
  * result, all CPUs are treated as if they're single-core and
  * single-threaded.
- *
- * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -36,9 +33,7 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static void __cpuinit xen_init_lock_cpu(int cpu);
-
-cpumask_t xen_cpu_initialized_map;
+cpumask_var_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
 static DEFINE_PER_CPU(int, callfunc_irq);
@@ -55,20 +50,17 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_resched_count++;
-#else
-       add_pda(irq_resched_count, 1);
-#endif
+       inc_irq_stat(irq_resched_count);
 
        return IRQ_HANDLED;
 }
 
-static __cpuinit void cpu_bringup_and_idle(void)
+static __cpuinit void cpu_bringup(void)
 {
        int cpu = smp_processor_id();
 
        cpu_init();
+       touch_softlockup_watchdog();
        preempt_disable();
 
        xen_enable_sysenter();
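
The open-coded 32-bit/64-bit interrupt accounting in xen_reschedule_interrupt() above (and in the call-function handlers further down) collapses into a single inc_irq_stat() call. A minimal sketch of what such a helper hides, assuming definitions along the lines of the code being removed here (the real macros live in the x86 hardirq headers):

    /* sketch: one macro hiding the per-architecture irq-statistics storage */
    #ifdef CONFIG_X86_32
    # define inc_irq_stat(member)	(__get_cpu_var(irq_stat).member++)
    #else
    # define inc_irq_stat(member)	add_pda(member, 1)
    #endif

Every caller then reads identically on both architectures, e.g. inc_irq_stat(irq_resched_count).
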
@@ -82,13 +74,18 @@ static __cpuinit void cpu_bringup_and_idle(void)
        xen_setup_cpu_clockevents();
 
        cpu_set(cpu, cpu_online_map);
-       x86_write_percpu(cpu_state, CPU_ONLINE);
+       percpu_write(cpu_state, CPU_ONLINE);
        wmb();
 
        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();
 
        wmb();                  /* make sure everything is out */
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+       cpu_bringup();
        cpu_idle();
 }
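
Splitting cpu_bringup() out of cpu_bringup_and_idle() lets the CPU-hotplug code added further down re-run the bringup sequence on a vCPU that comes back from play_dead, without entering cpu_idle() a second time. The resulting control flow, sketched from the hunks in this patch:

    /*
     * first boot of a secondary vCPU:
     *   cpu_bringup_and_idle() -> cpu_bringup() -> cpu_idle()      (never returns)
     *
     * offline/online cycle (CONFIG_HOTPLUG_CPU, added below):
     *   xen_play_dead() -> play_dead_common()
     *                   -> HYPERVISOR_vcpu_op(VCPUOP_down, ...)    (vCPU parks here)
     *                   -> cpu_bringup()                           (runs again on re-online)
     *                   -> return to the idle loop
     */
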
 
@@ -157,7 +154,7 @@ static void __init xen_fill_possible_map(void)
 {
        int i, rc;
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
@@ -191,11 +188,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        if (xen_smp_intr_init(0))
                BUG();
 
-       xen_cpu_initialized_map = cpumask_of_cpu(0);
+       if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+               panic("could not allocate xen_cpu_initialized_map\n");
+
+       cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
 
        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-               for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
+               for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                cpu_clear(cpu, cpu_possible_map);
        }
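
xen_cpu_initialized_map also changes type from cpumask_t to cpumask_var_t, which must be allocated before first use (with CONFIG_CPUMASK_OFFSTACK=y it is a real allocation; otherwise it degenerates to an embedded mask). A minimal sketch of the lifecycle this file now follows, using the same cpumask calls as this hunk and as cpu_initialize_context() below; the function name is made up for illustration:

    /* sketch of the cpumask_var_t alloc/copy/test-and-set/free lifecycle */
    static int example_cpumask_lifecycle(unsigned int cpu)
    {
    	cpumask_var_t mask;

    	if (!alloc_cpumask_var(&mask, GFP_KERNEL))	/* may really allocate */
    		return -ENOMEM;

    	cpumask_copy(mask, cpumask_of(0));		/* seed with the boot CPU */

    	if (cpumask_test_and_set_cpu(cpu, mask)) {	/* returns the old bit */
    		free_cpumask_var(mask);
    		return 0;				/* already initialized */
    	}

    	/* ... one-time per-cpu initialization would go here ... */

    	free_cpumask_var(mask);				/* pairs with alloc_cpumask_var() */
    	return 0;
    }

xen_cpu_initialized_map itself is never freed; it lives for the lifetime of the system, so only the alloc/copy/test-and-set part of the pattern appears in the patch.
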
@@ -212,8 +212,6 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 
                cpu_set(cpu, cpu_present_map);
        }
-
-       //init_xenbus_allowed_cpumask();
 }
 
 static __cpuinit int
@@ -222,7 +220,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
 
-       if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
+       if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;
 
        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
@@ -281,28 +279,10 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
        struct task_struct *idle = idle_task(cpu);
        int rc;
 
-#if 0
-       rc = cpu_up_check(cpu);
-       if (rc)
-               return rc;
-#endif
-
-#ifdef CONFIG_X86_64
-       /* Allocate node local memory for AP pdas */
-       WARN_ON(cpu == 0);
-       if (cpu > 0) {
-               rc = get_local_pda(cpu);
-               if (rc)
-                       return rc;
-       }
-#endif
-
-#ifdef CONFIG_X86_32
-       init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
+#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
 #else
-       cpu_pda(cpu)->pcurrent = idle;
        clear_tsk_thread_flag(idle, TIF_FORK);
 #endif
        xen_setup_timer(cpu);
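
This hunk is where the subject line touches the Xen code: xen_cpu_up() no longer calls init_gdt(cpu) or hand-allocates a 64-bit PDA, since the per-cpu GDT segment and per-cpu area are now handled by the common x86 per-cpu setup (per the subject) before the CPU is brought up. Because the interleaved +/- lines around the #ifdefs are hard to read, here is how the start of the function looks once the hunk is applied (the rest is unchanged):

    static int __cpuinit xen_cpu_up(unsigned int cpu)
    {
    	struct task_struct *idle = idle_task(cpu);
    	int rc;

    	per_cpu(current_task, cpu) = idle;	/* common to 32- and 64-bit */
    #ifdef CONFIG_X86_32
    	irq_ctx_init(cpu);			/* per-cpu hard/soft irq stacks */
    #else
    	clear_tsk_thread_flag(idle, TIF_FORK);
    #endif
    	xen_setup_timer(cpu);
    	/* ... remainder of the function is unchanged by this hunk ... */
    }
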
@@ -339,6 +319,60 @@ static void xen_smp_cpus_done(unsigned int max_cpus)
 {
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int xen_cpu_disable(void)
+{
+       unsigned int cpu = smp_processor_id();
+       if (cpu == 0)
+               return -EBUSY;
+
+       cpu_disable_common();
+
+       load_cr3(swapper_pg_dir);
+       return 0;
+}
+
+static void xen_cpu_die(unsigned int cpu)
+{
+       while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+               current->state = TASK_UNINTERRUPTIBLE;
+               schedule_timeout(HZ/10);
+       }
+       unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+       xen_uninit_lock_cpu(cpu);
+       xen_teardown_timer(cpu);
+
+       if (num_online_cpus() == 1)
+               alternatives_smp_switch(0);
+}
+
+static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
+{
+       play_dead_common();
+       HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+       cpu_bringup();
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+static int xen_cpu_disable(void)
+{
+       return -ENOSYS;
+}
+
+static void xen_cpu_die(unsigned int cpu)
+{
+       BUG();
+}
+
+static void xen_play_dead(void)
+{
+       BUG();
+}
+
+#endif
 static void stop_self(void *v)
 {
        int cpu = smp_processor_id();
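
The three new callbacks hook into the generic x86 hotplug machinery through smp_ops, which the xen_smp_ops initializer at the bottom of the file now fills in. A rough sketch of the offline sequence, assuming the usual asm/smp.h wrappers that dispatch through smp_ops (the wrapper names come from the generic code, not from this patch):

    /*
     * on the CPU going down (invoked from the generic hotplug path):
     *   __cpu_disable()  -> smp_ops.cpu_disable()  -> xen_cpu_disable()
     *                       refuses CPU 0, runs cpu_disable_common(),
     *                       then switches to swapper_pg_dir
     *   play_dead()      -> smp_ops.play_dead()    -> xen_play_dead()
     *                       VCPUOP_down parks the vCPU; if it is later
     *                       re-onlined it resumes here and calls cpu_bringup()
     *
     * on a surviving CPU:
     *   __cpu_die(cpu)   -> smp_ops.cpu_die()      -> xen_cpu_die()
     *                       polls VCPUOP_is_up until the vCPU is gone, then
     *                       unbinds the per-cpu IRQs, the spinlock kicker
     *                       and the timer
     */
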
@@ -361,24 +395,23 @@ static void xen_smp_send_reschedule(int cpu)
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+                             enum ipi_vector vector)
 {
        unsigned cpu;
 
-       cpus_and(mask, mask, cpu_online_map);
-
-       for_each_cpu_mask_nr(cpu, mask)
+       for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
 }
 
-static void xen_smp_send_call_function_ipi(cpumask_t mask)
+static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
        int cpu;
 
        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
        /* Make sure other vcpus get a chance to run if they need to. */
-       for_each_cpu_mask_nr(cpu, mask) {
+       for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
                        break;
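
xen_send_IPI_mask() now takes a const struct cpumask * and uses for_each_cpu_and(), so it no longer has to modify (and therefore take a full copy of) the caller's mask just to intersect it with the online map. A minimal sketch of the pattern; the function name is hypothetical:

    /* sketch: walk "mask AND cpu_online_mask" without a temporary cpumask */
    static void example_ipi_online_subset(const struct cpumask *mask)
    {
    	unsigned int cpu;

    	/* iterates over the intersection; *mask is never written, so
    	 * const sources such as cpumask_of(cpu) can be passed directly */
    	for_each_cpu_and(cpu, mask, cpu_online_mask)
    		xen_send_IPI_one(cpu, XEN_CALL_FUNCTION_VECTOR);
    }
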
@@ -388,18 +421,15 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-       xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+       xen_send_IPI_mask(cpumask_of(cpu),
+                         XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
        irq_enter();
        generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       inc_irq_stat(irq_call_count);
        irq_exit();
 
        return IRQ_HANDLED;
@@ -409,186 +439,22 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
        irq_enter();
        generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       inc_irq_stat(irq_call_count);
        irq_exit();
 
        return IRQ_HANDLED;
 }
 
-struct xen_spinlock {
-       unsigned char lock;             /* 0 -> free; 1 -> locked */
-       unsigned short spinners;        /* count of waiting cpus */
-};
-
-static int xen_spin_is_locked(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       return xl->lock != 0;
-}
-
-static int xen_spin_is_contended(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       /* Not strictly true; this is only the count of contended
-          lock-takers entering the slow path. */
-       return xl->spinners != 0;
-}
-
-static int xen_spin_trylock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       u8 old = 1;
-
-       asm("xchgb %b0,%1"
-           : "+q" (old), "+m" (xl->lock) : : "memory");
-
-       return old == 0;
-}
-
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-
-static inline void spinning_lock(struct xen_spinlock *xl)
-{
-       __get_cpu_var(lock_spinners) = xl;
-       wmb();                  /* set lock of interest before count */
-       asm(LOCK_PREFIX " incw %0"
-           : "+m" (xl->spinners) : : "memory");
-}
-
-static inline void unspinning_lock(struct xen_spinlock *xl)
-{
-       asm(LOCK_PREFIX " decw %0"
-           : "+m" (xl->spinners) : : "memory");
-       wmb();                  /* decrement count before clearing lock */
-       __get_cpu_var(lock_spinners) = NULL;
-}
-
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       int irq = __get_cpu_var(lock_kicker_irq);
-       int ret;
-
-       /* If kicker interrupts not initialized yet, just spin */
-       if (irq == -1)
-               return 0;
-
-       /* announce we're spinning */
-       spinning_lock(xl);
-
-       /* clear pending */
-       xen_clear_irq_pending(irq);
-
-       /* check again make sure it didn't become free while
-          we weren't looking  */
-       ret = xen_spin_trylock(lock);
-       if (ret)
-               goto out;
-
-       /* block until irq becomes pending */
-       xen_poll_irq(irq);
-       kstat_this_cpu.irqs[irq]++;
-
-out:
-       unspinning_lock(xl);
-       return ret;
-}
-
-static void xen_spin_lock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       int timeout;
-       u8 oldval;
-
-       do {
-               timeout = 1 << 10;
-
-               asm("1: xchgb %1,%0\n"
-                   "   testb %1,%1\n"
-                   "   jz 3f\n"
-                   "2: rep;nop\n"
-                   "   cmpb $0,%0\n"
-                   "   je 1b\n"
-                   "   dec %2\n"
-                   "   jnz 2b\n"
-                   "3:\n"
-                   : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
-                   : "1" (1)
-                   : "memory");
-
-       } while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
-}
-
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
-{
-       int cpu;
-
-       for_each_online_cpu(cpu) {
-               /* XXX should mix up next cpu selection */
-               if (per_cpu(lock_spinners, cpu) == xl) {
-                       xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-                       break;
-               }
-       }
-}
-
-static void xen_spin_unlock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       smp_wmb();              /* make sure no writes get moved after unlock */
-       xl->lock = 0;           /* release lock */
-
-       /* make sure unlock happens before kick */
-       barrier();
-
-       if (unlikely(xl->spinners))
-               xen_spin_unlock_slow(xl);
-}
-
-static __cpuinit void xen_init_lock_cpu(int cpu)
-{
-       int irq;
-       const char *name;
-
-       name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
-       irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
-                                    cpu,
-                                    xen_reschedule_interrupt,
-                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
-                                    name,
-                                    NULL);
-
-       if (irq >= 0) {
-               disable_irq(irq); /* make sure it's never delivered */
-               per_cpu(lock_kicker_irq, cpu) = irq;
-       }
-
-       printk("cpu %d spinlock event irq %d\n", cpu, irq);
-}
-
-static void __init xen_init_spinlocks(void)
-{
-       pv_lock_ops.spin_is_locked = xen_spin_is_locked;
-       pv_lock_ops.spin_is_contended = xen_spin_is_contended;
-       pv_lock_ops.spin_lock = xen_spin_lock;
-       pv_lock_ops.spin_trylock = xen_spin_trylock;
-       pv_lock_ops.spin_unlock = xen_spin_unlock;
-}
-
 static const struct smp_ops xen_smp_ops __initdata = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
-       .cpu_up = xen_cpu_up,
        .smp_cpus_done = xen_smp_cpus_done,
 
+       .cpu_up = xen_cpu_up,
+       .cpu_die = xen_cpu_die,
+       .cpu_disable = xen_cpu_disable,
+       .play_dead = xen_play_dead,
+
        .smp_send_stop = xen_smp_send_stop,
        .smp_send_reschedule = xen_smp_send_reschedule,