include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
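
The include-cleanup part of this change is the new #include <linux/slab.h> in the first hunk: once slab.h stops being pulled in implicitly through other headers, any file that calls into the slab allocator has to include it directly or it no longer builds. A minimal sketch of the failure mode (the helper and its kzalloc() use are illustrative, not taken from this file):

  #include <linux/slab.h>         /* kzalloc(), kfree(), GFP_KERNEL */

  static int *alloc_scratch(void)
  {
          /* Without the explicit include above, dropping the implicit
           * slab.h inclusion chain turns this call into an
           * implicit-declaration error at build time. */
          return kzalloc(sizeof(int), GFP_KERNEL);
  }
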
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 7735e3d..a29693f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -14,6 +14,7 @@
  */
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 #include <linux/smp.h>
 
 #include <asm/paravirt.h>
 
 cpumask_var_t xen_cpu_initialized_map;
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, callfuncsingle_irq);
-static DEFINE_PER_CPU(int, debug_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq);
+static DEFINE_PER_CPU(int, xen_callfunc_irq);
+static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
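
The per-CPU IRQ bookkeeping variables also gain a xen_ prefix, presumably because per-CPU symbols share the ordinary global namespace, so generic names such as resched_irq or debug_irq invite collisions with other subsystems. The access pattern itself does not change; a minimal sketch (the helper name is made up for illustration):

  #include <linux/percpu.h>

  static DEFINE_PER_CPU(int, xen_resched_irq);

  static void xen_record_resched_irq(unsigned int cpu, int irq)
  {
          /* per_cpu() selects the named variable's copy belonging to 'cpu' */
          per_cpu(xen_resched_irq, cpu) = irq;
  }
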
@@ -73,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
 
        xen_setup_cpu_clockevents();
 
-       cpu_set(cpu, cpu_online_map);
+       set_cpu_online(cpu, true);
        percpu_write(cpu_state, CPU_ONLINE);
        wmb();
 
@@ -103,7 +104,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                    NULL);
        if (rc < 0)
                goto fail;
-       per_cpu(resched_irq, cpu) = rc;
+       per_cpu(xen_resched_irq, cpu) = rc;
 
        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +115,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                    NULL);
        if (rc < 0)
                goto fail;
-       per_cpu(callfunc_irq, cpu) = rc;
+       per_cpu(xen_callfunc_irq, cpu) = rc;
 
        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +123,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
-       per_cpu(debug_irq, cpu) = rc;
+       per_cpu(xen_debug_irq, cpu) = rc;
 
        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +134,20 @@ static int xen_smp_intr_init(unsigned int cpu)
                                    NULL);
        if (rc < 0)
                goto fail;
-       per_cpu(callfuncsingle_irq, cpu) = rc;
+       per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
        return 0;
 
  fail:
-       if (per_cpu(resched_irq, cpu) >= 0)
-               unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-       if (per_cpu(callfunc_irq, cpu) >= 0)
-               unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-       if (per_cpu(debug_irq, cpu) >= 0)
-               unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-       if (per_cpu(callfuncsingle_irq, cpu) >= 0)
-               unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+       if (per_cpu(xen_resched_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+       if (per_cpu(xen_callfunc_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+       if (per_cpu(xen_debug_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+       if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
+                                      NULL);
 
        return rc;
 }
@@ -158,7 +160,7 @@ static void __init xen_fill_possible_map(void)
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
-                       cpu_set(i, cpu_possible_map);
+                       set_cpu_possible(i, true);
                }
        }
 }
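
Here, in cpu_bringup() above, and in xen_smp_prepare_cpus() below, open-coded cpu_set()/cpu_clear() calls on the cpu_*_map bitmaps are replaced by the cpumask accessor functions, which update the masks without the caller touching the underlying bitmaps directly. A side-by-side sketch (the wrapper function is illustrative):

  #include <linux/cpumask.h>

  static void mark_cpu_up(unsigned int cpu)
  {
          set_cpu_possible(cpu, true);    /* was cpu_set(cpu, cpu_possible_map) */
          set_cpu_present(cpu, true);     /* was cpu_set(cpu, cpu_present_map)  */
          set_cpu_online(cpu, true);      /* was cpu_set(cpu, cpu_online_map)   */
  }
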
@@ -170,7 +172,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
-       make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
+       make_lowmem_page_readwrite(xen_initial_gdt);
 
        xen_setup_vcpu_info_placement();
 }
@@ -197,7 +199,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
-               cpu_clear(cpu, cpu_possible_map);
+               set_cpu_possible(cpu, false);
        }
 
        for_each_possible_cpu (cpu) {
@@ -210,7 +212,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);
 
-               cpu_set(cpu, cpu_present_map);
+               set_cpu_present(cpu, true);
        }
 }
 
@@ -219,6 +221,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
+       unsigned long gdt_mfn;
 
        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;
@@ -235,6 +238,9 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
+       ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
+#else
+       ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
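
The new vCPU's segment state is now set up so that per-CPU and stack-protector code works from the first instruction: on 32-bit, %gs is pointed at the stack-canary segment (__KERNEL_STACK_CANARY) so -fstack-protector code can run immediately, while on 64-bit gs_base_kernel gets the CPU's per-cpu offset, since per-CPU data (including the canary) is addressed relative to %gs there. A small sketch of the kind of access that depends on this (the per-CPU variable is illustrative):

  #include <linux/percpu.h>

  static DEFINE_PER_CPU(int, xen_example_counter);

  static void touch_percpu_early(void)
  {
          /* On x86_64 this compiles to a %gs-relative store, which only
           * works once gs_base_kernel points at this CPU's per-cpu area. */
          percpu_write(xen_example_counter, 1);
  }
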
@@ -246,9 +252,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->ldt_ents = 0;
 
        BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+
+       gdt_mfn = arbitrary_virt_to_mfn(gdt);
        make_lowmem_page_readonly(gdt);
+       make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
 
-       ctxt->gdt_frames[0] = virt_to_mfn(gdt);
+       ctxt->gdt_frames[0] = gdt_mfn;
        ctxt->gdt_ents      = GDT_ENTRIES;
 
        ctxt->user_regs.cs = __KERNEL_CS;
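
The GDT registration stops assuming the GDT lives in the linear mapping: arbitrary_virt_to_mfn() resolves the machine frame through the page tables (the GDT may sit in a vmalloc-backed per-cpu allocation, where virt_to_mfn() would give the wrong answer), and the linear-map alias of the same frame is made read-only too, since the frame must not stay writable through any mapping if Xen is to accept it as a GDT. Restated as a sketch with comments (assumes the Xen mmu helpers and structures are in scope, as they are in this file):

  static void xen_register_gdt(struct vcpu_guest_context *ctxt,
                               struct desc_struct *gdt)
  {
          /* Page-table walk: also correct for vmalloc/per-cpu addresses */
          unsigned long gdt_mfn = arbitrary_virt_to_mfn(gdt);

          make_lowmem_page_readonly(gdt);                   /* mapping in use   */
          make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));  /* linear-map alias */

          ctxt->gdt_frames[0] = gdt_mfn;     /* Xen wants machine frame numbers */
          ctxt->gdt_ents      = GDT_ENTRIES;
  }
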
@@ -284,7 +293,11 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
        irq_ctx_init(cpu);
 #else
        clear_tsk_thread_flag(idle, TIF_FORK);
+       per_cpu(kernel_stack, cpu) =
+               (unsigned long)task_stack_page(idle) -
+               KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
+       xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);
 
@@ -308,7 +321,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
        BUG_ON(rc);
 
        while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
-               HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+               HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }
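
Two smaller bring-up details sit in the last two hunks of xen_cpu_up(): on 64-bit, the per-CPU kernel_stack pointer is primed from the idle task's stack before the vCPU starts (the entry code reads it through %gs), the run-state area is registered via xen_setup_runstate_info(), and the unused second argument of the yield hypercall is spelled NULL because HYPERVISOR_sched_op() takes a pointer there. The wait loop, restated as a sketch (the wrapper name is illustrative; cpu_state and the hypercall wrappers are assumed in scope as in this file):

  static void xen_wait_for_online(unsigned int cpu)
  {
          /* Yield to the hypervisor instead of spinning so the new vCPU
           * gets scheduled and can mark itself CPU_ONLINE; the hypercall's
           * second parameter is a pointer, hence NULL rather than 0. */
          while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                  HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                  barrier();
          }
  }
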
 
@@ -338,10 +351,10 @@ static void xen_cpu_die(unsigned int cpu)
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
-       unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-       unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-       unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-       unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
 
@@ -349,7 +362,7 @@ static void xen_cpu_die(unsigned int cpu)
                alternatives_smp_switch(0);
 }
 
-static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
+static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
 {
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
@@ -413,7 +426,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
-                       HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+                       HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }