Merge branch 'generic-ipi' into generic-ipi-for-linus
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 845b4fd..bb50845 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -25,6 +25,7 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
+#include <linux/console.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
@@ -44,6 +45,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
+#include <asm/pgalloc.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -74,13 +76,13 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);      /* actual vcpu cr3 */
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
-static /* __initdata */ struct shared_info dummy_shared_info;
+struct shared_info xen_dummy_shared_info;
 
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
  */
-struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
+struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
 
 /*
  * Flag to determine whether vcpu info placement is available on all
@@ -95,14 +97,15 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
  *
  * 0: not available, 1: available
  */
-static int have_vcpu_info_placement = 0;
+static int have_vcpu_info_placement = 1;
 
-static void __init xen_vcpu_setup(int cpu)
+static void xen_vcpu_setup(int cpu)
 {
        struct vcpu_register_vcpu_info info;
        int err;
        struct vcpu_info *vcpup;
 
+       BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
        per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
        if (!have_vcpu_info_placement)
@@ -134,11 +137,41 @@ static void __init xen_vcpu_setup(int cpu)
        }
 }
 
+/*
+ * On restore, set the vcpu placement up again.
+ * If it fails, then we're in a bad state, since
+ * we can't back out from using it...
+ */
+void xen_vcpu_restore(void)
+{
+       if (have_vcpu_info_placement) {
+               int cpu;
+
+               for_each_online_cpu(cpu) {
+                       bool other_cpu = (cpu != smp_processor_id());
+
+                       if (other_cpu &&
+                           HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
+                               BUG();
+
+                       xen_vcpu_setup(cpu);
+
+                       if (other_cpu &&
+                           HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
+                               BUG();
+               }
+
+               BUG_ON(!have_vcpu_info_placement);
+       }
+}
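For context, a hedged sketch of where xen_vcpu_restore() is meant to sit: on resume, after the shared_info page has been remapped (via xen_setup_shared_info(), introduced later in this patch) and before secondary CPUs come back up. The caller name below is hypothetical; the real call site lives in the suspend/resume path, which is not part of this hunk.

	/* Hypothetical resume path (sketch only, not part of the patch) */
	static void example_post_restore(void)
	{
		xen_setup_shared_info();  /* remap shared_info away from the dummy page */
		xen_vcpu_restore();       /* then re-register per-cpu vcpu_info placement */
	}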
+
 static void __init xen_banner(void)
 {
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
-       printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
+       printk(KERN_INFO "Hypervisor signature: %s%s\n",
+              xen_start_info->magic,
+              xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
 }
 
 static void xen_cpuid(unsigned int *ax, unsigned int *bx,
@@ -153,6 +186,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
        if (*ax == 1)
                maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
                            (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
+                           (1 << X86_FEATURE_MCE)  |  /* disable MCE */
+                           (1 << X86_FEATURE_MCA)  |  /* disable MCA */
                            (1 << X86_FEATURE_ACC));   /* thermal monitoring */
 
        asm(XEN_EMULATE_PREFIX "cpuid"
@@ -231,13 +266,13 @@ static void xen_irq_enable(void)
 {
        struct vcpu_info *vcpu;
 
-       /* There's a one instruction preempt window here.  We need to
-          make sure we don't switch CPUs between getting the vcpu
-          pointer and updating the mask. */
-       preempt_disable();
+       /* We don't need to worry about being preempted here, since
+          either a) interrupts are disabled, so no preemption, or b)
+          the caller is confused and is trying to re-enable interrupts
+          on an indeterminate processor. */
+
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;
-       preempt_enable_no_resched();
 
        /* Doesn't matter if we get preempted here, because any
           pending event will get dealt with anyway. */
@@ -250,7 +285,7 @@ static void xen_irq_enable(void)
 static void xen_safe_halt(void)
 {
        /* Blocking includes an implicit local_irq_enable(). */
-       if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
+       if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
                BUG();
 }
 
@@ -528,26 +563,37 @@ static void xen_apic_write(unsigned long reg, u32 val)
 static void xen_flush_tlb(void)
 {
        struct mmuext_op *op;
-       struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+       struct multicall_space mcs;
+
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*op));
 
        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
        xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
 }
 
 static void xen_flush_tlb_single(unsigned long addr)
 {
        struct mmuext_op *op;
-       struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+       struct multicall_space mcs;
+
+       preempt_disable();
 
+       mcs = xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = MMUEXT_INVLPG_LOCAL;
        op->arg1.linear_addr = addr & PAGE_MASK;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
        xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
 }
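The new preempt_disable()/preempt_enable() pairs exist because xen_mc_entry() reserves a slot in a per-CPU multicall buffer; if the task migrated between reserving the slot and xen_mc_issue(), the op would be queued on one CPU's buffer and flushed from another's. A minimal sketch of the shape both flush functions now follow, using only calls visible in this diff:

	/* Sketch: keep the per-CPU multicall window migration-safe */
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();
	mcs = xen_mc_entry(sizeof(*op));   /* slot in THIS cpu's buffer */
	/* ... fill mcs.args and wrap it with MULTI_mmuext_op() ... */
	xen_mc_issue(PARAVIRT_LAZY_MMU);   /* flushes THIS cpu's buffer */
	preempt_enable();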
 
 static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
@@ -592,6 +638,30 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
+static void xen_clts(void)
+{
+       struct multicall_space mcs;
+
+       mcs = xen_mc_entry(0);
+
+       MULTI_fpu_taskswitch(mcs.mc, 0);
+
+       xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_write_cr0(unsigned long cr0)
+{
+       struct multicall_space mcs;
+
+       /* Only pay attention to cr0.TS; everything else is
+          ignored. */
+       mcs = xen_mc_entry(0);
+
+       MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
+
+       xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
 static void xen_write_cr2(unsigned long cr2)
 {
        x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
@@ -609,8 +679,10 @@ static unsigned long xen_read_cr2_direct(void)
 
 static void xen_write_cr4(unsigned long cr4)
 {
-       /* Just ignore cr4 changes; Xen doesn't allow us to do
-          anything anyway. */
+       cr4 &= ~X86_CR4_PGE;
+       cr4 &= ~X86_CR4_PSE;
+
+       native_write_cr4(cr4);
 }
 
 static unsigned long xen_read_cr3(void)
@@ -652,16 +724,25 @@ static void xen_write_cr3(unsigned long cr3)
 
 /* Early in boot, while setting up the initial pagetable, assume
    everything is pinned. */
-static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
+static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn)
 {
+#ifdef CONFIG_FLATMEM
        BUG_ON(mem_map);        /* should only be used early */
+#endif
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
 }
 
-static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
+/* Early release_pte assumes that all pts are pinned, since there's
+   only init_mm and anything attached to that is pinned. */
+static void xen_release_pte_init(u32 pfn)
+{
+       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
+static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
 {
        struct mmuext_op op;
-       op.cmd = level;
+       op.cmd = cmd;
        op.arg1.mfn = pfn_to_mfn(pfn);
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                BUG();
@@ -669,7 +750,7 @@ static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
 
 /* This needs to make sure the new pte page is pinned iff it's being
    attached to a pinned pagetable. */
-static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
 {
        struct page *page = pfn_to_page(pfn);
 
@@ -678,7 +759,8 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
 
                if (!PageHighMem(page)) {
                        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
-                       pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+                       if (level == PT_PTE)
+                               pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
                } else
                        /* make sure there are no stray mappings of
                           this page */
@@ -686,19 +768,41 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
        }
 }
 
+static void xen_alloc_pte(struct mm_struct *mm, u32 pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PTE);
+}
+
+static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PMD);
+}
+
 /* This should never happen until we're OK to use struct page */
-static void xen_release_pt(u32 pfn)
+static void xen_release_ptpage(u32 pfn, unsigned level)
 {
        struct page *page = pfn_to_page(pfn);
 
        if (PagePinned(page)) {
                if (!PageHighMem(page)) {
-                       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+                       if (level == PT_PTE)
+                               pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
                        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
                }
+               ClearPagePinned(page);
        }
 }
 
+static void xen_release_pte(u32 pfn)
+{
+       xen_release_ptpage(pfn, PT_PTE);
+}
+
+static void xen_release_pmd(u32 pfn)
+{
+       xen_release_ptpage(pfn, PT_PMD);
+}
+
 #ifdef CONFIG_HIGHPTE
 static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 {
@@ -738,38 +842,35 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
        pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
+       int i;
 
        /* special set_pte for pagetable initialization */
        pv_mmu_ops.set_pte = xen_set_pte_init;
 
        init_mm.pgd = base;
        /*
-        * copy top-level of Xen-supplied pagetable into place.  For
-        * !PAE we can use this as-is, but for PAE it is a stand-in
-        * while we copy the pmd pages.
+        * copy top-level of Xen-supplied pagetable into place.  This
+        * is a stand-in while we copy the pmd pages.
         */
        memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
-       if (PTRS_PER_PMD > 1) {
-               int i;
-               /*
-                * For PAE, need to allocate new pmds, rather than
-                * share Xen's, since Xen doesn't like pmd's being
-                * shared between address spaces.
-                */
-               for (i = 0; i < PTRS_PER_PGD; i++) {
-                       if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
-                               pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+       /*
+        * For PAE, need to allocate new pmds, rather than
+        * share Xen's, since Xen doesn't like pmd's being
+        * shared between address spaces.
+        */
+       for (i = 0; i < PTRS_PER_PGD; i++) {
+               if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
+                       pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 
-                               memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
-                                      PAGE_SIZE);
+                       memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
+                              PAGE_SIZE);
 
-                               make_lowmem_page_readonly(pmd);
+                       make_lowmem_page_readonly(pmd);
 
-                               set_pgd(&base[i], __pgd(1 + __pa(pmd)));
-                       } else
-                               pgd_clear(&base[i]);
-               }
+                       set_pgd(&base[i], __pgd(1 + __pa(pmd)));
+               } else
+                       pgd_clear(&base[i]);
        }
 
        /* make sure zero_page is mapped RO so we can use it in pagetables */
@@ -781,49 +882,66 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
         * added to the table can be prepared properly for Xen.
         */
        xen_write_cr3(__pa(base));
+
+       /* Unpin initial Xen pagetable */
+       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
+                         PFN_DOWN(__pa(xen_start_info->pt_base)));
 }
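The earlier rename of pin_pagetable_pfn()'s first parameter from "level" to "cmd" is what makes this new unpin call read sensibly: the helper now issues any MMUEXT_* pin/unpin command, not just per-level pins. Restating the two call sites from this patch side by side:

	/* Sketch: after the "level" -> "cmd" rename, one helper covers both directions */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(xen_start_info->pt_base)));
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));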
 
-static __init void xen_pagetable_setup_done(pgd_t *base)
+void xen_setup_shared_info(void)
 {
-       /* This will work as long as patching hasn't happened yet
-          (which it hasn't) */
-       pv_mmu_ops.alloc_pt = xen_alloc_pt;
-       pv_mmu_ops.set_pte = xen_set_pte;
-
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               unsigned long addr = fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+
                /*
                 * Create a mapping for the shared info page.
                 * Should be set_fixmap(), but shared_info is a machine
                 * address with no corresponding pseudo-phys address.
                 */
-               set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
+               set_pte_mfn(addr,
                            PFN_DOWN(xen_start_info->shared_info),
                            PAGE_KERNEL);
 
-               HYPERVISOR_shared_info =
-                       (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
-
+               HYPERVISOR_shared_info = (struct shared_info *)addr;
        } else
                HYPERVISOR_shared_info =
                        (struct shared_info *)__va(xen_start_info->shared_info);
 
+#ifndef CONFIG_SMP
+       /* In UP this is as good a place as any to set up shared info */
+       xen_setup_vcpu_info_placement();
+#endif
+
+       xen_setup_mfn_list_list();
+}
+
+static __init void xen_pagetable_setup_done(pgd_t *base)
+{
+       /* This will work as long as patching hasn't happened yet
+          (which it hasn't) */
+       pv_mmu_ops.alloc_pte = xen_alloc_pte;
+       pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+       pv_mmu_ops.release_pte = xen_release_pte;
+       pv_mmu_ops.release_pmd = xen_release_pmd;
+       pv_mmu_ops.set_pte = xen_set_pte;
+
+       xen_setup_shared_info();
+
        /* Actually pin the pagetable down, but we can't set PG_pinned
           yet because the page structures don't exist yet. */
-       {
-               unsigned level;
+       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));
+}
 
-#ifdef CONFIG_X86_PAE
-               level = MMUEXT_PIN_L3_TABLE;
-#else
-               level = MMUEXT_PIN_L2_TABLE;
-#endif
+static __init void xen_post_allocator_init(void)
+{
+       pv_mmu_ops.set_pmd = xen_set_pmd;
+       pv_mmu_ops.set_pud = xen_set_pud;
 
-               pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
-       }
+       xen_mark_init_mm_pinned();
 }
 
 /* This is called once we have the cpu_possible_map */
-void __init xen_setup_vcpu_info_placement(void)
+void xen_setup_vcpu_info_placement(void)
 {
        int cpu;
 
@@ -840,7 +958,6 @@ void __init xen_setup_vcpu_info_placement(void)
                pv_irq_ops.irq_disable = xen_irq_disable_direct;
                pv_irq_ops.irq_enable = xen_irq_enable_direct;
                pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
-               pv_cpu_ops.iret = xen_iret_direct;
        }
 }
 
@@ -897,6 +1014,33 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
        return ret;
 }
 
+static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
+{
+       pte_t pte;
+
+       phys >>= PAGE_SHIFT;
+
+       switch (idx) {
+       case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
+#ifdef CONFIG_X86_F00F_BUG
+       case FIX_F00F_IDT:
+#endif
+       case FIX_WP_TEST:
+       case FIX_VDSO:
+#ifdef CONFIG_X86_LOCAL_APIC
+       case FIX_APIC_BASE:     /* maps dummy local APIC */
+#endif
+               pte = pfn_pte(phys, prot);
+               break;
+
+       default:
+               pte = mfn_pte(phys, prot);
+               break;
+       }
+
+       __native_set_fixmap(idx, pte);
+}
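The switch encodes which fixmap slots are backed by guest pseudo-physical frames versus machine frames owned by the hypervisor or hardware. An illustration (not part of the patch) of how the same raw frame number yields different PTEs depending on the constructor:

	/* Illustrative only: same frame number, different meaning */
	unsigned long frame = 0x1234;                 /* hypothetical frame number */
	pte_t a = pfn_pte(frame, PAGE_KERNEL);        /* pseudo-phys frame, translated by the pv make_pte */
	pte_t b = mfn_pte(frame, PAGE_KERNEL);        /* machine frame, used as-is */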
+
 static const struct pv_info xen_info __initdata = {
        .paravirt_enabled = 1,
        .shared_kernel_pmd = 0,
@@ -910,7 +1054,7 @@ static const struct pv_init_ops xen_init_ops __initdata = {
        .banner = xen_banner,
        .memory_setup = xen_memory_setup,
        .arch_setup = xen_arch_setup,
-       .post_allocator_init = xen_mark_init_mm_pinned,
+       .post_allocator_init = xen_post_allocator_init,
 };
 
 static const struct pv_time_ops xen_time_ops __initdata = {
@@ -918,7 +1062,7 @@ static const struct pv_time_ops xen_time_ops __initdata = {
 
        .set_wallclock = xen_set_wallclock,
        .get_wallclock = xen_get_wallclock,
-       .get_cpu_khz = xen_cpu_khz,
+       .get_tsc_khz = xen_tsc_khz,
        .sched_clock = xen_sched_clock,
 };
 
@@ -928,10 +1072,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
        .set_debugreg = xen_set_debugreg,
        .get_debugreg = xen_get_debugreg,
 
-       .clts = native_clts,
+       .clts = xen_clts,
 
        .read_cr0 = native_read_cr0,
-       .write_cr0 = native_write_cr0,
+       .write_cr0 = xen_write_cr0,
 
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
@@ -944,8 +1088,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
 
-       .iret = (void *)&hypercall_page[__HYPERVISOR_iret],
-       .irq_enable_syscall_ret = NULL,  /* never called */
+       .iret = xen_iret,
+       .irq_enable_sysexit = xen_sysexit,
 
        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
@@ -979,6 +1123,9 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
        .irq_enable = xen_irq_enable,
        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
+#ifdef CONFIG_X86_64
+       .adjust_exception_frame = paravirt_nop,
+#endif
 };
 
 static const struct pv_apic_ops xen_apic_ops __initdata = {
@@ -1010,11 +1157,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,
 
-       .alloc_pt = xen_alloc_pt_init,
-       .release_pt = xen_release_pt,
-       .alloc_pd = paravirt_nop,
-       .alloc_pd_clone = paravirt_nop,
-       .release_pd = paravirt_nop,
+       .pgd_alloc = __paravirt_pgd_alloc,
+       .pgd_free = paravirt_nop,
+
+       .alloc_pte = xen_alloc_pte_init,
+       .release_pte = xen_release_pte_init,
+       .alloc_pmd = xen_alloc_pte_init,
+       .alloc_pmd_clone = paravirt_nop,
+       .release_pmd = xen_release_pte_init,
 
 #ifdef CONFIG_HIGHPTE
        .kmap_atomic_pte = xen_kmap_atomic_pte,
@@ -1022,24 +1172,26 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
        .set_pte = NULL,        /* see xen_pagetable_setup_* */
        .set_pte_at = xen_set_pte_at,
-       .set_pmd = xen_set_pmd,
+       .set_pmd = xen_set_pmd_hyper,
+
+       .ptep_modify_prot_start = __ptep_modify_prot_start,
+       .ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
        .pte_val = xen_pte_val,
+       .pte_flags = native_pte_val,
        .pgd_val = xen_pgd_val,
 
        .make_pte = xen_make_pte,
        .make_pgd = xen_make_pgd,
 
-#ifdef CONFIG_X86_PAE
        .set_pte_atomic = xen_set_pte_atomic,
        .set_pte_present = xen_set_pte_at,
-       .set_pud = xen_set_pud,
+       .set_pud = xen_set_pud_hyper,
        .pte_clear = xen_pte_clear,
        .pmd_clear = xen_pmd_clear,
 
        .make_pmd = xen_make_pmd,
        .pmd_val = xen_pmd_val,
-#endif /* PAE */
 
        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
@@ -1049,6 +1201,8 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
                .enter = paravirt_enter_lazy_mmu,
                .leave = xen_leave_lazy,
        },
+
+       .set_fixmap = xen_set_fixmap,
 };
 
 #ifdef CONFIG_SMP
@@ -1060,17 +1214,21 @@ static const struct smp_ops xen_smp_ops __initdata = {
 
        .smp_send_stop = xen_smp_send_stop,
        .smp_send_reschedule = xen_smp_send_reschedule,
-       .smp_call_function_mask = xen_smp_call_function_mask,
+
+       .send_call_func_ipi = xen_smp_send_call_function_ipi,
+       .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
 };
 #endif /* CONFIG_SMP */
 
 static void xen_reboot(int reason)
 {
+       struct sched_shutdown r = { .reason = reason };
+
 #ifdef CONFIG_SMP
        smp_send_stop();
 #endif
 
-       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
+       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
 }
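The xen_reboot() change fixes an argument-type bug rather than tweaking behaviour: SCHEDOP_shutdown expects a pointer to a struct sched_shutdown, and the old code passed the raw reason code where the hypercall wanted an address. The corrected call shape, restated outside the hunk:

	/* The hypercall takes a pointer to a struct, not the raw code */
	struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();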
 
@@ -1125,6 +1283,8 @@ asmlinkage void __init xen_start_kernel(void)
 
        BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);
 
+       xen_setup_features();
+
        /* Install Xen paravirt ops */
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
@@ -1134,21 +1294,26 @@ asmlinkage void __init xen_start_kernel(void)
        pv_apic_ops = xen_apic_ops;
        pv_mmu_ops = xen_mmu_ops;
 
+       if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
+               pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
+               pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
+       }
+
        machine_ops = xen_machine_ops;
 
 #ifdef CONFIG_SMP
        smp_ops = xen_smp_ops;
 #endif
 
-       xen_setup_features();
-
        /* Get mfn list */
        if (!xen_feature(XENFEAT_auto_translated_physmap))
-               phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
+               xen_build_dynamic_phys_to_machine();
 
        pgd = (pgd_t *)xen_start_info->pt_base;
 
+       init_pg_tables_start = __pa(pgd);
        init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+       max_pfn_mapped = (init_pg_tables_end + 512*1024) >> PAGE_SHIFT;
 
        init_mm.pgd = pgd; /* use the Xen pagetables to start */
 
@@ -1157,20 +1322,19 @@ asmlinkage void __init xen_start_kernel(void)
        x86_write_percpu(xen_cr3, __pa(pgd));
        x86_write_percpu(xen_current_cr3, __pa(pgd));
 
-#ifdef CONFIG_SMP
        /* Don't do the full vcpu_info placement stuff until we have a
-          possible map. */
+          possible map and a non-dummy shared_info. */
        per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
-#else
-       /* May as well do it now, since there's no good time to call
-          it later on UP. */
-       xen_setup_vcpu_info_placement();
-#endif
 
        pv_info.kernel_rpl = 1;
        if (xen_feature(XENFEAT_supervisor_mode_kernel))
                pv_info.kernel_rpl = 0;
 
+       /* Prevent unwanted bits from being set in PTEs. */
+       __supported_pte_mask &= ~_PAGE_GLOBAL;
+       if (!is_initial_xendomain())
+               __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
+
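Why these bits get masked: _PAGE_GLOBAL requires CR4.PGE, which the new xen_write_cr4() above unconditionally clears, and the PWT/PCD cache-control bits are only honoured for the initial domain. A hedged illustration of the filtering idiom (the AND against __supported_pte_mask is how the mask is consumed; "prot" is just an example name):

	/* Sketch: bits cleared in __supported_pte_mask never reach a PTE */
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) & __supported_pte_mask);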
        /* set the limit of our address space */
        xen_reserve_top();
 
@@ -1185,6 +1349,12 @@ asmlinkage void __init xen_start_kernel(void)
                ? __pa(xen_start_info->mod_start) : 0;
        boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
 
+       if (!is_initial_xendomain()) {
+               add_preferred_console("xenboot", 0, NULL);
+               add_preferred_console("tty", 0, NULL);
+               add_preferred_console("hvc", 0, NULL);
+       }
+
        /* Start the world */
-       start_kernel();
+       i386_start_kernel();
 }