x86: xen: 64-bit kernel RPL should be 0
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index fba55b1..bf4cd6b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -185,7 +185,7 @@ static inline unsigned p2m_index(unsigned long pfn)
 }
 
 /* Build the parallel p2m_top_mfn structures */
-static void __init xen_build_mfn_list_list(void)
+void xen_build_mfn_list_list(void)
 {
        unsigned pfn, idx;
 
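Dropping "static __init" exposes xen_build_mfn_list_list() beyond early
boot: the p2m_top_mfn tree records machine frame numbers, and those are
renumbered across a save/restore cycle, so a resume-time caller must be
able to rebuild it. A minimal sketch of such a caller, assuming a
declaration now lives in xen-ops.h (the function name below is
illustrative, not part of this diff):

	/* Illustrative resume hook: MFNs changed across save/restore,
	 * so rebuild the parallel p2m_top_mfn structures from the
	 * already-updated p2m tree. */
	void xen_rebuild_mfn_lists_on_resume(void)
	{
		xen_build_mfn_list_list();
	}
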
@@ -452,10 +452,6 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
 {
-       /* updates to init_mm may be done without lock */
-       if (mm == &init_mm)
-               preempt_disable();
-
        ADD_STATS(set_pte_at, 1);
 //     ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
        ADD_STATS(set_pte_at_current, mm == current->mm);
@@ -476,9 +472,7 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
        }
        xen_set_pte(ptep, pteval);
 
-out:
-       if (mm == &init_mm)
-               preempt_enable();
+out:   return;
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
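The explicit preempt_disable()/preempt_enable() bracket around init_mm
updates is gone; it is made redundant by xen_leave_lazy_mmu() later in
this diff, which flushes the per-CPU multicall queue with preemption
disabled, so a batched kernel-pagetable update can no longer be issued
from the wrong CPU. The fast path above exists to serve updates batched
under lazy MMU mode; a minimal sketch of that usage pattern via the
generic hooks (the wrapper function is illustrative):

	/* Under lazy MMU mode each set_pte_at() is queued as a
	 * multicall and issued in one hypercall at leave time. */
	static void example_batched_pte_update(struct mm_struct *mm,
					       unsigned long addr,
					       pte_t *ptep, pte_t pteval)
	{
		arch_enter_lazy_mmu_mode();
		set_pte_at(mm, addr, ptep, pteval);	/* queued, not issued */
		arch_leave_lazy_mmu_mode();	/* flush: xen_leave_lazy_mmu() */
	}
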
@@ -1152,10 +1146,8 @@ static void drop_other_mm_ref(void *info)
 
        /* If this cpu still has a stale cr3 reference, then make sure
           it has been flushed. */
-       if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
+       if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
                load_cr3(swapper_pg_dir);
-               arch_flush_lazy_cpu_mode();
-       }
 }
 
 static void xen_drop_mm_ref(struct mm_struct *mm)
@@ -1168,20 +1160,19 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
                        load_cr3(swapper_pg_dir);
                else
                        leave_mm(smp_processor_id());
-               arch_flush_lazy_cpu_mode();
        }
 
        /* Get the "official" set of cpus referring to our pagetable. */
        if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
                for_each_online_cpu(cpu) {
-                       if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+                       if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
                            && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
                                continue;
                        smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
                }
                return;
        }
-       cpumask_copy(mask, &mm->cpu_vm_mask);
+       cpumask_copy(mask, mm_cpumask(mm));
 
        /* It's possible that a vcpu may have a stale reference to our
           cr3, because its in lazy mode, and it hasn't yet flushed
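Two independent cleanups meet in these hunks: the
arch_flush_lazy_cpu_mode() calls disappear along with lazy CPU mode
itself (pending updates are now flushed at context-switch time), and
direct access to mm->cpu_vm_mask is replaced by the mm_cpumask()
accessor. The accessor is the future-safe wrapper from
include/linux/mm_types.h, approximately:

	/* Future-safe accessor for struct mm_struct's cpu_vm_mask:
	 * hides the field so its type can change without touching
	 * every caller. */
	#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)
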
@@ -1238,9 +1229,12 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
 {
 }
 
+static void xen_post_allocator_init(void);
+
 static __init void xen_pagetable_setup_done(pgd_t *base)
 {
        xen_setup_shared_info();
+       xen_post_allocator_init();
 }
 
 static void xen_write_cr2(unsigned long cr2)
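Because xen_post_allocator_init() becomes static further down, a forward
declaration is added and the call moves here: once the kernel pagetables
are handed over, the pv_mmu_ops are switched to their post-allocator
variants. The resulting boot ordering, roughly (the x86_init hooks are
wired up by xen_init_mmu_ops() at the end of this diff; the call site in
this kernel generation is arch/x86/kernel/setup.c):

	/*
	 * setup_arch()
	 *   x86_init.paging.pagetable_setup_start(swapper_pg_dir)
	 *   paging_init()
	 *   x86_init.paging.pagetable_setup_done(swapper_pg_dir)
	 *     -> xen_pagetable_setup_done()
	 *          xen_setup_shared_info()
	 *          xen_post_allocator_init()    (now static to mmu.c)
	 */
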
@@ -1850,7 +1844,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-__init void xen_post_allocator_init(void)
+static __init void xen_post_allocator_init(void)
 {
        pv_mmu_ops.set_pte = xen_set_pte;
        pv_mmu_ops.set_pmd = xen_set_pmd;
@@ -1876,10 +1870,15 @@ __init void xen_post_allocator_init(void)
        xen_mark_init_mm_pinned();
 }
 
-const struct pv_mmu_ops xen_mmu_ops __initdata = {
-       .pagetable_setup_start = xen_pagetable_setup_start,
-       .pagetable_setup_done = xen_pagetable_setup_done,
+static void xen_leave_lazy_mmu(void)
+{
+       preempt_disable();
+       xen_mc_flush();
+       paravirt_leave_lazy_mmu();
+       preempt_enable();
+}
 
+static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .read_cr2 = xen_read_cr2,
        .write_cr2 = xen_write_cr2,
 
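xen_leave_lazy_mmu() replaces the shared xen_leave_lazy(): the multicall
queue is per-CPU state, so the flush must run on the CPU that queued the
entries, hence the preempt bracket around xen_mc_flush(). This is also
what justifies the earlier xen_set_pte_at() change. An equivalent,
hypothetical shape using get_cpu()/put_cpu() makes the invariant
explicit (not part of the patch):

	/* The queue being flushed is implicitly this CPU's, so pin
	 * ourselves to it for the duration of the flush. */
	static void xen_leave_lazy_mmu_alt(void)
	{
		get_cpu();			/* disables preemption */
		xen_mc_flush();			/* drain this CPU's queue */
		paravirt_leave_lazy_mmu();	/* clear lazy-mode state */
		put_cpu();			/* re-enables preemption */
	}
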
@@ -1949,12 +1948,18 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
        .lazy_mode = {
                .enter = paravirt_enter_lazy_mmu,
-               .leave = xen_leave_lazy,
+               .leave = xen_leave_lazy_mmu,
        },
 
        .set_fixmap = xen_set_fixmap,
 };
 
+void __init xen_init_mmu_ops(void)
+{
+       x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
+       x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
+       pv_mmu_ops = xen_mmu_ops;
+}
 
 #ifdef CONFIG_XEN_DEBUG_FS
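
With xen_mmu_ops now static, boot code installs it through the new
xen_init_mmu_ops() entry point instead of referencing the structure
directly, and the pagetable_setup hooks migrate from pv_mmu_ops into the
x86_init.paging slots. A sketch of the intended call site in
arch/x86/xen/enlighten.c (approximate; the rest of the early bring-up is
omitted):

	asmlinkage void __init xen_start_kernel(void)
	{
		/* install xen_mmu_ops and the x86_init paging hooks */
		xen_init_mmu_ops();
		/* remainder of early Xen setup omitted */
	}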