include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 55a5d69..ce9fbac 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
+#include <linux/gfp.h>
 #include <asm/vmi.h>
 #include <asm/io.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
 #include <asm/apic.h>
+#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
 #include <asm/vmi_time.h>
@@ -266,30 +268,6 @@ static void vmi_nop(void)
 {
 }
 
-#ifdef CONFIG_HIGHPTE
-static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-       void *va = kmap_atomic(page, type);
-
-       /*
-        * Internally, the VMI ROM must map virtual addresses to physical
-        * addresses for processing MMU updates.  By the time MMU updates
-        * are issued, this information is typically already lost.
-        * Fortunately, the VMI provides a cache of mapping slots for active
-        * page tables.
-        *
-        * We use slot zero for the linear mapping of physical memory, and
-        * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
-        *
-        *  args:                 SLOT                 VA    COUNT PFN
-        */
-       BUG_ON(type != KM_PTE0 && type != KM_PTE1);
-       vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
-
-       return va;
-}
-#endif
-
 static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
 {
        vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
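
The helper removed above was the only user of the KM_PTE0/KM_PTE1 kmap slots; the convention its comment documents (slot 0 for the kernel's linear mapping, slots 1 and 2 for the two PTE kmap types) now matters only in its slot-0 form. As an illustrative sketch, not part of this patch (vmi_map_lowmem_sketch is a hypothetical name; __PAGE_OFFSET and max_low_pfn are the usual x86_32 symbols), a slot-0 call covering lowmem would look like:

    /*
     * Illustrative only: pin the VMI ROM's slot 0 to the kernel linear
     * mapping so MMU updates on lowmem page tables can be translated.
     * Arguments follow the SLOT, VA, COUNT (pages), PFN order noted in
     * the removed comment above.
     */
    static void vmi_map_lowmem_sketch(void)
    {
            vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
    }
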
@@ -395,11 +373,6 @@ static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
        vmi_ops.update_pte(ptep, VMI_PAGE_PT);
 }
 
-static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
-{
-       vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
-}
-
 static void vmi_set_pud(pud_t *pudp, pud_t pudval)
 {
        /* Um, eww */
@@ -446,7 +419,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
        ap.ds = __USER_DS;
        ap.es = __USER_DS;
        ap.fs = __KERNEL_PERCPU;
-       ap.gs = 0;
+       ap.gs = __KERNEL_STACK_CANARY;
 
        ap.eflags = 0;
 
@@ -645,6 +618,12 @@ static inline int __init activate_vmi(void)
        u64 reloc;
        const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
 
+       /*
+        * Prevent page tables from being allocated in highmem, even if
+        * CONFIG_HIGHPTE is enabled.
+        */
+       __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
        if (call_vrom_func(vmi_rom, vmi_init) != 0) {
                printk(KERN_ERR "VMI ROM failed to initialize!");
                return 0;
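
Clearing __GFP_HIGHMEM here works because user PTE pages on x86 are allocated through the mask exported as __userpte_alloc_gfp (declared via the new <asm/pgalloc.h> include above). A simplified sketch of the consumer side, abbreviated from arch/x86/mm/pgtable.c of this series (the real allocator carries a few more gfp bits and error handling):

    /*
     * Simplified consumer of __userpte_alloc_gfp: with __GFP_HIGHMEM
     * masked off above, user PTE pages always come from lowmem, so VMI
     * no longer needs a kmap_atomic_pte hook to reach them.
     */
    pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
    {
            struct page *pte;

            pte = alloc_pages(__userpte_alloc_gfp, 0);
            if (pte)
                    pgtable_page_ctor(pte);
            return pte;
    }
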
@@ -653,7 +632,7 @@ static inline int __init activate_vmi(void)
 
        pv_info.paravirt_enabled = 1;
        pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-       pv_info.name = "vmi";
+       pv_info.name = "vmi [deprecated]";
 
        pv_init_ops.patch = vmi_patch;
 
@@ -756,7 +735,6 @@ static inline int __init activate_vmi(void)
                pv_mmu_ops.set_pmd = vmi_set_pmd;
 #ifdef CONFIG_X86_PAE
                pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
-               pv_mmu_ops.set_pte_present = vmi_set_pte_present;
                pv_mmu_ops.set_pud = vmi_set_pud;
                pv_mmu_ops.pte_clear = vmi_pte_clear;
                pv_mmu_ops.pmd_clear = vmi_pmd_clear;
@@ -784,10 +762,6 @@ static inline int __init activate_vmi(void)
 
        /* Set linear is needed in all cases */
        vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-#ifdef CONFIG_HIGHPTE
-       if (vmi_ops.set_linear_mapping)
-               pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
-#endif
 
        /*
         * These MUST always be patched.  Don't support indirect jumps
@@ -823,15 +797,15 @@ static inline int __init activate_vmi(void)
                vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
                vmi_timer_ops.cancel_alarm =
                         vmi_get_function(VMI_CALL_CancelAlarm);
-               pv_time_ops.time_init = vmi_time_init;
-               pv_time_ops.get_wallclock = vmi_get_wallclock;
-               pv_time_ops.set_wallclock = vmi_set_wallclock;
+               x86_init.timers.timer_init = vmi_time_init;
 #ifdef CONFIG_X86_LOCAL_APIC
-               pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
-               pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
+               x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
+               x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
 #endif
                pv_time_ops.sched_clock = vmi_sched_clock;
-               pv_time_ops.get_tsc_khz = vmi_tsc_khz;
+               x86_platform.calibrate_tsc = vmi_tsc_khz;
+               x86_platform.get_wallclock = vmi_get_wallclock;
+               x86_platform.set_wallclock = vmi_set_wallclock;
 
                /* We have true wallclock functions; disable CMOS clock sync */
                no_sync_cmos_clock = 1;
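
The timer and wallclock callbacks move from the paravirt op tables (pv_time_ops, pv_apic_ops) onto the x86_init, x86_cpuinit and x86_platform hook structures. For reference, the members assigned above are declared roughly as follows; this is an abbreviated sketch of <asm/x86_init.h> from this kernel series showing only the fields used here:

    /*
     * Abbreviated sketch of the hook structures assigned above; the real
     * declarations in arch/x86/include/asm/x86_init.h carry more members.
     */
    struct x86_init_timers {
            void (*setup_percpu_clockev)(void);     /* BSP local APIC clockevent */
            void (*timer_init)(void);               /* platform timer setup */
    };

    struct x86_cpuinit_ops {
            void (*setup_percpu_clockev)(void);     /* AP local APIC clockevent */
    };

    struct x86_platform_ops {
            unsigned long (*calibrate_tsc)(void);   /* TSC rate in kHz */
            unsigned long (*get_wallclock)(void);
            int (*set_wallclock)(unsigned long nowtime);
    };
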