x86: mtrr_cleanup: first 1M may be covered in var mtrrs
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 0eb70d1..455f3fe 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
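
The hunks below fold together three independent changes to the x86 fault
path: a new mmiotrace hook, kmmio_fault(), called early from
do_page_fault(); a notify_page_fault() that uses user_mode_vm() on both
32-bit and 64-bit instead of an #ifdef; and a vmalloc_sync_all()
rewritten without the static insync bitmap.
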
@@ -10,6 +10,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/mmiotrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #define PF_RSVD                (1<<3)
 #define PF_INSTR       (1<<4)
 
+static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
+{
+#ifdef CONFIG_MMIOTRACE_HOOKS
+       if (unlikely(is_kmmio_active()))
+               if (kmmio_handler(regs, addr) == 1)
+                       return -1;
+#endif
+       return 0;
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
        int ret = 0;
 
        /* kprobe_running() needs smp_processor_id() */
-#ifdef CONFIG_X86_32
        if (!user_mode_vm(regs)) {
-#else
-       if (!user_mode(regs)) {
-#endif
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
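
mmiotrace arms ioremap'ed pages so that accesses to them fault;
kmmio_handler() logs and single-steps the faulting access and returns 1
when the fault was one of its own, which kmmio_fault() above turns into
-1 so that do_page_fault() (next hunk) can bail out before normal fault
handling. As a hedged sketch of the consumer side, assuming only the
kmmio_probe interface declared in <linux/mmiotrace.h>, arming one page
might look like this (sketch_pre, sketch_post, and the probe itself are
hypothetical names; the handler bodies are illustrative):

/*
 * Sketch of a kmmio client using the <linux/mmiotrace.h> probe API.
 */
#include <linux/mmiotrace.h>
#include <linux/kernel.h>

static void sketch_pre(struct kmmio_probe *p, struct pt_regs *regs,
                       unsigned long addr)
{
        /* Runs from the fault path, before the access is replayed. */
        printk(KERN_INFO "kmmio: access at 0x%lx\n", addr);
}

static void sketch_post(struct kmmio_probe *p, unsigned long condition,
                        struct pt_regs *regs)
{
        /* Runs after the single-stepped instruction has completed. */
}

static struct kmmio_probe sketch_probe = {
        .addr         = 0,            /* fill in: ioremap'ed address to watch */
        .len          = PAGE_SIZE,    /* arm a single page */
        .pre_handler  = sketch_pre,
        .post_handler = sketch_post,
};

While at least one probe is registered via register_kmmio_probe(),
is_kmmio_active() is true and kmmio_fault() gets a look at every
kernel-mode fault.
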
@@ -602,6 +609,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
        if (notify_page_fault(regs))
                return;
+       if (unlikely(kmmio_fault(regs, address)))
+               return;
 
        /*
         * We fault-in kernel-space virtual memory on-demand. The
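
Ordering in the hunk above matters: kprobes (notify_page_fault()) keeps
the first look at the fault, mmiotrace gets the second, and both run
before the vmalloc fault-in logic that the trailing comment introduces,
so an access to an armed MMIO page never reaches normal fault handling.
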
@@ -907,14 +916,7 @@ LIST_HEAD(pgd_list);
 void vmalloc_sync_all(void)
 {
 #ifdef CONFIG_X86_32
-       /*
-        * Note that races in the updates of insync and start aren't
-        * problematic: insync can only get set bits added, and updates to
-        * start are only improving performance (without affecting correctness
-        * if undone).
-        */
-       static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-       static unsigned long start = TASK_SIZE;
+       unsigned long start = VMALLOC_START & PGDIR_MASK;
        unsigned long address;
 
        if (SHARED_KERNEL_PMD)
@@ -922,56 +924,38 @@ void vmalloc_sync_all(void)
 
        BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
        for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-               if (!test_bit(pgd_index(address), insync)) {
-                       unsigned long flags;
-                       struct page *page;
-
-                       spin_lock_irqsave(&pgd_lock, flags);
-                       list_for_each_entry(page, &pgd_list, lru) {
-                               if (!vmalloc_sync_one(page_address(page),
-                                                     address))
-                                       break;
-                       }
-                       spin_unlock_irqrestore(&pgd_lock, flags);
-                       if (!page)
-                               set_bit(pgd_index(address), insync);
+               unsigned long flags;
+               struct page *page;
+
+               spin_lock_irqsave(&pgd_lock, flags);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       if (!vmalloc_sync_one(page_address(page),
+                                             address))
+                               break;
                }
-               if (address == start && test_bit(pgd_index(address), insync))
-                       start = address + PGDIR_SIZE;
+               spin_unlock_irqrestore(&pgd_lock, flags);
        }
 #else /* CONFIG_X86_64 */
-       /*
-        * Note that races in the updates of insync and start aren't
-        * problematic: insync can only get set bits added, and updates to
-        * start are only improving performance (without affecting correctness
-        * if undone).
-        */
-       static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-       static unsigned long start = VMALLOC_START & PGDIR_MASK;
+       unsigned long start = VMALLOC_START & PGDIR_MASK;
        unsigned long address;
 
        for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-               if (!test_bit(pgd_index(address), insync)) {
-                       const pgd_t *pgd_ref = pgd_offset_k(address);
-                       unsigned long flags;
-                       struct page *page;
-
-                       if (pgd_none(*pgd_ref))
-                               continue;
-                       spin_lock_irqsave(&pgd_lock, flags);
-                       list_for_each_entry(page, &pgd_list, lru) {
-                               pgd_t *pgd;
-                               pgd = (pgd_t *)page_address(page) + pgd_index(address);
-                               if (pgd_none(*pgd))
-                                       set_pgd(pgd, *pgd_ref);
-                               else
-                                       BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-                       }
-                       spin_unlock_irqrestore(&pgd_lock, flags);
-                       set_bit(pgd_index(address), insync);
+               const pgd_t *pgd_ref = pgd_offset_k(address);
+               unsigned long flags;
+               struct page *page;
+
+               if (pgd_none(*pgd_ref))
+                       continue;
+               spin_lock_irqsave(&pgd_lock, flags);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
+                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                       if (pgd_none(*pgd))
+                               set_pgd(pgd, *pgd_ref);
+                       else
+                               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
                }
-               if (address == start)
-                       start = address + PGDIR_SIZE;
+               spin_unlock_irqrestore(&pgd_lock, flags);
        }
 #endif
 }
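
The vmalloc_sync_all() rewrite deletes the static insync bitmap and the
cached start on both variants; as the removed comments said, those were
only a performance memo, so each call now simply rewalks pgd_list for
the whole vmalloc range under pgd_lock. On 32-bit the per-pgd work is
done by vmalloc_sync_one(), defined earlier in fault.c and not shown in
this excerpt; a hedged sketch of its era-typical shape, reconstructed
from how the loop consumes its return value:

/*
 * Sketch (not the verbatim helper): copy init_mm's pmd entry for
 * 'address' into one process page directory, returning NULL once
 * the kernel itself has no mapping there so the caller's walk of
 * pgd_list can stop early.
 */
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;            /* nothing to sync here */

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;
        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);   /* propagate the kernel mapping */
        else
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
        return pmd_k;
}

The 64-bit branch needs no helper: pgd entries are shared at the top
level, so the loop copies *pgd_ref straight into each listed pgd and
BUG()s if an existing entry disagrees with the reference.
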