mm: Remove slab destructors from kmem_cache_create().
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index f38085f..e92a101 100644
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/tty.h>
 #include <linux/vt_kern.h>             /* For unblank_screen() */
 #include <linux/highmem.h>
+#include <linux/bootmem.h>             /* for max_low_pfn */
+#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/desc.h>
-#include <asm/kdebug.h>
+#include <asm/segment.h>
 
 extern void die(const char *,struct pt_regs *,long);
 
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out 
- */
-void bust_spinlocks(int yes)
+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+int register_page_fault_notifier(struct notifier_block *nb)
 {
-       int loglevel_save = console_loglevel;
+       vmalloc_sync_all();
+       return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_page_fault_notifier);
 
-       if (yes) {
-               oops_in_progress = 1;
-               return;
-       }
-#ifdef CONFIG_VT
-       unblank_screen();
-#endif
-       oops_in_progress = 0;
-       /*
-        * OK, the message is on the console.  Now we call printk()
-        * without oops_in_progress set so that printk will give klogd
-        * a poke.  Hold onto your hats...
-        */
-       console_loglevel = 15;          /* NMI oopser may have shut the console up */
-       printk(" ");
-       console_loglevel = loglevel_save;
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
+
+static inline int notify_page_fault(struct pt_regs *regs, long err)
+{
+       struct die_args args = {
+               .regs = regs,
+               .str = "page fault",
+               .err = err,
+               .trapnr = 14,
+               .signr = SIGSEGV
+       };
+       return atomic_notifier_call_chain(&notify_page_fault_chain,
+                                         DIE_PAGE_FAULT, &args);
 }
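
The two exports above replace open-coded notify_die() hooks for trap 14. A minimal sketch of a client, loosely modelled on how kprobes consumed this chain at the time (my_pf_handler, my_fixup and my_fault_nb are hypothetical names, not part of this patch):

#include <linux/notifier.h>
#include <linux/kdebug.h>

static int my_pf_handler(struct notifier_block *self,
                         unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_PAGE_FAULT)
		return NOTIFY_DONE;

	/* Returning NOTIFY_STOP makes do_page_fault() bail out early. */
	if (my_fixup(args->regs, args->err))	/* hypothetical fixup */
		return NOTIFY_STOP;

	return NOTIFY_DONE;			/* fall through to the VM */
}

static struct notifier_block my_fault_nb = {
	.notifier_call = my_pf_handler,
};

static int __init my_init(void)
{
	/* Registration also runs vmalloc_sync_all(), see above. */
	return register_page_fault_notifier(&my_fault_nb);
}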
 
 /*
@@ -77,15 +81,18 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
        unsigned seg = regs->xcs & 0xffff;
        u32 seg_ar, seg_limit, base, *desc;
 
-       /* The standard kernel/user address space limit. */
-       *eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg;
-
        /* Unlikely, but must come before segment checks. */
-       if (unlikely((regs->eflags & VM_MASK) != 0))
-               return eip + (seg << 4);
+       if (unlikely(regs->eflags & VM_MASK)) {
+               base = seg << 4;
+               *eip_limit = base + 0xffff;
+               return base + (eip & 0xffff);
+       }
+
+       /* The standard kernel/user address space limit. */
+       *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
        
        /* By far the most common cases. */
-       if (likely(seg == __USER_CS || seg == __KERNEL_CS))
+       if (likely(SEGMENT_IS_FLAT_CODE(seg)))
                return eip;
 
        /* Check the segment exists, is within the current LDT/GDT size,
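
A quick user-space check of the vm86 arithmetic above, with made-up register values (the 16-bit segment is shifted left four bits, the offset wraps at 64K):

#include <stdio.h>

int main(void)
{
	unsigned long seg = 0xb800;			/* real-mode segment         */
	unsigned long eip = 0x12345;			/* only low 16 bits are used */
	unsigned long base   = seg << 4;		/* 0xb8000                   */
	unsigned long limit  = base + 0xffff;		/* 0xc7fff, last valid byte  */
	unsigned long linear = base + (eip & 0xffff);	/* 0xba345                   */

	printf("base=%#lx limit=%#lx linear=%#lx\n", base, limit, linear);
	return 0;
}
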
@@ -135,7 +142,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
 { 
        unsigned long limit;
-       unsigned long instr = get_segment_eip (regs, &limit);
+       unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
        int scan_more = 1;
        int prefetch = 0; 
        int i;
@@ -145,9 +152,9 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
                unsigned char instr_hi;
                unsigned char instr_lo;
 
-               if (instr > limit)
+               if (instr > (unsigned char *)limit)
                        break;
-               if (__get_user(opcode, (unsigned char __user *) instr))
+               if (probe_kernel_address(instr, opcode))
                        break; 
 
                instr_hi = opcode & 0xf0; 
@@ -172,9 +179,9 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
                case 0x00:
                        /* Prefetch instruction is 0x0F0D or 0x0F18 */
                        scan_more = 0;
-                       if (instr > limit)
+                       if (instr > (unsigned char *)limit)
                                break;
-                       if (__get_user(opcode, (unsigned char __user *) instr))
+                       if (probe_kernel_address(instr, opcode))
                                break;
                        prefetch = (instr_lo == 0xF) &&
                                (opcode == 0x0D || opcode == 0x18);
@@ -295,8 +302,8 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long address;
-       unsigned long page;
        int write, si_code;
+       int fault;
 
        /* get the address */
         address = read_cr2();
@@ -321,8 +328,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
        if (unlikely(address >= TASK_SIZE)) {
                if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
                        return;
-               if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-                                               SIGSEGV) == NOTIFY_STOP)
+               if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
                        return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
@@ -331,8 +337,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
                goto bad_area_nosemaphore;
        }
 
-       if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-                                       SIGSEGV) == NOTIFY_STOP)
+       if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
                return;
 
        /* It's safe to allow irq's after cr2 has been saved and the vmalloc
@@ -352,7 +357,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
-        * erroneous fault occuring in a code path which already holds mmap_sem
+        * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
@@ -399,11 +404,7 @@ good_area:
        write = 0;
        switch (error_code & 3) {
                default:        /* 3: write, present */
-#ifdef TEST_VERIFY_AREA
-                       if (regs->cs == KERNEL_CS)
-                               printk("WP fault at %08lx\n", regs->eip);
-#endif
-                       /* fall through */
+                               /* fall through */
                case 2:         /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
@@ -412,7 +413,7 @@ good_area:
                case 1:         /* read, present */
                        goto bad_area;
                case 0:         /* read, not present */
-                       if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                       if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                                goto bad_area;
        }
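
For reference when reading the switch above ("error_code & 3" is exactly the write/present pair) and the bit-2 and bit-4 tests elsewhere in this file: the hardware error code decomposes into five bits. A hypothetical helper spelling them out:

static void decode_pf_error(unsigned long error_code)
{
	int present = !!(error_code & 1);	/* 0 not-present, 1 protection */
	int write   = !!(error_code & 2);	/* 0 read, 1 write             */
	int user    = !!(error_code & 4);	/* fault raised from user mode */
	int rsvd    = !!(error_code & 8);	/* reserved bit set in an entry */
	int fetch   = !!(error_code & 16);	/* instruction fetch (NX/PAE)  */

	printk(KERN_DEBUG "pf: present=%d write=%d user=%d rsvd=%d fetch=%d\n",
	       present, write, user, rsvd, fetch);
}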
 
@@ -422,20 +423,18 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       switch (handle_mm_fault(mm, vma, address, write)) {
-               case VM_FAULT_MINOR:
-                       tsk->min_flt++;
-                       break;
-               case VM_FAULT_MAJOR:
-                       tsk->maj_flt++;
-                       break;
-               case VM_FAULT_SIGBUS:
-                       goto do_sigbus;
-               case VM_FAULT_OOM:
+       fault = handle_mm_fault(mm, vma, address, write);
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
-               default:
-                       BUG();
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
        }
+       if (fault & VM_FAULT_MAJOR)
+               tsk->maj_flt++;
+       else
+               tsk->min_flt++;
 
        /*
         * Did it hit the DOS screen memory VA from vm86 mode?
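
The rewrite above tracks handle_mm_fault()'s return value changing from an enumeration (VM_FAULT_MINOR and friends) to a bitmask. Roughly what <linux/mm.h> defines in this series (quoted from memory, so check the tree); note that with VM_FAULT_ERROR covering exactly the two error bits, the BUG() is a pure safety net:

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* special case for get_user_pages */
#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)
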
@@ -458,6 +457,11 @@ bad_area:
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & 4) {
+               /*
+                * It's possible to have interrupts off here.
+                */
+               local_irq_enable();
+
                /* 
                 * Valid to do another page fault here because this one came 
                 * from user space.
@@ -510,7 +514,9 @@ no_context:
        bust_spinlocks(1);
 
        if (oops_may_print()) {
-       #ifdef CONFIG_X86_PAE
+               __typeof__(pte_val(__pte(0))) page;
+
+#ifdef CONFIG_X86_PAE
                if (error_code & 16) {
                        pte_t *pte = lookup_address(address);
 
@@ -519,7 +525,7 @@ no_context:
                                        "NX-protected page - exploit attempt? "
                                        "(uid: %d)\n", current->uid);
                }
-       #endif
+#endif
                if (address < PAGE_SIZE)
                        printk(KERN_ALERT "BUG: unable to handle kernel NULL "
                                        "pointer dereference");
@@ -529,25 +535,38 @@ no_context:
                printk(" at virtual address %08lx\n",address);
                printk(KERN_ALERT " printing eip:\n");
                printk("%08lx\n", regs->eip);
-       }
-       page = read_cr3();
-       page = ((unsigned long *) __va(page))[address >> 22];
-       if (oops_may_print())
+
+               page = read_cr3();
+               page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+#ifdef CONFIG_X86_PAE
+               printk(KERN_ALERT "*pdpt = %016Lx\n", page);
+               if ((page >> PAGE_SHIFT) < max_low_pfn
+                   && page & _PAGE_PRESENT) {
+                       page &= PAGE_MASK;
+                       page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
+                                                                & (PTRS_PER_PMD - 1)];
+                       printk(KERN_ALERT "*pde = %016Lx\n", page);
+                       page &= ~_PAGE_NX;
+               }
+#else
                printk(KERN_ALERT "*pde = %08lx\n", page);
-       /*
-        * We must not directly access the pte in the highpte
-        * case, the page table might be allocated in highmem.
-        * And lets rather not kmap-atomic the pte, just in case
-        * it's allocated already.
-        */
-#ifndef CONFIG_HIGHPTE
-       if ((page & 1) && oops_may_print()) {
-               page &= PAGE_MASK;
-               address &= 0x003ff000;
-               page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
-               printk(KERN_ALERT "*pte = %08lx\n", page);
-       }
 #endif
+
+               /*
+                * We must not directly access the pte in the highpte
+                * case if the page table is located in highmem.
+                * And let's rather not kmap-atomic the pte, just in case
+                * it's allocated already.
+                */
+               if ((page >> PAGE_SHIFT) < max_low_pfn
+                   && (page & _PAGE_PRESENT)) {
+                       page &= PAGE_MASK;
+                       page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
+                                                                & (PTRS_PER_PTE - 1)];
+                       printk(KERN_ALERT "*pte = %0*Lx\n", sizeof(page)*2, (u64)page);
+               }
+       }
+
        tsk->thread.cr2 = address;
        tsk->thread.trap_no = 14;
        tsk->thread.error_code = error_code;
@@ -561,7 +580,7 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (tsk->pid == 1) {
+       if (is_init(tsk)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
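
is_init() replaces the raw PID compare as part of the PID-namespace groundwork; its <linux/sched.h> definition in this era amounts to the old test (paraphrased from memory):

static inline int is_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}
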
@@ -588,7 +607,6 @@ do_sigbus:
        force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
-#ifndef CONFIG_X86_PAE
 void vmalloc_sync_all(void)
 {
        /*
@@ -601,6 +619,9 @@ void vmalloc_sync_all(void)
        static unsigned long start = TASK_SIZE;
        unsigned long address;
 
+       if (SHARED_KERNEL_PMD)
+               return;
+
        BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
        for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
                if (!test_bit(pgd_index(address), insync)) {
@@ -623,4 +644,3 @@ void vmalloc_sync_all(void)
                        start = address + PGDIR_SIZE;
        }
 }
-#endif
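
Dropping the CONFIG_X86_PAE guard makes vmalloc_sync_all() available on PAE kernels too; when the kernel PMD is shared there is nothing per-pagetable to sync, hence the new early return. Why callers such as register_page_fault_notifier() above want it, shown as a hypothetical driver sketch: code that runs where faulting is unsafe (NMI context, notifier callbacks) must not rely on the lazy vmalloc_fault() path.

#include <linux/vmalloc.h>

static void *my_buf;	/* hypothetical: touched from NMI context later */

static int __init my_driver_init(void)
{
	my_buf = vmalloc(PAGE_SIZE);
	if (!my_buf)
		return -ENOMEM;

	/* Populate the vmalloc-area PGD slots in every pagetable now so
	 * that no lazy vmalloc_fault() is ever taken from NMI context. */
	vmalloc_sync_all();
	return 0;
}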