* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
+#include <linux/kdebug.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
-#include <asm/kdebug.h>
#include <asm/siginfo.h>
+
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
+{
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 11))
+			ret = 1;
+		preempt_enable();
+	}
+
+	return ret;
+}
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
+{
+	return 0;
+}
+#endif
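
For context (not part of this patch): kprobe_fault_handler() ends up calling the
.fault_handler callback of whichever probe is currently firing. A minimal sketch of
such a probe (symbol name and identifiers are placeholders, not from this patch):

	#include <linux/kprobes.h>

	/* Illustrative only: called when a fault hits while our probe handler runs.
	 * trapnr is 11 (SIGSEGV) when invoked via notify_page_fault() above.
	 * Return 1 to claim the fault, 0 to let normal fault handling continue. */
	static int demo_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
	{
		return 0;
	}

	static struct kprobe demo_probe = {
		.symbol_name	= "do_fork",		/* placeholder probe target */
		.fault_handler	= demo_fault_handler,
	};

	/* register_kprobe(&demo_probe) arms the probe;
	 * unregister_kprobe(&demo_probe) removes it. */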
+
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
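
(Aside, not from the source: the classic case this check is after is the
"stwu r1,-N(r1)" that PowerPC function prologues use to extend the stack. A rough
sketch of spotting that encoding, using a hypothetical helper name:)

	/* Hypothetical helper, illustration only: primary opcode 37 is stwu, and
	 * the rA (base register) field is bits 16-20 counting from the
	 * least-significant bit, so rA == 1 means the store updates r1,
	 * the stack pointer. */
	static inline int is_stwu_through_r1(unsigned int inst)
	{
		return (inst >> 26) == 37 && ((inst >> 16) & 0x1f) == 1;
	}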
struct mm_struct *mm = current->mm;
siginfo_t info;
int code = SEGV_MAPERR;
- int is_write = 0;
+ int is_write = 0, ret;
int trap = TRAP(regs);
int is_exec = trap == 0x400;
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
- if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
- 11, SIGSEGV) == NOTIFY_STOP)
+ if (notify_page_fault(regs))
return 0;
- if (trap == 0x300) {
- if (debugger_fault_handler(regs))
- return 0;
- }
+ if (unlikely(debugger_fault_handler(regs)))
+ return 0;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
- printk(KERN_EMERG "Page fault in user mode with"
+ printk(KERN_EMERG "Page fault in user mode with "
"in_atomic() = %d mm = %p\n", in_atomic(), mm);
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
- * erroneous fault occuring in a code path which already holds mmap_sem
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
+ * erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
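
(The code implementing this is not shown in this hunk; the pattern the comment
describes looks roughly like the following sketch, where the label name is
illustrative:)

	if (!down_read_trylock(&mm->mmap_sem)) {
		/* Could not take mmap_sem: only trust a kernel-mode fault whose
		 * faulting instruction is covered by the exception tables. */
		if (!user_mode(regs) && !search_exception_tables(regs->nip))
			goto bad_area_nosemaphore;	/* illustrative label */
		down_read(&mm->mmap_sem);
	}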
#endif /* CONFIG_8xx */
if (is_exec) {
-#ifdef CONFIG_PPC64
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* protection fault */
if (error_code & DSISR_PROTFAULT)
goto bad_area;
- if (!(vma->vm_flags & VM_EXEC))
+ /*
+ * Allow execution from readable areas if the MMU does not
+ * provide separate controls over reading and executing.
+ */
+ if (!(vma->vm_flags & VM_EXEC) &&
+ (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
+ !(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
-#endif
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#else
pte_t *ptep;
pmd_t *pmdp;
flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
}
- pte_update(ptep, 0, _PAGE_HWEXEC);
- _tlbie(address);
+ pte_update(ptep, 0, _PAGE_HWEXEC |
+ _PAGE_ACCESSED);
+ _tlbie(address, mm->context.id);
pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem);
return 0;
/* protection fault */
if (error_code & 0x08000000)
goto bad_area;
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
* the fault.
*/
survive:
- switch (handle_mm_fault(mm, vma, address, is_write)) {
-
- case VM_FAULT_MINOR:
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- current->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- goto do_sigbus;
- case VM_FAULT_OOM:
- goto out_of_memory;
- default:
+ ret = handle_mm_fault(mm, vma, address, is_write);
+ if (unlikely(ret & VM_FAULT_ERROR)) {
+ if (ret & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (ret & VM_FAULT_SIGBUS)
+ goto do_sigbus;
BUG();
}
-
+ if (ret & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
up_read(&mm->mmap_sem);
return 0;
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (current->pid == 1) {
+ if (is_global_init(current)) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: killing process %s\n", current->comm);
if (user_mode(regs))
- do_exit(SIGKILL);
+ do_group_exit(SIGKILL);
return SIGKILL;
do_sigbus:
/* kernel has accessed a bad area */
- printk(KERN_ALERT "Unable to handle kernel paging request for ");
switch (regs->trap) {
- case 0x300:
- case 0x380:
- printk("data at address 0x%08lx\n", regs->dar);
- break;
- case 0x400:
- case 0x480:
- printk("instruction fetch\n");
- break;
- default:
- printk("unknown fault\n");
+ case 0x300:
+ case 0x380:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "data at address 0x%08lx\n", regs->dar);
+ break;
+ case 0x400:
+ case 0x480:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "instruction fetch\n");
+ break;
+ default:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "unknown fault\n");
+ break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);