diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 711fec8..7b5169d 100644
@@ -193,6 +193,9 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
        kprobe_opcode_t opcode;
        kprobe_opcode_t *orig_opcodes = opcodes;
 
+       if (search_exception_tables((unsigned long)opcodes))
+               return 0;       /* Page fault may occur on this address. */
+
 retry:
        if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
                return 0;
@@ -376,9 +379,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-       mutex_lock(&kprobe_mutex);
-       free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
-       mutex_unlock(&kprobe_mutex);
+       if (p->ainsn.insn) {
+               free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+               p->ainsn.insn = NULL;
+       }
 }
 
 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -410,13 +414,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 static void __kprobes clear_btf(void)
 {
        if (test_thread_flag(TIF_DEBUGCTLMSR))
-               wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
+               update_debugctlmsr(0);
 }
 
 static void __kprobes restore_btf(void)
 {
        if (test_thread_flag(TIF_DEBUGCTLMSR))
-               wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
+               update_debugctlmsr(current->thread.debugctlmsr);
 }
 
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -431,7 +435,6 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
                regs->ip = (unsigned long)p->ainsn.insn;
 }
 
-/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
 {
@@ -442,6 +445,23 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
        /* Replace the return addr with trampoline addr */
        *sara = (unsigned long) &kretprobe_trampoline;
 }
+
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+                                      struct kprobe_ctlblk *kcb)
+{
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
+       if (p->ainsn.boostable == 1 && !p->post_handler) {
+               /* Boost up -- we can execute copied instructions directly */
+               reset_current_kprobe();
+               regs->ip = (unsigned long)p->ainsn.insn;
+               preempt_enable_no_resched();
+               return;
+       }
+#endif
+       prepare_singlestep(p, regs);
+       kcb->kprobe_status = KPROBE_HIT_SS;
+}
+
 /*
  * We have reentered the kprobe_handler(), since another probe was hit while
  * within the handler. We save the original kprobes variables and just single
@@ -450,13 +470,9 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
                                    struct kprobe_ctlblk *kcb)
 {
-       if (kcb->kprobe_status == KPROBE_HIT_SS &&
-           *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
-               regs->flags &= ~X86_EFLAGS_TF;
-               regs->flags |= kcb->kprobe_saved_flags;
-               return 0;
+       switch (kcb->kprobe_status) {
+       case KPROBE_HIT_SSDONE:
 #ifdef CONFIG_X86_64
-       } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
                /* TODO: Provide re-entrancy from post_kprobe_handler() and
                 * avoid exception stack corruption while single-stepping on
                 * the instruction of the new probe.
@@ -464,14 +480,35 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
                arch_disarm_kprobe(p);
                regs->ip = (unsigned long)p->addr;
                reset_current_kprobe();
-               return 1;
+               preempt_enable_no_resched();
+               break;
 #endif
+       case KPROBE_HIT_ACTIVE:
+               save_previous_kprobe(kcb);
+               set_current_kprobe(p, regs, kcb);
+               kprobes_inc_nmissed_count(p);
+               prepare_singlestep(p, regs);
+               kcb->kprobe_status = KPROBE_REENTER;
+               break;
+       case KPROBE_HIT_SS:
+               if (p == kprobe_running()) {
+                       regs->flags &= ~X86_EFLAGS_TF;
+                       regs->flags |= kcb->kprobe_saved_flags;
+                       return 0;
+               } else {
+                       /* A probe has been hit in the codepath leading up
+                        * to, or just after, single-stepping of a probed
+                        * instruction. This entire codepath should strictly
+                        * reside in .kprobes.text section. Raise a warning
+                        * to highlight this peculiar case.
+                        */
+               }
+       default:
+               /* impossible cases */
+               WARN_ON(1);
+               return 0;
        }
-       save_previous_kprobe(kcb);
-       set_current_kprobe(p, regs, kcb);
-       kprobes_inc_nmissed_count(p);
-       prepare_singlestep(p, regs);
-       kcb->kprobe_status = KPROBE_REENTER;
+
        return 1;
 }
 
@@ -481,91 +518,74 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
  */
 static int __kprobes kprobe_handler(struct pt_regs *regs)
 {
-       struct kprobe *p;
-       int ret = 0;
        kprobe_opcode_t *addr;
+       struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
+       if (*addr != BREAKPOINT_INSTRUCTION) {
+               /*
+                * The breakpoint instruction was removed right
+                * after we hit it.  Another cpu has removed
+                * either a probepoint or a debugger breakpoint
+                * at this address.  In either case, no further
+                * handling of this interrupt is appropriate.
+                * Back up over the (now missing) int3 and run
+                * the original instruction.
+                */
+               regs->ip = (unsigned long)addr;
+               return 1;
+       }
 
        /*
         * We don't want to be preempted for the entire
-        * duration of kprobe processing
+        * duration of kprobe processing. We conditionally
+        * re-enable preemption at the end of this function,
+        * and also in reenter_kprobe() and setup_singlestep().
         */
        preempt_disable();
-       kcb = get_kprobe_ctlblk();
 
+       kcb = get_kprobe_ctlblk();
        p = get_kprobe(addr);
+
        if (p) {
-               /* Check we're not actually recursing */
                if (kprobe_running()) {
-                       ret = reenter_kprobe(p, regs, kcb);
-                       if (kcb->kprobe_status == KPROBE_REENTER)
-                       {
-                               ret = 1;
-                               goto out;
-                       }
-                       goto preempt_out;
+                       if (reenter_kprobe(p, regs, kcb))
+                               return 1;
                } else {
                        set_current_kprobe(p, regs, kcb);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-                       if (p->pre_handler && p->pre_handler(p, regs))
-                       {
-                               /* handler set things up, skip ss setup */
-                               ret = 1;
-                               goto out;
-                       }
-               }
-       } else {
-               if (*addr != BREAKPOINT_INSTRUCTION) {
+
                        /*
-                        * The breakpoint instruction was removed right
-                        * after we hit it.  Another cpu has removed
-                        * either a probepoint or a debugger breakpoint
-                        * at this address.  In either case, no further
-                        * handling of this interrupt is appropriate.
-                        * Back up over the (now missing) int3 and run
-                        * the original instruction.
+                        * If we have no pre-handler or it returned 0, we
+                        * continue with normal processing.  If we have a
+                        * pre-handler and it returned non-zero, it prepped
+                        * for calling the break_handler below on re-entry
+                        * for jprobe processing, so get out doing nothing
+                        * more here.
                         */
-                       regs->ip = (unsigned long)addr;
-                       ret = 1;
-                       goto preempt_out;
+                       if (!p->pre_handler || !p->pre_handler(p, regs))
+                               setup_singlestep(p, regs, kcb);
+                       return 1;
                }
-               if (kprobe_running()) {
-                       p = __get_cpu_var(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs))
-                               goto ss_probe;
+       } else if (kprobe_running()) {
+               p = __get_cpu_var(current_kprobe);
+               if (p->break_handler && p->break_handler(p, regs)) {
+                       setup_singlestep(p, regs, kcb);
+                       return 1;
                }
-               /* Not one of ours: let kernel handle it */
-               goto preempt_out;
-       }
+       } /* else: not a kprobe fault; let the kernel handle it */
 
-ss_probe:
-       ret = 1;
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
-       if (p->ainsn.boostable == 1 && !p->post_handler) {
-               /* Boost up -- we can execute copied instructions directly */
-               reset_current_kprobe();
-               regs->ip = (unsigned long)p->ainsn.insn;
-               goto preempt_out;
-       }
-#endif
-       prepare_singlestep(p, regs);
-       kcb->kprobe_status = KPROBE_HIT_SS;
-       goto out;
-
-preempt_out:
        preempt_enable_no_resched();
-out:
-       return ret;
+       return 0;
 }
 
 /*
  * When a retprobed function returns, this code saves registers and
  * calls trampoline_handler(), which in turn calls the kretprobe's handler.
  */
- void __kprobes kretprobe_trampoline_holder(void)
- {
+static void __used __kprobes kretprobe_trampoline_holder(void)
+{
        asm volatile (
                        ".global kretprobe_trampoline\n"
                        "kretprobe_trampoline: \n"
@@ -618,13 +638,13 @@ out:
 #else
                        "       pushf\n"
                        /*
-                        * Skip cs, ip, orig_ax.
+                        * Skip cs, ip, orig_ax and gs.
                         * trampoline_handler() will plug in these values
                         */
-                       "       subl $12, %esp\n"
+                       "       subl $16, %esp\n"
                        "       pushl %fs\n"
-                       "       pushl %ds\n"
                        "       pushl %es\n"
+                       "       pushl %ds\n"
                        "       pushl %eax\n"
                        "       pushl %ebp\n"
                        "       pushl %edi\n"
@@ -635,10 +655,10 @@ out:
                        "       movl %esp, %eax\n"
                        "       call trampoline_handler\n"
                        /* Move flags to cs */
-                       "       movl 52(%esp), %edx\n"
-                       "       movl %edx, 48(%esp)\n"
+                       "       movl 56(%esp), %edx\n"
+                       "       movl %edx, 52(%esp)\n"
                        /* Replace saved flags with true return address. */
-                       "       movl %eax, 52(%esp)\n"
+                       "       movl %eax, 56(%esp)\n"
                        "       popl %ebx\n"
                        "       popl %ecx\n"
                        "       popl %edx\n"
@@ -646,17 +666,17 @@ out:
                        "       popl %edi\n"
                        "       popl %ebp\n"
                        "       popl %eax\n"
-                       /* Skip ip, orig_ax, es, ds, fs */
-                       "       addl $20, %esp\n"
+                       /* Skip ds, es, fs, gs, orig_ax and ip */
+                       "       addl $24, %esp\n"
                        "       popf\n"
 #endif
                        "       ret\n");
- }
+}
 
 /*
  * Called from kretprobe_trampoline
  */
-void * __kprobes trampoline_handler(struct pt_regs *regs)
+static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
@@ -665,13 +685,13 @@ void * __kprobes trampoline_handler(struct pt_regs *regs)
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
        INIT_HLIST_HEAD(&empty_rp);
-       spin_lock_irqsave(&kretprobe_lock, flags);
-       head = kretprobe_inst_table_head(current);
+       kretprobe_hash_lock(current, &head, &flags);
        /* fixup registers */
 #ifdef CONFIG_X86_64
        regs->cs = __KERNEL_CS;
 #else
        regs->cs = __KERNEL_CS | get_kernel_rpl();
+       regs->gs = 0;
 #endif
        regs->ip = trampoline_address;
        regs->orig_ax = ~0UL;
@@ -679,7 +699,7 @@ void * __kprobes trampoline_handler(struct pt_regs *regs)
        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
-        * return probes installed on them, and/or more then one
+        * return probes installed on them, and/or more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
@@ -715,7 +735,7 @@ void * __kprobes trampoline_handler(struct pt_regs *regs)
 
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
-       spin_unlock_irqrestore(&kretprobe_lock, flags);
+       kretprobe_hash_unlock(current, &flags);
 
        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
@@ -841,15 +861,14 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
        if (!cur)
                return 0;
 
+       resume_execution(cur, regs, kcb);
+       regs->flags |= kcb->kprobe_saved_flags;
+
        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }
 
-       resume_execution(cur, regs, kcb);
-       regs->flags |= kcb->kprobe_saved_flags;
-       trace_hardirqs_fixup_flags(regs->flags);
-
        /* Restore back the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
@@ -936,7 +955,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
 {
-       struct die_args *args = (struct die_args *)data;
+       struct die_args *args = data;
        int ret = NOTIFY_DONE;
 
        if (args->regs && user_mode_vm(args->regs))
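
For context only, and not part of the diff above: a minimal, illustrative kretprobe module of the kind that exercises the kretprobe_trampoline / trampoline_handler() path patched here. It mirrors the usage in samples/kprobes/; the probed symbol "do_fork" and all module/function names are arbitrary examples, not anything mandated by this patch.

/* kretprobe_example.c -- illustrative sketch only */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Runs when the probed function returns; by this point trampoline_handler()
 * has already fixed up regs and restored the real return address. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "%s returned %lu\n", ri->rp->kp.symbol_name,
	       regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.kp.symbol_name	= "do_fork",	/* example target function */
	.maxactive	= 20,		/* concurrent instances to allow */
};

static int __init example_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");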