diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 7848bf7..34a5912 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -159,7 +160,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
 const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 
 /* Insert a jump instruction at address 'from', which jumps to address 'to'. */
-static __always_inline void set_jmp_op(void *from, void *to)
+static void __kprobes set_jmp_op(void *from, void *to)
 {
 	struct __arch_jmp_op {
 		char op;
@@ -171,10 +172,23 @@ static __always_inline void set_jmp_op(void *from, void *to)
 }
 
 /*
+ * Check for the REX prefix, which can only exist on X86_64.
+ * X86_32 always returns 0.
+ */
+static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
+{
+#ifdef CONFIG_X86_64
+	if ((*insn & 0xf0) == 0x40)
+		return 1;
+#endif
+	return 0;
+}
+
+/*
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64-bit mode
  */
-static __always_inline int can_boost(kprobe_opcode_t *opcodes)
+static int __kprobes can_boost(kprobe_opcode_t *opcodes)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
@@ -239,26 +253,27 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
 	case 0x9d:		/* popf/popfd */
 		return 1;
 	}
-#ifdef CONFIG_X86_64
+
 	/*
-	 * on 64 bit x86, 0x40-0x4f are prefixes so we need to look
+	 * on X86_64, 0x40-0x4f are REX prefixes so we need to look
	 * at the next byte instead... but of course not recurse infinitely
 	 */
-	if (*insn >= 0x40 && *insn <= 0x4f)
+	if (is_REX_prefix(insn))
 		return is_IF_modifier(++insn);
-#endif
+
 	return 0;
 }
 
-#ifdef CONFIG_X86_64
 /*
  * Adjust the displacement if the instruction uses the %rip-relative
  * addressing mode.
  * If it does, return the address of the 32-bit displacement word.
  * If not, return null.
+ * Only applicable to 64-bit x86.
  */
 static void __kprobes fix_riprel(struct kprobe *p)
 {
+#ifdef CONFIG_X86_64
 	u8 *insn = p->ainsn.insn;
 	s64 disp;
 	int need_modrm;
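/*
 * Illustrative aside, not part of the patch: the new is_REX_prefix()
 * helper replaces open-coded range checks like "*insn >= 0x40 &&
 * *insn <= 0x4f" with the mask test "(*insn & 0xf0) == 0x40". The two
 * forms are equivalent, since 0x40-0x4f are exactly the bytes whose
 * high nibble is 0x4. A minimal user-space sketch that verifies this:
 */
#include <stdio.h>

int main(void)
{
	unsigned int b;

	for (b = 0x00; b <= 0xff; b++) {
		int by_range = (b >= 0x40 && b <= 0x4f);	/* old test */
		int by_mask  = ((b & 0xf0) == 0x40);		/* new test */

		if (by_range != by_mask)
			printf("mismatch at 0x%02x\n", b);
	}
	printf("range and mask forms agree for all 256 byte values\n");
	return 0;
}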
@@ -284,7 +299,7 @@ static void __kprobes fix_riprel(struct kprobe *p)
 	}
 
 	/* Skip REX instruction prefix. */
-	if ((*insn & 0xf0) == 0x40)
+	if (is_REX_prefix(insn))
 		++insn;
 
 	if (*insn == 0x0f) {
@@ -322,15 +337,15 @@ static void __kprobes fix_riprel(struct kprobe *p)
 			*(s32 *)insn = (s32) disp;
 		}
 	}
-}
 #endif
+}
 
 static void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-#ifdef CONFIG_X86_64
+
 	fix_riprel(p);
-#endif
+
 	if (can_boost(p->addr))
 		p->ainsn.boostable = 0;
 	else
@@ -392,16 +407,16 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
 }
 
-static __always_inline void clear_btf(void)
+static void __kprobes clear_btf(void)
 {
 	if (test_thread_flag(TIF_DEBUGCTLMSR))
-		wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
+		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
 }
 
-static __always_inline void restore_btf(void)
+static void __kprobes restore_btf(void)
 {
 	if (test_thread_flag(TIF_DEBUGCTLMSR))
-		wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
+		wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
 }
 
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -409,7 +424,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	clear_btf();
 	regs->flags |= X86_EFLAGS_TF;
 	regs->flags &= ~X86_EFLAGS_IF;
-	/*single step inline if the instruction is an int3*/
+	/* single step inline if the instruction is an int3 */
 	if (p->opcode == BREAKPOINT_INSTRUCTION)
 		regs->ip = (unsigned long)p->addr;
 	else
@@ -428,128 +443,146 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 	*sara = (unsigned long) &kretprobe_trampoline;
 }
 
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+				       struct kprobe_ctlblk *kcb)
+{
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+	if (p->ainsn.boostable == 1 && !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		reset_current_kprobe();
+		regs->ip = (unsigned long)p->ainsn.insn;
+		preempt_enable_no_resched();
+		return;
+	}
+#endif
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+}
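/*
 * Aside on the wrmsr() -> wrmsrl() change above, not part of the patch:
 * wrmsr() takes the 64-bit MSR value as two 32-bit halves (low, high),
 * so the old call hard-coded 0 for the high half, while wrmsrl() takes
 * one 64-bit value and splits it itself. A user-space sketch of that
 * split, using only standard C:
 */
#include <stdio.h>
#include <stdint.h>

/* Split a 64-bit MSR value the way wrmsrl() feeds the WRMSR instruction. */
static void show_split(uint64_t val)
{
	uint32_t lo = (uint32_t)val;
	uint32_t hi = (uint32_t)(val >> 32);

	printf("val=%#018llx -> lo=%#010x hi=%#010x\n",
	       (unsigned long long)val, lo, hi);
}

int main(void)
{
	show_split(0x1);		/* e.g. a low-bit flag such as BTF */
	show_split(0x100000001ULL);	/* high half is lost with a hard-coded 0 */
	return 0;
}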
+
+/*
+ * We have reentered the kprobe_handler(), since another probe was hit while
+ * within the handler. We save the original kprobes variables and just single
+ * step on the instruction of the new probe without calling any user handlers.
+ */
+static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
+				    struct kprobe_ctlblk *kcb)
+{
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SSDONE:
+#ifdef CONFIG_X86_64
+		/* TODO: Provide re-entrancy from post_kprobes_handler() and
+		 * avoid exception stack corruption while single-stepping on
+		 * the instruction of the new probe.
+		 */
+		arch_disarm_kprobe(p);
+		regs->ip = (unsigned long)p->addr;
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		break;
+#endif
+	case KPROBE_HIT_ACTIVE:
+		save_previous_kprobe(kcb);
+		set_current_kprobe(p, regs, kcb);
+		kprobes_inc_nmissed_count(p);
+		prepare_singlestep(p, regs);
+		kcb->kprobe_status = KPROBE_REENTER;
+		break;
+	case KPROBE_HIT_SS:
+		if (p == kprobe_running()) {
+			regs->flags &= ~TF_MASK;
+			regs->flags |= kcb->kprobe_saved_flags;
+			return 0;
+		} else {
+			/* A probe has been hit in the codepath leading up
+			 * to, or just after, single-stepping of a probed
+			 * instruction. This entire codepath should strictly
+			 * reside in .kprobes.text section. Raise a warning
+			 * to highlight this peculiar case.
+			 */
+		}
+	default:
+		/* impossible cases */
+		WARN_ON(1);
+		return 0;
+	}
+
+	return 1;
+}
+
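/*
 * A user-space sketch, with invented names, of the status dispatch that
 * reenter_kprobe() implements: which kprobe_status values lead to a
 * REENTER single-step (return 1) and which bail out (return 0). Note
 * the deliberate fallthrough: without CONFIG_X86_64 the SSDONE case
 * falls into the HIT_ACTIVE path, exactly as in the patch.
 */
#include <stdio.h>

enum status { HIT_ACTIVE, HIT_SS, HIT_SSDONE };

static int simulate_reenter(enum status s, int same_probe)
{
	switch (s) {
	case HIT_SSDONE:	/* no X86_64 early-out here: fall through */
	case HIT_ACTIVE:
		return 1;	/* save old probe, single-step the new one */
	case HIT_SS:
		if (same_probe)
			return 0;	/* probed the int3 itself: restore flags */
		/* else: probe hit inside .kprobes.text -- fall through */
	default:
		return 0;	/* WARN_ON(1) territory in the real code */
	}
}

int main(void)
{
	printf("HIT_ACTIVE         -> %d\n", simulate_reenter(HIT_ACTIVE, 0));
	printf("HIT_SS, same probe -> %d\n", simulate_reenter(HIT_SS, 1));
	printf("HIT_SSDONE         -> %d\n", simulate_reenter(HIT_SSDONE, 0));
	return 0;
}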
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled throughout this function.
  */
 static int __kprobes kprobe_handler(struct pt_regs *regs)
 {
-	struct kprobe *p;
-	int ret = 0;
 	kprobe_opcode_t *addr;
+	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
 
 	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
+	if (*addr != BREAKPOINT_INSTRUCTION) {
+		/*
+		 * The breakpoint instruction was removed right
+		 * after we hit it. Another cpu has removed
+		 * either a probepoint or a debugger breakpoint
+		 * at this address. In either case, no further
+		 * handling of this interrupt is appropriate.
+		 * Back up over the (now missing) int3 and run
+		 * the original instruction.
+		 */
+		regs->ip = (unsigned long)addr;
+		return 1;
+	}
 
 	/*
 	 * We don't want to be preempted for the entire
-	 * duration of kprobe processing
+	 * duration of kprobe processing. We conditionally
+	 * re-enable preemption at the end of this function,
+	 * and also in reenter_kprobe() and setup_singlestep().
 	 */
 	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+	p = get_kprobe(addr);
 
-	/* Check we're not actually recursing */
-	if (kprobe_running()) {
-		p = get_kprobe(addr);
-		if (p) {
-			if (kcb->kprobe_status == KPROBE_HIT_SS &&
-				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
-				regs->flags &= ~X86_EFLAGS_TF;
-				regs->flags |= kcb->kprobe_saved_flags;
-				goto no_kprobe;
-#ifdef CONFIG_X86_64
-			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
-				/* TODO: Provide re-entrancy from
-				 * post_kprobes_handler() and avoid exception
-				 * stack corruption while single-stepping on
-				 * the instruction of the new probe.
-				 */
-				arch_disarm_kprobe(p);
-				regs->ip = (unsigned long)p->addr;
-				reset_current_kprobe();
+	if (p) {
+		if (kprobe_running()) {
+			if (reenter_kprobe(p, regs, kcb))
 				return 1;
-#endif
-			}
-			/* We have reentered the kprobe_handler(), since
-			 * another probe was hit while within the handler.
-			 * We here save the original kprobes variables and
-			 * just single step on the instruction of the new probe
-			 * without calling any user handlers.
-			 */
-			save_previous_kprobe(kcb);
-			set_current_kprobe(p, regs, kcb);
-			kprobes_inc_nmissed_count(p);
-			prepare_singlestep(p, regs);
-			kcb->kprobe_status = KPROBE_REENTER;
-			return 1;
 		} else {
-			if (*addr != BREAKPOINT_INSTRUCTION) {
-				/* The breakpoint instruction was removed by
-				 * another cpu right after we hit, no further
-				 * handling of this interrupt is appropriate
-				 */
-				regs->ip = (unsigned long)addr;
-				ret = 1;
-				goto no_kprobe;
-			}
-			p = __get_cpu_var(current_kprobe);
-			if (p->break_handler && p->break_handler(p, regs))
-				goto ss_probe;
-		}
-		goto no_kprobe;
-	}
+			set_current_kprobe(p, regs, kcb);
+			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
-	p = get_kprobe(addr);
-	if (!p) {
-		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
-			 * The breakpoint instruction was removed right
-			 * after we hit it. Another cpu has removed
-			 * either a probepoint or a debugger breakpoint
-			 * at this address. In either case, no further
-			 * handling of this interrupt is appropriate.
-			 * Back up over the (now missing) int3 and run
-			 * the original instruction.
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing. If we have a
+			 * pre-handler and it returned non-zero, it prepped
+			 * for calling the break_handler below on re-entry
+			 * for jprobe processing, so get out doing nothing
+			 * more here.
 			 */
-			regs->ip = (unsigned long)addr;
-			ret = 1;
+			if (!p->pre_handler || !p->pre_handler(p, regs))
+				setup_singlestep(p, regs, kcb);
+			return 1;
 		}
-		/* Not one of ours: let kernel handle it */
-		goto no_kprobe;
-	}
-
-	set_current_kprobe(p, regs, kcb);
-	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-
-	if (p->pre_handler && p->pre_handler(p, regs))
-		/* handler has already set things up, so skip ss setup */
-		return 1;
-
-ss_probe:
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
-	if (p->ainsn.boostable == 1 && !p->post_handler) {
-		/* Boost up -- we can execute copied instructions directly */
-		reset_current_kprobe();
-		regs->ip = (unsigned long)p->ainsn.insn;
-		preempt_enable_no_resched();
-		return 1;
-	}
-#endif
-	prepare_singlestep(p, regs);
-	kcb->kprobe_status = KPROBE_HIT_SS;
-	return 1;
+	} else if (kprobe_running()) {
+		p = __get_cpu_var(current_kprobe);
+		if (p->break_handler && p->break_handler(p, regs)) {
+			setup_singlestep(p, regs, kcb);
+			return 1;
+		}
+	} /* else: not a kprobe fault; let the kernel handle it */
 
-no_kprobe:
 	preempt_enable_no_resched();
-	return ret;
+	return 0;
 }
 
 /*
  * When a retprobed function returns, this code saves registers and
  * calls trampoline_handler(), which in turn calls the kretprobe's handler.
  */
-	void __kprobes kretprobe_trampoline_holder(void)
-	{
+static void __used __kprobes kretprobe_trampoline_holder(void)
+{
 	asm volatile (
 			".global kretprobe_trampoline\n"
 			"kretprobe_trampoline: \n"
@@ -635,12 +668,12 @@
 			"	popf\n"
 #endif
 			"	ret\n");
-	}
+}
 
 /*
  * Called from kretprobe_trampoline
  */
-void * __kprobes trampoline_handler(struct pt_regs *regs)
+static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
@@ -743,11 +776,9 @@ static void __kprobes resume_execution(struct kprobe *p,
 	unsigned long orig_ip = (unsigned long)p->addr;
 	kprobe_opcode_t *insn = p->ainsn.insn;
 
-#ifdef CONFIG_X86_64
 	/* skip the REX prefix */
-	if (*insn >= 0x40 && *insn <= 0x4f)
+	if (is_REX_prefix(insn))
 		insn++;
-#endif
 
 	regs->flags &= ~X86_EFLAGS_TF;
 	switch (*insn) {
@@ -767,7 +798,7 @@ static void __kprobes resume_execution(struct kprobe *p,
 	case 0xe8:		/* call relative - Fix return addr */
 		*tos = orig_ip + (*tos - copy_ip);
 		break;
-#ifndef CONFIG_X86_64
+#ifdef CONFIG_X86_32
 	case 0x9a:		/* call absolute -- same as call absolute, indirect */
 		*tos = orig_ip + (*tos - copy_ip);
 		goto no_change;
@@ -813,8 +844,6 @@ static void __kprobes resume_execution(struct kprobe *p,
 
 no_change:
 	restore_btf();
-
-	return;
 }
 
 /*
@@ -924,7 +953,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 				       unsigned long val, void *data)
 {
-	struct die_args *args = (struct die_args *)data;
+	struct die_args *args = data;
 	int ret = NOTIFY_DONE;
 
 	if (args->regs && user_mode_vm(args->regs))
@@ -940,12 +969,14 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		ret = NOTIFY_STOP;
 		break;
 	case DIE_GPF:
-		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
-		if (kprobe_running() &&
+		/*
+		 * To be potentially processing a kprobe fault and to
+		 * trust the result from kprobe_running(), we have to
+		 * be non-preemptible.
+		 */
+		if (!preemptible() && kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
-		preempt_enable();
 		break;
 	default:
 		break;
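/*
 * For context, a minimal sketch (kernel-module style, era-appropriate
 * kprobes API assumed; probe target and names are hypothetical) of the
 * user side of this code path: registering a kprobe whose pre_handler
 * returns 0 lets kprobe_handler() proceed to setup_singlestep() above.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs from kprobe_handler(); returning 0 requests single-step. */
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* hypothetical probe target */
	.pre_handler	= my_pre,
};

static int __init probe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");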