/*
 *  Kernel Probes (KProbes)
 *  arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static inline int is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
	return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
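/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * a kprobes client module that exercises the arm/disarm path above via
 * the generic register_kprobe()/unregister_kprobe() API.  The probed
 * symbol (do_fork), the handler names, and the use of
 * kallsyms_lookup_name() for address lookup are all assumptions made
 * for the example.
 */
#if 0	/* sample client code, kept out of the build */
#include <linux/module.h>
#include <linux/kprobes.h>

static int sample_pre(struct kprobe *kp, struct pt_regs *regs)
{
	/* Runs from kprobe_handler() before the probed instruction. */
	printk("kprobe at %p hit, eip=0x%lx\n", kp->addr, regs->eip);
	return 0;	/* 0 = let kprobes go on to single-step */
}

static struct kprobe sample_kp = {
	.pre_handler = sample_pre,
};

static int __init sample_init(void)
{
	/* Address lookup is the caller's problem; a hard-coded
	 * System.map address would do just as well here. */
	sample_kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!sample_kp.addr)
		return -EINVAL;
	return register_kprobe(&sample_kp);	/* arms via arch_arm_kprobe() */
}

static void __exit sample_exit(void)
{
	unregister_kprobe(&sample_kp);		/* disarms via arch_disarm_kprobe() */
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");
#endif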
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}
static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}
static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* Single-step inline if the instruction is an int3. */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)&p->ainsn.insn;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)&regs->esp;
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *) *sara;

		/* Replace the return addr with trampoline addr */
		*sara = (unsigned long) &kretprobe_trampoline;

		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
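/*
 * Illustrative only, not part of the original file: a sketch of the
 * consumer side of the return-address swap done above.  The generic
 * API requires rp->kp.addr to be set to the probed function's address
 * before registration; the handler then sees every return.  The names
 * here are assumptions made for the example.
 */
#if 0	/* sample client code, kept out of the build */
#include <linux/kprobes.h>

static int sample_ret_handler(struct kretprobe_instance *ri,
			      struct pt_regs *regs)
{
	/* On i386 the integer return value is in %eax at this point,
	 * and ri->ret_addr is the real (pre-swap) return address. */
	printk("return value 0x%lx, returning to %p\n",
	       regs->eax, ri->ret_addr);
	return 0;
}

static struct kretprobe sample_rp = {
	.handler = sample_ret_handler,
	.maxactive = 4,	/* instances for 4 concurrent activations */
};

/* After setting sample_rp.kp.addr to the target function's address:
 *	register_kretprobe(&sample_rp);
 * Activations missed because the instance pool was exhausted show up
 * in sample_rp.nmissed, incremented by arch_prepare_kretprobe() above.
 */
#endif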
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	unsigned long *lp;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/* Check if the application is using LDT entry for its code segment and
	 * calculate the address by reading the base address from the LDT entry.
	 */
	if ((regs->xcs & 4) && (current->mm)) {
		lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8)
					+ (char *) current->mm->context.ldt);
		addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip -
						sizeof(kprobe_opcode_t));
	} else {
		addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
	}
	/* Check we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_eflags;
				unlock_kprobes();
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			p->nmissed++;
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (regs->eflags & VM_MASK) {
			/* We are in virtual-8086 mode. Return 0 */
			goto no_kprobe;
		}

		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->eip -= sizeof(kprobe_opcode_t);
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	/*
	 * This preempt_disable() matches the preempt_enable_no_resched()
	 * in post_kprobe_handler().
	 */
	preempt_disable();
	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	return ret;
}
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here.  When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile ( ".global kretprobe_trampoline\n"
		       "kretprobe_trampoline: \n"
		       "	nop\n");
}
/*
 * Called when we hit the probe point at kretprobe_trampoline.
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->eip = orig_ret_address;

	reset_current_kprobe();
	unlock_kprobes();
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we have handled unlocking
	 * and re-enabling preemption.
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->esp;
	unsigned long next_eip = 0;
	unsigned long copy_eip = (unsigned long)&p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;

	switch (p->ainsn.insn[0]) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		break;
	case 0xc3:		/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
		regs->eflags &= ~TF_MASK;
		/* eip is already adjusted, no more changes required */
		return;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; eip is correct. */
			next_eip = regs->eip;
			*tos = orig_eip + (*tos - copy_eip);
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. */
			next_eip = regs->eip;
		}
		break;
	case 0xea:		/* jmp absolute -- eip is correct */
		next_eip = regs->eip;
		break;
	default:
		break;
	}

	regs->eflags &= ~TF_MASK;
	if (next_eip) {
		regs->eip = next_eip;
	} else {
		regs->eip = orig_eip + (regs->eip - copy_eip);
	}
}
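/*
 * Worked example (illustrative, with made-up addresses): suppose a
 * 5-byte "call rel32" at orig_eip is probed and its copy is single-
 * stepped at copy_eip.  The CPU pushed copy_eip + 5 as the return
 * address; the 0xe8 case above rewrites it so the callee returns to
 * the original call site, not into the copy.
 */
#if 0
static void fixup_example(void)
{
	unsigned long orig_eip = 0xc0100000;	/* probed call instruction */
	unsigned long copy_eip = 0xc8801000;	/* p->ainsn.insn copy */
	unsigned long pushed = copy_eip + 5;	/* pushed by the stepped call */
	unsigned long fixed = orig_eip + (pushed - copy_eip);
	/* fixed == 0xc0100005, the address following the original call */
}
#endif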
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.  And we hold kprobe lock.
 */
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_eflags;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	unlock_kprobes();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}
/* Interrupts disabled, kprobe_lock held. */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->eflags |= kcb->kprobe_old_eflags;

		reset_current_kprobe();
		unlock_kprobes();
		preempt_enable_no_resched();
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
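/*
 * For reference (a sketch of code that lives in kernel/kprobes.c, not
 * in this file): the generic layer hooks the callback above into the
 * die notifier chain roughly as follows, so int3 and debug traps reach
 * kprobes before any other notifier.
 */
#if 0
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff	/* we need to be notified first */
};

/* ... and in init_kprobes():
 *	err = register_die_notifier(&kprobe_exceptions_nb);
 */
#endif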
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->esp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->eip = (unsigned long)(jp->entry);
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchgl   %%ebx,%%esp     \n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_esp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != kcb->jprobe_saved_esp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_esp,
					    struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, kcb->jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
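/*
 * Illustrative only, not part of the original file: a jprobe client for
 * the setjmp/longjmp machinery above.  The proxy must mirror the probed
 * function's prototype so it can read the arguments in place; the names
 * and the signature are assumptions made for the example.
 */
#if 0	/* sample client code, kept out of the build */
#include <linux/kprobes.h>

/* Same signature as the (hypothetical) probed function. */
static int sample_jprobe_proxy(int fd, char __user *buf, size_t count)
{
	printk("called with fd=%d count=%zu\n", fd, count);
	jprobe_return();	/* mandatory: longjmp back, never return */
	return 0;		/* not reached */
}

static struct jprobe sample_jp = {
	.entry = (kprobe_opcode_t *) sample_jprobe_proxy,
};

/* After setting sample_jp.kp.addr to the probed function's address:
 *	register_jprobe(&sample_jp);
 * setjmp_pre_handler() then redirects entry into the proxy, and
 * longjmp_break_handler() restores the saved context afterwards.
 */
#endif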
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}