X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fkprobes.c;h=5240d75f4c60e95f95eb55e0cc46b448e2d6eb7c;hb=38dc63459f185795b24a39f3f4921a433ea9980b;hp=41dfda50e22a832f3df04e84aed7adbd4c218efe;hpb=62c27be0dd8144e11bd3ed054a0fb890579925f8;p=safe%2Fjmp%2Flinux-2.6 diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 41dfda5..5240d75 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -35,13 +35,20 @@ #include #include #include +#include #include #include #include +#include +#include +#include +#include +#include + #include #include #include -#include +#include #define KPROBE_HASH_BITS 6 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) @@ -56,17 +63,34 @@ addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) #endif +static int kprobes_initialized; static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; -static atomic_t kprobe_count; -DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ -DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ +/* NOTE: change this value only with kprobe_mutex held */ +static bool kprobes_all_disarmed; + +static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; +static struct { + spinlock_t lock ____cacheline_aligned_in_smp; +} kretprobe_table_locks[KPROBE_TABLE_SIZE]; -static struct notifier_block kprobe_page_fault_nb = { - .notifier_call = kprobe_exceptions_notify, - .priority = 0x7fffffff /* we need to notified first */ +static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) +{ + return &(kretprobe_table_locks[hash].lock); +} + +/* + * Normally, functions that we'd want to prohibit kprobes in, are marked + * __kprobes. But, there are cases where such functions already belong to + * a different section (__sched for preempt_schedule) + * + * For such cases, we now have a blacklist + */ +static struct kprobe_blackpoint kprobe_blacklist[] = { + {"preempt_schedule",}, + {NULL} /* Terminator */ }; #ifdef __ARCH_WANT_KPROBES_INSN_SLOT @@ -79,30 +103,63 @@ static struct notifier_block kprobe_page_fault_nb = { #define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) struct kprobe_insn_page { - struct hlist_node hlist; + struct list_head list; kprobe_opcode_t *insns; /* Page of instruction slots */ char slot_used[INSNS_PER_PAGE]; int nused; + int ngarbage; +}; + +enum kprobe_slot_state { + SLOT_CLEAN = 0, + SLOT_DIRTY = 1, + SLOT_USED = 2, }; -static struct hlist_head kprobe_insn_pages; +static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ +static LIST_HEAD(kprobe_insn_pages); +static int kprobe_garbage_slots; +static int collect_garbage_slots(void); + +static int __kprobes check_safety(void) +{ + int ret = 0; +#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER) + ret = freeze_processes(); + if (ret == 0) { + struct task_struct *p, *q; + do_each_thread(p, q) { + if (p != current && p->state == TASK_RUNNING && + p->pid != 0) { + printk("Check failed: %s is running\n",p->comm); + ret = -1; + goto loop_end; + } + } while_each_thread(p, q); + } +loop_end: + thaw_processes(); +#else + synchronize_sched(); +#endif + return ret; +} /** - * get_insn_slot() - Find a slot on an executable page for an instruction. + * __get_insn_slot() - Find a slot on an executable page for an instruction. * We allocate an executable page if there's no room on existing ones. 
*/ -kprobe_opcode_t __kprobes *get_insn_slot(void) +static kprobe_opcode_t __kprobes *__get_insn_slot(void) { struct kprobe_insn_page *kip; - struct hlist_node *pos; - hlist_for_each(pos, &kprobe_insn_pages) { - kip = hlist_entry(pos, struct kprobe_insn_page, hlist); + retry: + list_for_each_entry(kip, &kprobe_insn_pages, list) { if (kip->nused < INSNS_PER_PAGE) { int i; for (i = 0; i < INSNS_PER_PAGE; i++) { - if (!kip->slot_used[i]) { - kip->slot_used[i] = 1; + if (kip->slot_used[i] == SLOT_CLEAN) { + kip->slot_used[i] = SLOT_USED; kip->nused++; return kip->insns + (i * MAX_INSN_SIZE); } @@ -112,11 +169,14 @@ kprobe_opcode_t __kprobes *get_insn_slot(void) } } - /* All out of space. Need to allocate a new page. Use slot 0.*/ + /* If there are any garbage slots, collect it and try again. */ + if (kprobe_garbage_slots && collect_garbage_slots() == 0) { + goto retry; + } + /* All out of space. Need to allocate a new page. Use slot 0. */ kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL); - if (!kip) { + if (!kip) return NULL; - } /* * Use module_alloc so this page is within +/- 2GB of where the @@ -128,46 +188,91 @@ kprobe_opcode_t __kprobes *get_insn_slot(void) kfree(kip); return NULL; } - INIT_HLIST_NODE(&kip->hlist); - hlist_add_head(&kip->hlist, &kprobe_insn_pages); - memset(kip->slot_used, 0, INSNS_PER_PAGE); - kip->slot_used[0] = 1; + INIT_LIST_HEAD(&kip->list); + list_add(&kip->list, &kprobe_insn_pages); + memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE); + kip->slot_used[0] = SLOT_USED; kip->nused = 1; + kip->ngarbage = 0; return kip->insns; } -void __kprobes free_insn_slot(kprobe_opcode_t *slot) +kprobe_opcode_t __kprobes *get_insn_slot(void) +{ + kprobe_opcode_t *ret; + mutex_lock(&kprobe_insn_mutex); + ret = __get_insn_slot(); + mutex_unlock(&kprobe_insn_mutex); + return ret; +} + +/* Return 1 if all garbages are collected, otherwise 0. */ +static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) +{ + kip->slot_used[idx] = SLOT_CLEAN; + kip->nused--; + if (kip->nused == 0) { + /* + * Page is no longer in use. Free it unless + * it's the last one. We keep the last one + * so as not to have to set it up again the + * next time somebody inserts a probe. + */ + if (!list_is_singular(&kprobe_insn_pages)) { + list_del(&kip->list); + module_free(NULL, kip->insns); + kfree(kip); + } + return 1; + } + return 0; +} + +static int __kprobes collect_garbage_slots(void) +{ + struct kprobe_insn_page *kip, *next; + + /* Ensure no-one is preepmted on the garbages */ + if (check_safety()) + return -EAGAIN; + + list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) { + int i; + if (kip->ngarbage == 0) + continue; + kip->ngarbage = 0; /* we will collect all garbages */ + for (i = 0; i < INSNS_PER_PAGE; i++) { + if (kip->slot_used[i] == SLOT_DIRTY && + collect_one_slot(kip, i)) + break; + } + } + kprobe_garbage_slots = 0; + return 0; +} + +void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) { struct kprobe_insn_page *kip; - struct hlist_node *pos; - hlist_for_each(pos, &kprobe_insn_pages) { - kip = hlist_entry(pos, struct kprobe_insn_page, hlist); + mutex_lock(&kprobe_insn_mutex); + list_for_each_entry(kip, &kprobe_insn_pages, list) { if (kip->insns <= slot && slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { int i = (slot - kip->insns) / MAX_INSN_SIZE; - kip->slot_used[i] = 0; - kip->nused--; - if (kip->nused == 0) { - /* - * Page is no longer in use. Free it unless - * it's the last one. 
We keep the last one - * so as not to have to set it up again the - * next time somebody inserts a probe. - */ - hlist_del(&kip->hlist); - if (hlist_empty(&kprobe_insn_pages)) { - INIT_HLIST_NODE(&kip->hlist); - hlist_add_head(&kip->hlist, - &kprobe_insn_pages); - } else { - module_free(NULL, kip->insns); - kfree(kip); - } - } - return; + if (dirty) { + kip->slot_used[i] = SLOT_DIRTY; + kip->ngarbage++; + } else + collect_one_slot(kip, i); + break; } } + + if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE) + collect_garbage_slots(); + + mutex_unlock(&kprobe_insn_mutex); } #endif @@ -202,6 +307,22 @@ struct kprobe __kprobes *get_kprobe(void *addr) return NULL; } +/* Arm a kprobe with text_mutex */ +static void __kprobes arm_kprobe(struct kprobe *kp) +{ + mutex_lock(&text_mutex); + arch_arm_kprobe(kp); + mutex_unlock(&text_mutex); +} + +/* Disarm a kprobe with text_mutex */ +static void __kprobes disarm_kprobe(struct kprobe *kp) +{ + mutex_lock(&text_mutex); + arch_disarm_kprobe(kp); + mutex_unlock(&text_mutex); +} + /* * Aggregate handlers for multiple kprobes support - these handlers * take care of invoking the individual kprobe handlers on p->list @@ -211,7 +332,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) struct kprobe *kp; list_for_each_entry_rcu(kp, &p->list, list) { - if (kp->pre_handler) { + if (kp->pre_handler && likely(!kprobe_disabled(kp))) { set_kprobe_instance(kp); if (kp->pre_handler(kp, regs)) return 1; @@ -227,13 +348,12 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, struct kprobe *kp; list_for_each_entry_rcu(kp, &p->list, list) { - if (kp->post_handler) { + if (kp->post_handler && likely(!kprobe_disabled(kp))) { set_kprobe_instance(kp); kp->post_handler(kp, regs, flags); reset_kprobe_instance(); } } - return; } static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, @@ -278,65 +398,55 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) return; } -/* Called with kretprobe_lock held */ -struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp) +void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, + struct hlist_head *head) { - struct hlist_node *node; - struct kretprobe_instance *ri; - hlist_for_each_entry(ri, node, &rp->free_instances, uflist) - return ri; - return NULL; -} + struct kretprobe *rp = ri->rp; -/* Called with kretprobe_lock held */ -static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe - *rp) -{ - struct hlist_node *node; - struct kretprobe_instance *ri; - hlist_for_each_entry(ri, node, &rp->used_instances, uflist) - return ri; - return NULL; + /* remove rp inst off the rprobe_inst_table */ + hlist_del(&ri->hlist); + INIT_HLIST_NODE(&ri->hlist); + if (likely(rp)) { + spin_lock(&rp->lock); + hlist_add_head(&ri->hlist, &rp->free_instances); + spin_unlock(&rp->lock); + } else + /* Unregistering */ + hlist_add_head(&ri->hlist, head); } -/* Called with kretprobe_lock held */ -void __kprobes add_rp_inst(struct kretprobe_instance *ri) +void __kprobes kretprobe_hash_lock(struct task_struct *tsk, + struct hlist_head **head, unsigned long *flags) { - /* - * Remove rp inst off the free list - - * Add it back when probed function returns - */ - hlist_del(&ri->uflist); + unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); + spinlock_t *hlist_lock; - /* Add rp inst onto table */ - INIT_HLIST_NODE(&ri->hlist); - hlist_add_head(&ri->hlist, - &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]); + *head = 
&kretprobe_inst_table[hash]; + hlist_lock = kretprobe_table_lock_ptr(hash); + spin_lock_irqsave(hlist_lock, *flags); +} - /* Also add this rp inst to the used list. */ - INIT_HLIST_NODE(&ri->uflist); - hlist_add_head(&ri->uflist, &ri->rp->used_instances); +static void __kprobes kretprobe_table_lock(unsigned long hash, + unsigned long *flags) +{ + spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); + spin_lock_irqsave(hlist_lock, *flags); } -/* Called with kretprobe_lock held */ -void __kprobes recycle_rp_inst(struct kretprobe_instance *ri) +void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, + unsigned long *flags) { - /* remove rp inst off the rprobe_inst_table */ - hlist_del(&ri->hlist); - if (ri->rp) { - /* remove rp inst off the used list */ - hlist_del(&ri->uflist); - /* put rp inst back onto the free list */ - INIT_HLIST_NODE(&ri->uflist); - hlist_add_head(&ri->uflist, &ri->rp->free_instances); - } else - /* Unregistering */ - kfree(ri); + unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); + spinlock_t *hlist_lock; + + hlist_lock = kretprobe_table_lock_ptr(hash); + spin_unlock_irqrestore(hlist_lock, *flags); } -struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk) +void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags) { - return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; + spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); + spin_unlock_irqrestore(hlist_lock, *flags); } /* @@ -348,28 +458,60 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk) void __kprobes kprobe_flush_task(struct task_struct *tk) { struct kretprobe_instance *ri; - struct hlist_head *head; + struct hlist_head *head, empty_rp; struct hlist_node *node, *tmp; - unsigned long flags = 0; + unsigned long hash, flags = 0; + + if (unlikely(!kprobes_initialized)) + /* Early boot. kretprobe_table_locks not yet initialized. */ + return; - spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(tk); + hash = hash_ptr(tk, KPROBE_HASH_BITS); + head = &kretprobe_inst_table[hash]; + kretprobe_table_lock(hash, &flags); hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { if (ri->task == tk) - recycle_rp_inst(ri); + recycle_rp_inst(ri, &empty_rp); + } + kretprobe_table_unlock(hash, &flags); + INIT_HLIST_HEAD(&empty_rp); + hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); } - spin_unlock_irqrestore(&kretprobe_lock, flags); } static inline void free_rp_inst(struct kretprobe *rp) { struct kretprobe_instance *ri; - while ((ri = get_free_rp_inst(rp)) != NULL) { - hlist_del(&ri->uflist); + struct hlist_node *pos, *next; + + hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) { + hlist_del(&ri->hlist); kfree(ri); } } +static void __kprobes cleanup_rp_inst(struct kretprobe *rp) +{ + unsigned long flags, hash; + struct kretprobe_instance *ri; + struct hlist_node *pos, *next; + struct hlist_head *head; + + /* No race here */ + for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { + kretprobe_table_lock(hash, &flags); + head = &kretprobe_inst_table[hash]; + hlist_for_each_entry_safe(ri, pos, next, head, hlist) { + if (ri->rp == rp) + ri->rp = NULL; + } + kretprobe_table_unlock(hash, &flags); + } + free_rp_inst(rp); +} + /* * Keep all fields in the kprobe consistent */ @@ -380,20 +522,28 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) } /* -* Add the new probe to old_p->list. 
Fail if this is the +* Add the new probe to ap->list. Fail if this is the * second jprobe at the address - two jprobes can't coexist */ -static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p) +static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) { + BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); if (p->break_handler) { - if (old_p->break_handler) + if (ap->break_handler) return -EEXIST; - list_add_tail_rcu(&p->list, &old_p->list); - old_p->break_handler = aggr_break_handler; + list_add_tail_rcu(&p->list, &ap->list); + ap->break_handler = aggr_break_handler; } else - list_add_rcu(&p->list, &old_p->list); - if (p->post_handler && !old_p->post_handler) - old_p->post_handler = aggr_post_handler; + list_add_rcu(&p->list, &ap->list); + if (p->post_handler && !ap->post_handler) + ap->post_handler = aggr_post_handler; + + if (kprobe_disabled(ap) && !kprobe_disabled(p)) { + ap->flags &= ~KPROBE_FLAG_DISABLED; + if (!kprobes_all_disarmed) + /* Arm the breakpoint again. */ + arm_kprobe(ap); + } return 0; } @@ -406,11 +556,13 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) copy_kprobe(p, ap); flush_insn_slot(ap); ap->addr = p->addr; + ap->flags = p->flags; ap->pre_handler = aggr_pre_handler; ap->fault_handler = aggr_fault_handler; - if (p->post_handler) + /* We don't care the kprobe which has gone. */ + if (p->post_handler && !kprobe_gone(p)) ap->post_handler = aggr_post_handler; - if (p->break_handler) + if (p->break_handler && !kprobe_gone(p)) ap->break_handler = aggr_break_handler; INIT_LIST_HEAD(&ap->list); @@ -427,206 +579,375 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p) { int ret = 0; - struct kprobe *ap; + struct kprobe *ap = old_p; - if (old_p->pre_handler == aggr_pre_handler) { - copy_kprobe(old_p, p); - ret = add_new_kprobe(old_p, p); - } else { + if (old_p->pre_handler != aggr_pre_handler) { + /* If old_p is not an aggr_probe, create new aggr_kprobe. */ ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL); if (!ap) return -ENOMEM; add_aggr_kprobe(ap, old_p); - copy_kprobe(ap, p); - ret = add_new_kprobe(ap, p); } - return ret; + + if (kprobe_gone(ap)) { + /* + * Attempting to insert new probe at the same location that + * had a probe in the module vaddr area which already + * freed. So, the instruction slot has already been + * released. We need a new slot for the new probe. + */ + ret = arch_prepare_kprobe(ap); + if (ret) + /* + * Even if fail to allocate new slot, don't need to + * free aggr_probe. It will be used next time, or + * freed by unregister_kprobe. + */ + return ret; + + /* + * Clear gone flag to prevent allocating new slot again, and + * set disabled flag because it is not armed yet. + */ + ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) + | KPROBE_FLAG_DISABLED; + } + + copy_kprobe(ap, p); + return add_new_kprobe(ap, p); +} + +/* Try to disable aggr_kprobe, and return 1 if succeeded.*/ +static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p) +{ + struct kprobe *kp; + + list_for_each_entry_rcu(kp, &p->list, list) { + if (!kprobe_disabled(kp)) + /* + * There is an active probe on the list. + * We can't disable aggr_kprobe. 
+ */ + return 0; + } + p->flags |= KPROBE_FLAG_DISABLED; + return 1; } static int __kprobes in_kprobes_functions(unsigned long addr) { - if (addr >= (unsigned long)__kprobes_text_start - && addr < (unsigned long)__kprobes_text_end) + struct kprobe_blackpoint *kb; + + if (addr >= (unsigned long)__kprobes_text_start && + addr < (unsigned long)__kprobes_text_end) return -EINVAL; + /* + * If there exists a kprobe_blacklist, verify and + * fail any probe registration in the prohibited area + */ + for (kb = kprobe_blacklist; kb->name != NULL; kb++) { + if (kb->start_addr) { + if (addr >= kb->start_addr && + addr < (kb->start_addr + kb->range)) + return -EINVAL; + } + } return 0; } -static int __kprobes __register_kprobe(struct kprobe *p, - unsigned long called_from) +/* + * If we have a symbol_name argument, look it up and add the offset field + * to it. This way, we can specify a relative address to a symbol. + */ +static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) +{ + kprobe_opcode_t *addr = p->addr; + if (p->symbol_name) { + if (addr) + return NULL; + kprobe_lookup_name(p->symbol_name, addr); + } + + if (!addr) + return NULL; + return (kprobe_opcode_t *)(((char *)addr) + p->offset); +} + +int __kprobes register_kprobe(struct kprobe *p) { int ret = 0; struct kprobe *old_p; struct module *probed_mod; + kprobe_opcode_t *addr; - /* - * If we have a symbol_name argument look it up, - * and add it to the address. That way the addr - * field can either be global or relative to a symbol. - */ - if (p->symbol_name) { - if (p->addr) - return -EINVAL; - kprobe_lookup_name(p->symbol_name, p->addr); - } - - if (!p->addr) + addr = kprobe_addr(p); + if (!addr) return -EINVAL; - p->addr = (kprobe_opcode_t *)(((char *)p->addr)+ p->offset); + p->addr = addr; - if ((!kernel_text_address((unsigned long) p->addr)) || - in_kprobes_functions((unsigned long) p->addr)) + preempt_disable(); + if (!kernel_text_address((unsigned long) p->addr) || + in_kprobes_functions((unsigned long) p->addr)) { + preempt_enable(); return -EINVAL; + } - p->mod_refcounted = 0; - /* Check are we probing a module */ - if ((probed_mod = module_text_address((unsigned long) p->addr))) { - struct module *calling_mod = module_text_address(called_from); - /* We must allow modules to probe themself and - * in this case avoid incrementing the module refcount, - * so as to allow unloading of self probing modules. + /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ + p->flags &= KPROBE_FLAG_DISABLED; + + /* + * Check if are we probing a module. + */ + probed_mod = __module_text_address((unsigned long) p->addr); + if (probed_mod) { + /* + * We must hold a refcount of the probed module while updating + * its code to prohibit unexpected unloading. */ - if (calling_mod && (calling_mod != probed_mod)) { - if (unlikely(!try_module_get(probed_mod))) - return -EINVAL; - p->mod_refcounted = 1; - } else - probed_mod = NULL; + if (unlikely(!try_module_get(probed_mod))) { + preempt_enable(); + return -EINVAL; + } + /* + * If the module freed .init.text, we couldn't insert + * kprobes in there. 
+ */ + if (within_module_init((unsigned long)p->addr, probed_mod) && + probed_mod->state != MODULE_STATE_COMING) { + module_put(probed_mod); + preempt_enable(); + return -EINVAL; + } } + preempt_enable(); p->nmissed = 0; + INIT_LIST_HEAD(&p->list); mutex_lock(&kprobe_mutex); old_p = get_kprobe(p->addr); if (old_p) { ret = register_aggr_kprobe(old_p, p); - if (!ret) - atomic_inc(&kprobe_count); goto out; } - if ((ret = arch_prepare_kprobe(p)) != 0) - goto out; + mutex_lock(&text_mutex); + ret = arch_prepare_kprobe(p); + if (ret) + goto out_unlock_text; INIT_HLIST_NODE(&p->hlist); hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); - if (atomic_add_return(1, &kprobe_count) == \ - (ARCH_INACTIVE_KPROBE_COUNT + 1)) - register_page_fault_notifier(&kprobe_page_fault_nb); - - arch_arm_kprobe(p); + if (!kprobes_all_disarmed && !kprobe_disabled(p)) + arch_arm_kprobe(p); +out_unlock_text: + mutex_unlock(&text_mutex); out: mutex_unlock(&kprobe_mutex); - if (ret && probed_mod) + if (probed_mod) module_put(probed_mod); - return ret; -} -int __kprobes register_kprobe(struct kprobe *p) -{ - return __register_kprobe(p, - (unsigned long)__builtin_return_address(0)); + return ret; } +EXPORT_SYMBOL_GPL(register_kprobe); -void __kprobes unregister_kprobe(struct kprobe *p) +/* Check passed kprobe is valid and return kprobe in kprobe_table. */ +static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) { - struct module *mod; struct kprobe *old_p, *list_p; - int cleanup_p; - mutex_lock(&kprobe_mutex); old_p = get_kprobe(p->addr); - if (unlikely(!old_p)) { - mutex_unlock(&kprobe_mutex); - return; - } + if (unlikely(!old_p)) + return NULL; + if (p != old_p) { list_for_each_entry_rcu(list_p, &old_p->list, list) if (list_p == p) /* kprobe p is a valid probe */ - goto valid_p; - mutex_unlock(&kprobe_mutex); - return; + goto valid; + return NULL; } -valid_p: - if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) && - (p->list.next == &old_p->list) && - (p->list.prev == &old_p->list))) { - /* Only probe on the hash list */ - arch_disarm_kprobe(p); +valid: + return old_p; +} + +/* + * Unregister a kprobe without a scheduler synchronization. + */ +static int __kprobes __unregister_kprobe_top(struct kprobe *p) +{ + struct kprobe *old_p, *list_p; + + old_p = __get_valid_kprobe(p); + if (old_p == NULL) + return -EINVAL; + + if (old_p == p || + (old_p->pre_handler == aggr_pre_handler && + list_is_singular(&old_p->list))) { + /* + * Only probe on the hash list. Disarm only if kprobes are + * enabled and not gone - otherwise, the breakpoint would + * already have been removed. We save on flushing icache. 
+ */ + if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) + disarm_kprobe(p); hlist_del_rcu(&old_p->hlist); - cleanup_p = 1; } else { + if (p->break_handler && !kprobe_gone(p)) + old_p->break_handler = NULL; + if (p->post_handler && !kprobe_gone(p)) { + list_for_each_entry_rcu(list_p, &old_p->list, list) { + if ((list_p != p) && (list_p->post_handler)) + goto noclean; + } + old_p->post_handler = NULL; + } +noclean: list_del_rcu(&p->list); - cleanup_p = 0; + if (!kprobe_disabled(old_p)) { + try_to_disable_aggr_kprobe(old_p); + if (!kprobes_all_disarmed && kprobe_disabled(old_p)) + disarm_kprobe(old_p); + } } + return 0; +} - mutex_unlock(&kprobe_mutex); +static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) +{ + struct kprobe *old_p; - synchronize_sched(); - if (p->mod_refcounted && - (mod = module_text_address((unsigned long)p->addr))) - module_put(mod); - - if (cleanup_p) { - if (p != old_p) { - list_del_rcu(&p->list); - kfree(old_p); - } + if (list_empty(&p->list)) arch_remove_kprobe(p); - } else { - mutex_lock(&kprobe_mutex); - if (p->break_handler) - old_p->break_handler = NULL; - if (p->post_handler){ - list_for_each_entry_rcu(list_p, &old_p->list, list){ - if (list_p->post_handler){ - cleanup_p = 2; - break; - } - } - if (cleanup_p == 0) - old_p->post_handler = NULL; + else if (list_is_singular(&p->list)) { + /* "p" is the last child of an aggr_kprobe */ + old_p = list_entry(p->list.next, struct kprobe, list); + list_del(&p->list); + arch_remove_kprobe(old_p); + kfree(old_p); + } +} + +int __kprobes register_kprobes(struct kprobe **kps, int num) +{ + int i, ret = 0; + + if (num <= 0) + return -EINVAL; + for (i = 0; i < num; i++) { + ret = register_kprobe(kps[i]); + if (ret < 0) { + if (i > 0) + unregister_kprobes(kps, i); + break; } - mutex_unlock(&kprobe_mutex); } + return ret; +} +EXPORT_SYMBOL_GPL(register_kprobes); - /* Call unregister_page_fault_notifier() - * if no probes are active - */ +void __kprobes unregister_kprobe(struct kprobe *p) +{ + unregister_kprobes(&p, 1); +} +EXPORT_SYMBOL_GPL(unregister_kprobe); + +void __kprobes unregister_kprobes(struct kprobe **kps, int num) +{ + int i; + + if (num <= 0) + return; mutex_lock(&kprobe_mutex); - if (atomic_add_return(-1, &kprobe_count) == \ - ARCH_INACTIVE_KPROBE_COUNT) - unregister_page_fault_notifier(&kprobe_page_fault_nb); + for (i = 0; i < num; i++) + if (__unregister_kprobe_top(kps[i]) < 0) + kps[i]->addr = NULL; mutex_unlock(&kprobe_mutex); - return; + + synchronize_sched(); + for (i = 0; i < num; i++) + if (kps[i]->addr) + __unregister_kprobe_bottom(kps[i]); } +EXPORT_SYMBOL_GPL(unregister_kprobes); static struct notifier_block kprobe_exceptions_nb = { .notifier_call = kprobe_exceptions_notify, .priority = 0x7fffffff /* we need to be notified first */ }; +unsigned long __weak arch_deref_entry_point(void *entry) +{ + return (unsigned long)entry; +} -int __kprobes register_jprobe(struct jprobe *jp) +int __kprobes register_jprobes(struct jprobe **jps, int num) { - /* Todo: Verify probepoint is a function entry point */ - jp->kp.pre_handler = setjmp_pre_handler; - jp->kp.break_handler = longjmp_break_handler; + struct jprobe *jp; + int ret = 0, i; - return __register_kprobe(&jp->kp, - (unsigned long)__builtin_return_address(0)); + if (num <= 0) + return -EINVAL; + for (i = 0; i < num; i++) { + unsigned long addr; + jp = jps[i]; + addr = arch_deref_entry_point(jp->entry); + + if (!kernel_text_address(addr)) + ret = -EINVAL; + else { + /* Todo: Verify probepoint is a function entry point */ + jp->kp.pre_handler 
= setjmp_pre_handler; + jp->kp.break_handler = longjmp_break_handler; + ret = register_kprobe(&jp->kp); + } + if (ret < 0) { + if (i > 0) + unregister_jprobes(jps, i); + break; + } + } + return ret; +} +EXPORT_SYMBOL_GPL(register_jprobes); + +int __kprobes register_jprobe(struct jprobe *jp) +{ + return register_jprobes(&jp, 1); } +EXPORT_SYMBOL_GPL(register_jprobe); void __kprobes unregister_jprobe(struct jprobe *jp) { - unregister_kprobe(&jp->kp); + unregister_jprobes(&jp, 1); } +EXPORT_SYMBOL_GPL(unregister_jprobe); + +void __kprobes unregister_jprobes(struct jprobe **jps, int num) +{ + int i; -#ifdef ARCH_SUPPORTS_KRETPROBES + if (num <= 0) + return; + mutex_lock(&kprobe_mutex); + for (i = 0; i < num; i++) + if (__unregister_kprobe_top(&jps[i]->kp) < 0) + jps[i]->kp.addr = NULL; + mutex_unlock(&kprobe_mutex); + + synchronize_sched(); + for (i = 0; i < num; i++) { + if (jps[i]->kp.addr) + __unregister_kprobe_bottom(&jps[i]->kp); + } +} +EXPORT_SYMBOL_GPL(unregister_jprobes); +#ifdef CONFIG_KRETPROBES /* * This kprobe pre_handler is registered with every kretprobe. When probe * hits it will set up the return probe. @@ -635,12 +956,35 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) { struct kretprobe *rp = container_of(p, struct kretprobe, kp); - unsigned long flags = 0; + unsigned long hash, flags = 0; + struct kretprobe_instance *ri; /*TODO: consider to only swap the RA after the last pre_handler fired */ - spin_lock_irqsave(&kretprobe_lock, flags); - arch_prepare_kretprobe(rp, regs); - spin_unlock_irqrestore(&kretprobe_lock, flags); + hash = hash_ptr(current, KPROBE_HASH_BITS); + spin_lock_irqsave(&rp->lock, flags); + if (!hlist_empty(&rp->free_instances)) { + ri = hlist_entry(rp->free_instances.first, + struct kretprobe_instance, hlist); + hlist_del(&ri->hlist); + spin_unlock_irqrestore(&rp->lock, flags); + + ri->rp = rp; + ri->task = current; + + if (rp->entry_handler && rp->entry_handler(ri, regs)) + return 0; + + arch_prepare_kretprobe(ri, regs); + + /* XXX(hch): why is there no hlist_move_head? 
*/ + INIT_HLIST_NODE(&ri->hlist); + kretprobe_table_lock(hash, &flags); + hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]); + kretprobe_table_unlock(hash, &flags); + } else { + rp->nmissed++; + spin_unlock_irqrestore(&rp->lock, flags); + } return 0; } @@ -649,6 +993,18 @@ int __kprobes register_kretprobe(struct kretprobe *rp) int ret = 0; struct kretprobe_instance *inst; int i; + void *addr; + + if (kretprobe_blacklist_size) { + addr = kprobe_addr(&rp->kp); + if (!addr) + return -EINVAL; + + for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { + if (kretprobe_blacklist[i].addr == addr) + return -EINVAL; + } + } rp->kp.pre_handler = pre_handler_kretprobe; rp->kp.post_handler = NULL; @@ -663,77 +1019,537 @@ int __kprobes register_kretprobe(struct kretprobe *rp) rp->maxactive = NR_CPUS; #endif } - INIT_HLIST_HEAD(&rp->used_instances); + spin_lock_init(&rp->lock); INIT_HLIST_HEAD(&rp->free_instances); for (i = 0; i < rp->maxactive; i++) { - inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL); + inst = kmalloc(sizeof(struct kretprobe_instance) + + rp->data_size, GFP_KERNEL); if (inst == NULL) { free_rp_inst(rp); return -ENOMEM; } - INIT_HLIST_NODE(&inst->uflist); - hlist_add_head(&inst->uflist, &rp->free_instances); + INIT_HLIST_NODE(&inst->hlist); + hlist_add_head(&inst->hlist, &rp->free_instances); } rp->nmissed = 0; /* Establish function entry probe point */ - if ((ret = __register_kprobe(&rp->kp, - (unsigned long)__builtin_return_address(0))) != 0) + ret = register_kprobe(&rp->kp); + if (ret != 0) free_rp_inst(rp); return ret; } +EXPORT_SYMBOL_GPL(register_kretprobe); + +int __kprobes register_kretprobes(struct kretprobe **rps, int num) +{ + int ret = 0, i; + + if (num <= 0) + return -EINVAL; + for (i = 0; i < num; i++) { + ret = register_kretprobe(rps[i]); + if (ret < 0) { + if (i > 0) + unregister_kretprobes(rps, i); + break; + } + } + return ret; +} +EXPORT_SYMBOL_GPL(register_kretprobes); -#else /* ARCH_SUPPORTS_KRETPROBES */ +void __kprobes unregister_kretprobe(struct kretprobe *rp) +{ + unregister_kretprobes(&rp, 1); +} +EXPORT_SYMBOL_GPL(unregister_kretprobe); +void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) +{ + int i; + + if (num <= 0) + return; + mutex_lock(&kprobe_mutex); + for (i = 0; i < num; i++) + if (__unregister_kprobe_top(&rps[i]->kp) < 0) + rps[i]->kp.addr = NULL; + mutex_unlock(&kprobe_mutex); + + synchronize_sched(); + for (i = 0; i < num; i++) { + if (rps[i]->kp.addr) { + __unregister_kprobe_bottom(&rps[i]->kp); + cleanup_rp_inst(rps[i]); + } + } +} +EXPORT_SYMBOL_GPL(unregister_kretprobes); + +#else /* CONFIG_KRETPROBES */ int __kprobes register_kretprobe(struct kretprobe *rp) { return -ENOSYS; } +EXPORT_SYMBOL_GPL(register_kretprobe); -#endif /* ARCH_SUPPORTS_KRETPROBES */ +int __kprobes register_kretprobes(struct kretprobe **rps, int num) +{ + return -ENOSYS; +} +EXPORT_SYMBOL_GPL(register_kretprobes); void __kprobes unregister_kretprobe(struct kretprobe *rp) { - unsigned long flags; - struct kretprobe_instance *ri; +} +EXPORT_SYMBOL_GPL(unregister_kretprobe); - unregister_kprobe(&rp->kp); - /* No race here */ - spin_lock_irqsave(&kretprobe_lock, flags); - while ((ri = get_used_rp_inst(rp)) != NULL) { - ri->rp = NULL; - hlist_del(&ri->uflist); +void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) +{ +} +EXPORT_SYMBOL_GPL(unregister_kretprobes); + +static int __kprobes pre_handler_kretprobe(struct kprobe *p, + struct pt_regs *regs) +{ + return 0; +} + +#endif /* CONFIG_KRETPROBES */ + +/* Set the kprobe gone 
and remove its instruction buffer. */ +static void __kprobes kill_kprobe(struct kprobe *p) +{ + struct kprobe *kp; + + p->flags |= KPROBE_FLAG_GONE; + if (p->pre_handler == aggr_pre_handler) { + /* + * If this is an aggr_kprobe, we have to list all the + * chained probes and mark them GONE. + */ + list_for_each_entry_rcu(kp, &p->list, list) + kp->flags |= KPROBE_FLAG_GONE; + p->post_handler = NULL; + p->break_handler = NULL; } - spin_unlock_irqrestore(&kretprobe_lock, flags); - free_rp_inst(rp); + /* + * Here, we can remove insn_slot safely, because no thread calls + * the original probed function (which will be freed soon) any more. + */ + arch_remove_kprobe(p); +} + +/* Module notifier call back, checking kprobes on the module */ +static int __kprobes kprobes_module_callback(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct module *mod = data; + struct hlist_head *head; + struct hlist_node *node; + struct kprobe *p; + unsigned int i; + int checkcore = (val == MODULE_STATE_GOING); + + if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) + return NOTIFY_DONE; + + /* + * When MODULE_STATE_GOING was notified, both of module .text and + * .init.text sections would be freed. When MODULE_STATE_LIVE was + * notified, only .init.text section would be freed. We need to + * disable kprobes which have been inserted in the sections. + */ + mutex_lock(&kprobe_mutex); + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { + head = &kprobe_table[i]; + hlist_for_each_entry_rcu(p, node, head, hlist) + if (within_module_init((unsigned long)p->addr, mod) || + (checkcore && + within_module_core((unsigned long)p->addr, mod))) { + /* + * The vaddr this probe is installed will soon + * be vfreed buy not synced to disk. Hence, + * disarming the breakpoint isn't needed. + */ + kill_kprobe(p); + } + } + mutex_unlock(&kprobe_mutex); + return NOTIFY_DONE; } +static struct notifier_block kprobe_module_nb = { + .notifier_call = kprobes_module_callback, + .priority = 0 +}; + static int __init init_kprobes(void) { int i, err = 0; + unsigned long offset = 0, size = 0; + char *modname, namebuf[128]; + const char *symbol_name; + void *addr; + struct kprobe_blackpoint *kb; /* FIXME allocate the probe table, currently defined statically */ /* initialize all list heads */ for (i = 0; i < KPROBE_TABLE_SIZE; i++) { INIT_HLIST_HEAD(&kprobe_table[i]); INIT_HLIST_HEAD(&kretprobe_inst_table[i]); + spin_lock_init(&(kretprobe_table_locks[i].lock)); + } + + /* + * Lookup and populate the kprobe_blacklist. + * + * Unlike the kretprobe blacklist, we'll need to determine + * the range of addresses that belong to the said functions, + * since a kprobe need not necessarily be at the beginning + * of a function. 
+ */ + for (kb = kprobe_blacklist; kb->name != NULL; kb++) { + kprobe_lookup_name(kb->name, addr); + if (!addr) + continue; + + kb->start_addr = (unsigned long)addr; + symbol_name = kallsyms_lookup(kb->start_addr, + &size, &offset, &modname, namebuf); + if (!symbol_name) + kb->range = 0; + else + kb->range = size; + } + + if (kretprobe_blacklist_size) { + /* lookup the function address from its name */ + for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { + kprobe_lookup_name(kretprobe_blacklist[i].name, + kretprobe_blacklist[i].addr); + if (!kretprobe_blacklist[i].addr) + printk("kretprobe: lookup failed: %s\n", + kretprobe_blacklist[i].name); + } } - atomic_set(&kprobe_count, 0); + + /* By default, kprobes are armed */ + kprobes_all_disarmed = false; err = arch_init_kprobes(); if (!err) err = register_die_notifier(&kprobe_exceptions_nb); + if (!err) + err = register_module_notifier(&kprobe_module_nb); + + kprobes_initialized = (err == 0); + if (!err) + init_test_probes(); return err; } -__initcall(init_kprobes); +#ifdef CONFIG_DEBUG_FS +static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, + const char *sym, int offset,char *modname) +{ + char *kprobe_type; + + if (p->pre_handler == pre_handler_kretprobe) + kprobe_type = "r"; + else if (p->pre_handler == setjmp_pre_handler) + kprobe_type = "j"; + else + kprobe_type = "k"; + if (sym) + seq_printf(pi, "%p %s %s+0x%x %s %s%s\n", + p->addr, kprobe_type, sym, offset, + (modname ? modname : " "), + (kprobe_gone(p) ? "[GONE]" : ""), + ((kprobe_disabled(p) && !kprobe_gone(p)) ? + "[DISABLED]" : "")); + else + seq_printf(pi, "%p %s %p %s%s\n", + p->addr, kprobe_type, p->addr, + (kprobe_gone(p) ? "[GONE]" : ""), + ((kprobe_disabled(p) && !kprobe_gone(p)) ? + "[DISABLED]" : "")); +} + +static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) +{ + return (*pos < KPROBE_TABLE_SIZE) ? 
pos : NULL; +} -EXPORT_SYMBOL_GPL(register_kprobe); -EXPORT_SYMBOL_GPL(unregister_kprobe); -EXPORT_SYMBOL_GPL(register_jprobe); -EXPORT_SYMBOL_GPL(unregister_jprobe); -EXPORT_SYMBOL_GPL(jprobe_return); -EXPORT_SYMBOL_GPL(register_kretprobe); -EXPORT_SYMBOL_GPL(unregister_kretprobe); +static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) +{ + (*pos)++; + if (*pos >= KPROBE_TABLE_SIZE) + return NULL; + return pos; +} +static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v) +{ + /* Nothing to do */ +} + +static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) +{ + struct hlist_head *head; + struct hlist_node *node; + struct kprobe *p, *kp; + const char *sym = NULL; + unsigned int i = *(loff_t *) v; + unsigned long offset = 0; + char *modname, namebuf[128]; + + head = &kprobe_table[i]; + preempt_disable(); + hlist_for_each_entry_rcu(p, node, head, hlist) { + sym = kallsyms_lookup((unsigned long)p->addr, NULL, + &offset, &modname, namebuf); + if (p->pre_handler == aggr_pre_handler) { + list_for_each_entry_rcu(kp, &p->list, list) + report_probe(pi, kp, sym, offset, modname); + } else + report_probe(pi, p, sym, offset, modname); + } + preempt_enable(); + return 0; +} + +static const struct seq_operations kprobes_seq_ops = { + .start = kprobe_seq_start, + .next = kprobe_seq_next, + .stop = kprobe_seq_stop, + .show = show_kprobe_addr +}; + +static int __kprobes kprobes_open(struct inode *inode, struct file *filp) +{ + return seq_open(filp, &kprobes_seq_ops); +} + +static const struct file_operations debugfs_kprobes_operations = { + .open = kprobes_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* Disable one kprobe */ +int __kprobes disable_kprobe(struct kprobe *kp) +{ + int ret = 0; + struct kprobe *p; + + mutex_lock(&kprobe_mutex); + + /* Check whether specified probe is valid. */ + p = __get_valid_kprobe(kp); + if (unlikely(p == NULL)) { + ret = -EINVAL; + goto out; + } + + /* If the probe is already disabled (or gone), just return */ + if (kprobe_disabled(kp)) + goto out; + + kp->flags |= KPROBE_FLAG_DISABLED; + if (p != kp) + /* When kp != p, p is always enabled. */ + try_to_disable_aggr_kprobe(p); + + if (!kprobes_all_disarmed && kprobe_disabled(p)) + disarm_kprobe(p); +out: + mutex_unlock(&kprobe_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(disable_kprobe); + +/* Enable one kprobe */ +int __kprobes enable_kprobe(struct kprobe *kp) +{ + int ret = 0; + struct kprobe *p; + + mutex_lock(&kprobe_mutex); + + /* Check whether specified probe is valid. */ + p = __get_valid_kprobe(kp); + if (unlikely(p == NULL)) { + ret = -EINVAL; + goto out; + } + + if (kprobe_gone(kp)) { + /* This kprobe has gone, we couldn't enable it. 
*/ + ret = -EINVAL; + goto out; + } + + if (!kprobes_all_disarmed && kprobe_disabled(p)) + arm_kprobe(p); + + p->flags &= ~KPROBE_FLAG_DISABLED; + if (p != kp) + kp->flags &= ~KPROBE_FLAG_DISABLED; +out: + mutex_unlock(&kprobe_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(enable_kprobe); + +static void __kprobes arm_all_kprobes(void) +{ + struct hlist_head *head; + struct hlist_node *node; + struct kprobe *p; + unsigned int i; + + mutex_lock(&kprobe_mutex); + + /* If kprobes are armed, just return */ + if (!kprobes_all_disarmed) + goto already_enabled; + + mutex_lock(&text_mutex); + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { + head = &kprobe_table[i]; + hlist_for_each_entry_rcu(p, node, head, hlist) + if (!kprobe_disabled(p)) + arch_arm_kprobe(p); + } + mutex_unlock(&text_mutex); + + kprobes_all_disarmed = false; + printk(KERN_INFO "Kprobes globally enabled\n"); + +already_enabled: + mutex_unlock(&kprobe_mutex); + return; +} + +static void __kprobes disarm_all_kprobes(void) +{ + struct hlist_head *head; + struct hlist_node *node; + struct kprobe *p; + unsigned int i; + + mutex_lock(&kprobe_mutex); + + /* If kprobes are already disarmed, just return */ + if (kprobes_all_disarmed) + goto already_disabled; + + kprobes_all_disarmed = true; + printk(KERN_INFO "Kprobes globally disabled\n"); + mutex_lock(&text_mutex); + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { + head = &kprobe_table[i]; + hlist_for_each_entry_rcu(p, node, head, hlist) { + if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) + arch_disarm_kprobe(p); + } + } + + mutex_unlock(&text_mutex); + mutex_unlock(&kprobe_mutex); + /* Allow all currently running kprobes to complete */ + synchronize_sched(); + return; + +already_disabled: + mutex_unlock(&kprobe_mutex); + return; +} + +/* + * XXX: The debugfs bool file interface doesn't allow for callbacks + * when the bool state is switched. We can reuse that facility when + * available + */ +static ssize_t read_enabled_file_bool(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buf[3]; + + if (!kprobes_all_disarmed) + buf[0] = '1'; + else + buf[0] = '0'; + buf[1] = '\n'; + buf[2] = 0x00; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t write_enabled_file_bool(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + char buf[32]; + int buf_size; + + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + switch (buf[0]) { + case 'y': + case 'Y': + case '1': + arm_all_kprobes(); + break; + case 'n': + case 'N': + case '0': + disarm_all_kprobes(); + break; + } + + return count; +} + +static const struct file_operations fops_kp = { + .read = read_enabled_file_bool, + .write = write_enabled_file_bool, +}; + +static int __kprobes debugfs_kprobe_init(void) +{ + struct dentry *dir, *file; + unsigned int value = 1; + + dir = debugfs_create_dir("kprobes", NULL); + if (!dir) + return -ENOMEM; + + file = debugfs_create_file("list", 0444, dir, NULL, + &debugfs_kprobes_operations); + if (!file) { + debugfs_remove(dir); + return -ENOMEM; + } + + file = debugfs_create_file("enabled", 0600, dir, + &value, &fops_kp); + if (!file) { + debugfs_remove(dir); + return -ENOMEM; + } + + return 0; +} + +late_initcall(debugfs_kprobe_init); +#endif /* CONFIG_DEBUG_FS */ + +module_init(init_kprobes); + +/* defined in arch/.../kernel/kprobes.c */ +EXPORT_SYMBOL_GPL(jprobe_return);
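
For context on the kretprobe rework above (per-kretprobe `free_instances` list with its own spinlock, the hashed `kretprobe_inst_table` locking, and the `entry_handler`/`data_size` fields consumed in `pre_handler_kretprobe()` and `register_kretprobe()`), here is a minimal usage sketch. It is not part of the patch; the probed symbol `do_fork`, the module names, and the payload struct are illustrative assumptions only.

/*
 * Illustrative sketch, not part of the commit above: a small module that
 * registers one kretprobe using the per-instance data and entry_handler
 * support visible in register_kretprobe()/pre_handler_kretprobe().
 * The probe target "do_fork" is just an example symbol.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ktime.h>
#include <linux/ptrace.h>

/* Per-instance payload; rp.data_size reserves this inside each ri. */
struct my_data {
	ktime_t entry_time;
};

/* Runs on function entry; ri->data points at our my_data slot. */
static int my_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct my_data *data = (struct my_data *)ri->data;

	data->entry_time = ktime_get();
	return 0;	/* 0: go ahead and arm the return probe */
}

/* Runs when the probed function returns. */
static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct my_data *data = (struct my_data *)ri->data;
	s64 delta = ktime_to_ns(ktime_sub(ktime_get(), data->entry_time));

	printk(KERN_INFO "probed function returned %lu, took %lld ns\n",
	       regs_return_value(regs), delta);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= my_ret_handler,
	.entry_handler	= my_entry_handler,
	.data_size	= sizeof(struct my_data),
	.maxactive	= 20,			/* sizes rp->free_instances */
	.kp.symbol_name	= "do_fork",		/* resolved via kprobe_addr() */
};

static int __init my_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit my_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
	printk(KERN_INFO "missed %d probe instances\n", my_kretprobe.nmissed);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

With this patch applied, the instances created from `maxactive` live on `rp->free_instances` guarded by `rp->lock`, and active instances are tracked in the task-hashed `kretprobe_inst_table` buckets rather than under the old global `kretprobe_lock`.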