[PATCH] kernel/kprobes.c: fix a warning #ifndef ARCH_SUPPORTS_KRETPROBES
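
Replace the global kprobe_lock with two narrower primitives: kprobe_mutex,
which serializes registration/unregistration and protects kprobe_table, and
kretprobe_lock, which protects kretprobe_inst_table. The kprobe hash table
and the per-address aggregate lists are converted to RCU (_rcu list ops plus
synchronize_sched() before freeing), so breakpoint handlers can walk them
without taking any lock; they already run with preemption disabled, which is
all synchronize_sched() requires. pre_handler_kretprobe() moves inside
#ifdef ARCH_SUPPORTS_KRETPROBES, which silences the "defined but not used"
warning when an architecture lacks kretprobe support, and __register_kprobe()
now takes a reference on a probed module unless the module is probing itself.

A condensed, illustrative sketch of the resulting locking pattern, assembled
from the hunks below (not itself part of the patch):

	/* Writer side: register/unregister, may sleep */
	down(&kprobe_mutex);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
	up(&kprobe_mutex);

	/* Reader side: breakpoint handler, preemption already disabled */
	hlist_for_each_entry_rcu(p, node, head, hlist)
		if (p->addr == addr)
			return p;

	/* Removal: unlink, wait for in-flight handlers, then free */
	hlist_del_rcu(&old_p->hlist);
	up(&kprobe_mutex);
	synchronize_sched();
	kfree(old_p);

	/* kretprobe instance lists keep a spinlock, taken irqs-off */
	spin_lock_irqsave(&kretprobe_lock, flags);
	/* ... add_rp_inst() / recycle_rp_inst() ... */
	spin_unlock_irqrestore(&kretprobe_lock, flags);
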
index 6da8f9b..95ad7f8 100644
@@ -32,7 +32,6 @@
  *             <prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
-#include <linux/spinlock.h>
 #include <linux/hash.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-unsigned int kprobe_cpu = NR_CPUS;
-static DEFINE_SPINLOCK(kprobe_lock);
+DECLARE_MUTEX(kprobe_mutex);           /* Protects kprobe_table */
+DEFINE_SPINLOCK(kretprobe_lock);       /* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
  * single-stepped. x86_64, POWER4 and above have no-exec support and
@@ -152,41 +152,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
                }
        }
 }
-
-/* Locks kprobe: irqs must be disabled */
-void __kprobes lock_kprobes(void)
-{
-       unsigned long flags = 0;
-
-       /* Avoiding local interrupts to happen right after we take the kprobe_lock
-        * and before we get a chance to update kprobe_cpu, this to prevent
-        * deadlock when we have a kprobe on ISR routine and a kprobe on task
-        * routine
-        */
-       local_irq_save(flags);
-
-       spin_lock(&kprobe_lock);
-       kprobe_cpu = smp_processor_id();
-
-       local_irq_restore(flags);
-}
-
-void __kprobes unlock_kprobes(void)
-{
-       unsigned long flags = 0;
-
-       /* Avoiding local interrupts to happen right after we update
-        * kprobe_cpu and before we get a a chance to release kprobe_lock,
-        * this to prevent deadlock when we have a kprobe on ISR routine and
-        * a kprobe on task routine
-        */
-       local_irq_save(flags);
-
-       kprobe_cpu = NR_CPUS;
-       spin_unlock(&kprobe_lock);
-
-       local_irq_restore(flags);
-}
+#endif
 
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
@@ -199,15 +165,20 @@ static inline void reset_kprobe_instance(void)
        __get_cpu_var(kprobe_instance) = NULL;
 }
 
-/* You have to be holding the kprobe_lock */
+/*
+ * This routine is called either:
+ *     - under the kprobe_mutex - during kprobe_[un]register()
+ *                             OR
+ *     - with preemption disabled - from arch/xxx/kernel/kprobes.c
+ */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
        struct hlist_head *head;
        struct hlist_node *node;
+       struct kprobe *p;
 
        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-       hlist_for_each(node, head) {
-               struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
+       hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
@@ -222,7 +193,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct kprobe *kp;
 
-       list_for_each_entry(kp, &p->list, list) {
+       list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
@@ -238,7 +209,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 {
        struct kprobe *kp;
 
-       list_for_each_entry(kp, &p->list, list) {
+       list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
@@ -277,6 +248,20 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
        return ret;
 }
 
+/* Walks the list and increments nmissed count for the multiprobe case */
+void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
+{
+       struct kprobe *kp;
+       if (p->pre_handler != aggr_pre_handler) {
+               p->nmissed++;
+       } else {
+               list_for_each_entry_rcu(kp, &p->list, list)
+                       kp->nmissed++;
+       }
+       return;
+}
+
+/* Called with kretprobe_lock held */
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 {
        struct hlist_node *node;
@@ -286,6 +271,7 @@ struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
        return NULL;
 }
 
+/* Called with kretprobe_lock held */
 static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
                                                              *rp)
 {
@@ -296,6 +282,7 @@ static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
        return NULL;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes add_rp_inst(struct kretprobe_instance *ri)
 {
        /*
@@ -314,6 +301,7 @@ void __kprobes add_rp_inst(struct kretprobe_instance *ri)
        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
 {
        /* remove rp inst off the rprobe_inst_table */
@@ -347,27 +335,13 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
        struct hlist_node *node, *tmp;
        unsigned long flags = 0;
 
-       spin_lock_irqsave(&kprobe_lock, flags);
+       spin_lock_irqsave(&kretprobe_lock, flags);
         head = kretprobe_inst_table_head(current);
         hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                 if (ri->task == tk)
                         recycle_rp_inst(ri);
         }
-       spin_unlock_irqrestore(&kprobe_lock, flags);
-}
-
-/*
- * This kprobe pre_handler is registered with every kretprobe. When probe
- * hits it will set up the return probe.
- */
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
-                                          struct pt_regs *regs)
-{
-       struct kretprobe *rp = container_of(p, struct kretprobe, kp);
-
-       /*TODO: consider to only swap the RA after the last pre_handler fired */
-       arch_prepare_kretprobe(rp, regs);
-       return 0;
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
 static inline void free_rp_inst(struct kretprobe *rp)
@@ -397,13 +371,13 @@ static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
         struct kprobe *kp;
 
        if (p->break_handler) {
-               list_for_each_entry(kp, &old_p->list, list) {
+               list_for_each_entry_rcu(kp, &old_p->list, list) {
                        if (kp->break_handler)
                                return -EEXIST;
                }
-               list_add_tail(&p->list, &old_p->list);
+               list_add_tail_rcu(&p->list, &old_p->list);
        } else
-               list_add(&p->list, &old_p->list);
+               list_add_rcu(&p->list, &old_p->list);
        return 0;
 }
 
@@ -421,18 +395,14 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
        ap->break_handler = aggr_break_handler;
 
        INIT_LIST_HEAD(&ap->list);
-       list_add(&p->list, &ap->list);
+       list_add_rcu(&p->list, &ap->list);
 
-       INIT_HLIST_NODE(&ap->hlist);
-       hlist_del(&p->hlist);
-       hlist_add_head(&ap->hlist,
-               &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+       hlist_replace_rcu(&p->hlist, &ap->hlist);
 }
 
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spinlock
  */
 static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
@@ -444,7 +414,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                copy_kprobe(old_p, p);
                ret = add_new_kprobe(old_p, p);
        } else {
-               ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+               ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
@@ -454,26 +424,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
        return ret;
 }
 
-/* kprobe removal house-keeping routines */
-static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
-{
-       arch_disarm_kprobe(p);
-       hlist_del(&p->hlist);
-       spin_unlock_irqrestore(&kprobe_lock, flags);
-       arch_remove_kprobe(p);
-}
-
-static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
-               struct kprobe *p, unsigned long flags)
-{
-       list_del(&p->list);
-       if (list_empty(&old_p->list)) {
-               cleanup_kprobe(old_p, flags);
-               kfree(old_p);
-       } else
-               spin_unlock_irqrestore(&kprobe_lock, flags);
-}
-
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
        if (addr >= (unsigned long)__kprobes_text_start
@@ -482,54 +432,111 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
        return 0;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
+static int __kprobes __register_kprobe(struct kprobe *p,
+       unsigned long called_from)
 {
        int ret = 0;
-       unsigned long flags = 0;
        struct kprobe *old_p;
+       struct module *probed_mod;
 
-       if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
-               return ret;
-       if ((ret = arch_prepare_kprobe(p)) != 0)
-               goto rm_kprobe;
+       if ((!kernel_text_address((unsigned long) p->addr)) ||
+               in_kprobes_functions((unsigned long) p->addr))
+               return -EINVAL;
+
+       p->mod_refcounted = 0;
+       /* Check if we are probing a module */
+       if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+               struct module *calling_mod = module_text_address(called_from);
+               /* We must allow modules to probe themselves and
+                * in that case avoid incrementing the module refcount,
+                * so as to allow unloading of self-probing modules.
+                */
+               if (calling_mod && (calling_mod != probed_mod)) {
+                       if (unlikely(!try_module_get(probed_mod)))
+                               return -EINVAL;
+                       p->mod_refcounted = 1;
+               } else
+                       probed_mod = NULL;
+       }
 
-       spin_lock_irqsave(&kprobe_lock, flags);
-       old_p = get_kprobe(p->addr);
        p->nmissed = 0;
+       down(&kprobe_mutex);
+       old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }
 
-       arch_copy_kprobe(p);
+       if ((ret = arch_prepare_kprobe(p)) != 0)
+               goto out;
+
        INIT_HLIST_NODE(&p->hlist);
-       hlist_add_head(&p->hlist,
+       hlist_add_head_rcu(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
        arch_arm_kprobe(p);
 
 out:
-       spin_unlock_irqrestore(&kprobe_lock, flags);
-rm_kprobe:
-       if (ret == -EEXIST)
-               arch_remove_kprobe(p);
+       up(&kprobe_mutex);
+
+       if (ret && probed_mod)
+               module_put(probed_mod);
        return ret;
 }
 
+int __kprobes register_kprobe(struct kprobe *p)
+{
+       return __register_kprobe(p,
+               (unsigned long)__builtin_return_address(0));
+}
+
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
-       unsigned long flags;
-       struct kprobe *old_p;
+       struct module *mod;
+       struct kprobe *old_p, *list_p;
+       int cleanup_p;
 
-       spin_lock_irqsave(&kprobe_lock, flags);
+       down(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
-       if (old_p) {
-               if (old_p->pre_handler == aggr_pre_handler)
-                       cleanup_aggr_kprobe(old_p, p, flags);
-               else
-                       cleanup_kprobe(p, flags);
-       } else
-               spin_unlock_irqrestore(&kprobe_lock, flags);
+       if (unlikely(!old_p)) {
+               up(&kprobe_mutex);
+               return;
+       }
+       if (p != old_p) {
+               list_for_each_entry_rcu(list_p, &old_p->list, list)
+                       if (list_p == p)
+                               /* kprobe p is a valid probe */
+                               goto valid_p;
+               up(&kprobe_mutex);
+               return;
+       }
+valid_p:
+       if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
+               (p->list.next == &old_p->list) &&
+               (p->list.prev == &old_p->list))) {
+               /* Only probe on the hash list */
+               arch_disarm_kprobe(p);
+               hlist_del_rcu(&old_p->hlist);
+               cleanup_p = 1;
+       } else {
+               list_del_rcu(&p->list);
+               cleanup_p = 0;
+       }
+
+       up(&kprobe_mutex);
+
+       synchronize_sched();
+       if (p->mod_refcounted &&
+           (mod = module_text_address((unsigned long)p->addr)))
+               module_put(mod);
+
+       if (cleanup_p) {
+               if (p != old_p) {
+                       list_del_rcu(&p->list);
+                       kfree(old_p);
+               }
+               arch_remove_kprobe(p);
+       }
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
@@ -543,7 +550,8 @@ int __kprobes register_jprobe(struct jprobe *jp)
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;
 
-       return register_kprobe(&jp->kp);
+       return __register_kprobe(&jp->kp,
+               (unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
@@ -553,6 +561,23 @@ void __kprobes unregister_jprobe(struct jprobe *jp)
 
 #ifdef ARCH_SUPPORTS_KRETPROBES
 
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When probe
+ * hits it will set up the return probe.
+ */
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+                                          struct pt_regs *regs)
+{
+       struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+       unsigned long flags = 0;
+
+       /* TODO: consider swapping the RA only after the last pre_handler fired */
+       spin_lock_irqsave(&kretprobe_lock, flags);
+       arch_prepare_kretprobe(rp, regs);
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
+       return 0;
+}
+
 int __kprobes register_kretprobe(struct kretprobe *rp)
 {
        int ret = 0;
@@ -583,7 +608,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
        rp->nmissed = 0;
        /* Establish function entry probe point */
-       if ((ret = register_kprobe(&rp->kp)) != 0)
+       if ((ret = __register_kprobe(&rp->kp,
+               (unsigned long)__builtin_return_address(0))) != 0)
                free_rp_inst(rp);
        return ret;
 }
@@ -604,13 +630,13 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
 
        unregister_kprobe(&rp->kp);
        /* No race here */
-       spin_lock_irqsave(&kprobe_lock, flags);
+       spin_lock_irqsave(&kretprobe_lock, flags);
        free_rp_inst(rp);
        while ((ri = get_used_rp_inst(rp)) != NULL) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
-       spin_unlock_irqrestore(&kprobe_lock, flags);
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
 static int __init init_kprobes(void)
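
The module-refcount rule in __register_kprobe() can be exercised with a
minimal self-probing module. A hypothetical sketch (self_target, self_pre,
and self_kp are illustrative names, not part of the patch):

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/kprobes.h>

	/* A function in this module's own text to place the probe on. */
	static noinline int self_target(int x)
	{
		return x + 1;
	}

	static int self_pre(struct kprobe *p, struct pt_regs *regs)
	{
		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
		return 0;	/* let the probed instruction execute */
	}

	static struct kprobe self_kp = {
		.addr		= (kprobe_opcode_t *)self_target,
		.pre_handler	= self_pre,
	};

	static int __init self_init(void)
	{
		/*
		 * register_kprobe() passes __builtin_return_address(0) to
		 * __register_kprobe(); since that address lies in this
		 * module's own text, calling_mod == probed_mod, no module
		 * reference is taken, and the module can still be unloaded
		 * while the probe is registered.
		 */
		return register_kprobe(&self_kp);
	}

	static void __exit self_exit(void)
	{
		unregister_kprobe(&self_kp);
	}

	module_init(self_init);
	module_exit(self_exit);
	MODULE_LICENSE("GPL");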