kprobes: Jump optimization sysctl interface
[safe/jmp/linux-2.6] / kernel / kprobes.c
index 612af2d..282035f 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sysctl.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
 #include <linux/ftrace.h>
@@ -258,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
        struct kprobe_insn_page *kip;
 
        list_for_each_entry(kip, &c->pages, list) {
-               long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+               long idx = ((long)slot - (long)kip->insns) /
+                               (c->insn_size * sizeof(kprobe_opcode_t));
                if (idx >= 0 && idx < slots_per_page(c)) {
                        WARN_ON(kip->slot_used[idx] != SLOT_USED);
                        if (dirty) {
@@ -360,6 +362,9 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 }
 
 #ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
 /*
  * Call all pre_handler on the list, but ignores its return value.
  * This must be called from arch-dep optimized caller.
@@ -428,7 +433,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
        mutex_lock(&kprobe_mutex);
-       if (kprobes_all_disarmed)
+       if (kprobes_all_disarmed || !kprobes_allow_optimization)
                goto end;
 
        /*
@@ -471,7 +476,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
        struct optimized_kprobe *op;
 
        /* Check if the kprobe is disabled or not ready for optimization. */
-       if (!kprobe_optready(p) ||
+       if (!kprobe_optready(p) || !kprobes_allow_optimization ||
            (kprobe_disabled(p) || kprobes_all_disarmed))
                return;
 
@@ -588,6 +593,80 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
        optimize_kprobe(ap);
 }
 
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct kprobe *p;
+       unsigned int i;
+
+       /* If optimization is already allowed, just return */
+       if (kprobes_allow_optimization)
+               return;
+
+       kprobes_allow_optimization = true;
+       mutex_lock(&text_mutex);
+       for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+               head = &kprobe_table[i];
+               hlist_for_each_entry_rcu(p, node, head, hlist)
+                       if (!kprobe_disabled(p))
+                               optimize_kprobe(p);
+       }
+       mutex_unlock(&text_mutex);
+       printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct kprobe *p;
+       unsigned int i;
+
+       /* If optimization is already prohibited, just return */
+       if (!kprobes_allow_optimization)
+               return;
+
+       kprobes_allow_optimization = false;
+       printk(KERN_INFO "Kprobes globally unoptimized\n");
+       get_online_cpus();      /* For avoiding text_mutex deadlock */
+       mutex_lock(&text_mutex);
+       for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+               head = &kprobe_table[i];
+               hlist_for_each_entry_rcu(p, node, head, hlist) {
+                       if (!kprobe_disabled(p))
+                               unoptimize_kprobe(p);
+               }
+       }
+
+       mutex_unlock(&text_mutex);
+       put_online_cpus();
+       /* Allow all currently running kprobes to complete */
+       synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+                                     void __user *buffer, size_t *length,
+                                     loff_t *ppos)
+{
+       int ret;
+
+       mutex_lock(&kprobe_mutex);
+       sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+       ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+       if (sysctl_kprobes_optimization)
+               optimize_all_kprobes();
+       else
+               unoptimize_all_kprobes();
+       mutex_unlock(&kprobe_mutex);
+
+       return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
 static void __kprobes __arm_kprobe(struct kprobe *p)
 {
        struct kprobe *old_p;
@@ -1509,6 +1588,72 @@ static void __kprobes kill_kprobe(struct kprobe *p)
        arch_remove_kprobe(p);
 }
 
+/* Disable one kprobe */
+int __kprobes disable_kprobe(struct kprobe *kp)
+{
+       int ret = 0;
+       struct kprobe *p;
+
+       mutex_lock(&kprobe_mutex);
+
+       /* Check whether specified probe is valid. */
+       p = __get_valid_kprobe(kp);
+       if (unlikely(p == NULL)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* If the probe is already disabled (or gone), just return */
+       if (kprobe_disabled(kp))
+               goto out;
+
+       kp->flags |= KPROBE_FLAG_DISABLED;
+       if (p != kp)
+               /* When kp != p, p is always enabled. */
+               try_to_disable_aggr_kprobe(p);
+
+       if (!kprobes_all_disarmed && kprobe_disabled(p))
+               disarm_kprobe(p);
+out:
+       mutex_unlock(&kprobe_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(disable_kprobe);
+
+/* Enable one kprobe */
+int __kprobes enable_kprobe(struct kprobe *kp)
+{
+       int ret = 0;
+       struct kprobe *p;
+
+       mutex_lock(&kprobe_mutex);
+
+       /* Check whether specified probe is valid. */
+       p = __get_valid_kprobe(kp);
+       if (unlikely(p == NULL)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (kprobe_gone(kp)) {
+               /* This kprobe has gone, we couldn't enable it. */
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (p != kp)
+               kp->flags &= ~KPROBE_FLAG_DISABLED;
+
+       if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+               p->flags &= ~KPROBE_FLAG_DISABLED;
+               arm_kprobe(p);
+       }
+out:
+       mutex_unlock(&kprobe_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(enable_kprobe);
+
 void __kprobes dump_kprobe(struct kprobe *kp)
 {
        printk(KERN_WARNING "Dumping kprobe:\n");
@@ -1610,10 +1755,14 @@ static int __init init_kprobes(void)
                }
        }
 
-#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
        /* Init kprobe_optinsn_slots */
        kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
 #endif
+       /* By default, kprobes can be optimized */
+       kprobes_allow_optimization = true;
+#endif
 
        /* By default, kprobes are armed */
        kprobes_all_disarmed = false;
@@ -1722,72 +1871,6 @@ static const struct file_operations debugfs_kprobes_operations = {
        .release        = seq_release,
 };
 
-/* Disable one kprobe */
-int __kprobes disable_kprobe(struct kprobe *kp)
-{
-       int ret = 0;
-       struct kprobe *p;
-
-       mutex_lock(&kprobe_mutex);
-
-       /* Check whether specified probe is valid. */
-       p = __get_valid_kprobe(kp);
-       if (unlikely(p == NULL)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* If the probe is already disabled (or gone), just return */
-       if (kprobe_disabled(kp))
-               goto out;
-
-       kp->flags |= KPROBE_FLAG_DISABLED;
-       if (p != kp)
-               /* When kp != p, p is always enabled. */
-               try_to_disable_aggr_kprobe(p);
-
-       if (!kprobes_all_disarmed && kprobe_disabled(p))
-               disarm_kprobe(p);
-out:
-       mutex_unlock(&kprobe_mutex);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(disable_kprobe);
-
-/* Enable one kprobe */
-int __kprobes enable_kprobe(struct kprobe *kp)
-{
-       int ret = 0;
-       struct kprobe *p;
-
-       mutex_lock(&kprobe_mutex);
-
-       /* Check whether specified probe is valid. */
-       p = __get_valid_kprobe(kp);
-       if (unlikely(p == NULL)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (kprobe_gone(kp)) {
-               /* This kprobe has gone, we couldn't enable it. */
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (p != kp)
-               kp->flags &= ~KPROBE_FLAG_DISABLED;
-
-       if (!kprobes_all_disarmed && kprobe_disabled(p)) {
-               p->flags &= ~KPROBE_FLAG_DISABLED;
-               arm_kprobe(p);
-       }
-out:
-       mutex_unlock(&kprobe_mutex);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(enable_kprobe);
-
 static void __kprobes arm_all_kprobes(void)
 {
        struct hlist_head *head;