diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1494e85..e5342a3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -90,6 +90,9 @@ static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  */
 static struct kprobe_blackpoint kprobe_blacklist[] = {
        {"preempt_schedule",},
+       {"native_get_debugreg",},
+       {"irq_entries_start",},
+       {"common_interrupt",},
        {NULL}    /* Terminator */
 };
 
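The three new blacklist entries name functions that sit on the probe and interrupt dispatch path itself, so planting a kprobe inside them would recurse. Below is a minimal user-space sketch of the kind of range check the kernel performs against this table; the addresses are made up for illustration, whereas the real code resolves each name through kallsyms at init time and then compares the probe address against the resolved range.

#include <stdio.h>

struct blackpoint {
	const char *name;
	unsigned long start_addr;
	unsigned long range;
};

/* Illustrative table: same names as the patch, fabricated addresses. */
static struct blackpoint blacklist[] = {
	{ "preempt_schedule",    0x1000, 0x080 },
	{ "native_get_debugreg", 0x2000, 0x040 },
	{ "irq_entries_start",   0x3000, 0x200 },
	{ "common_interrupt",    0x3200, 0x100 },
	{ NULL, 0, 0 }	/* Terminator */
};

/* Return non-zero if addr falls inside a blacklisted function. */
static int in_blacklisted_function(unsigned long addr)
{
	struct blackpoint *bp;

	for (bp = blacklist; bp->name; bp++)
		if (addr >= bp->start_addr && addr < bp->start_addr + bp->range)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", in_blacklisted_function(0x2010)); /* 1: inside native_get_debugreg */
	printf("%d\n", in_blacklisted_function(0x5000)); /* 0: probe-able address */
	return 0;
}
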
@@ -673,6 +676,40 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
        return (kprobe_opcode_t *)(((char *)addr) + p->offset);
 }
 
+/* Check passed kprobe is valid and return kprobe in kprobe_table. */
+static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
+{
+       struct kprobe *old_p, *list_p;
+
+       old_p = get_kprobe(p->addr);
+       if (unlikely(!old_p))
+               return NULL;
+
+       if (p != old_p) {
+               list_for_each_entry_rcu(list_p, &old_p->list, list)
+                       if (list_p == p)
+                       /* kprobe p is a valid probe */
+                               goto valid;
+               return NULL;
+       }
+valid:
+       return old_p;
+}
+
+/* Return error if the kprobe is being re-registered */
+static inline int check_kprobe_rereg(struct kprobe *p)
+{
+       int ret = 0;
+       struct kprobe *old_p;
+
+       mutex_lock(&kprobe_mutex);
+       old_p = __get_valid_kprobe(p);
+       if (old_p)
+               ret = -EINVAL;
+       mutex_unlock(&kprobe_mutex);
+       return ret;
+}
+
 int __kprobes register_kprobe(struct kprobe *p)
 {
        int ret = 0;
@@ -685,6 +722,10 @@ int __kprobes register_kprobe(struct kprobe *p)
                return -EINVAL;
        p->addr = addr;
 
+       ret = check_kprobe_rereg(p);
+       if (ret)
+               return ret;
+
        preempt_disable();
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr)) {
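With the hunks above applied, register_kprobe() refuses a struct kprobe that is already registered instead of linking it into the hash table a second time. A minimal sketch of a test module exercising that path follows; the module and the probed symbol are illustrative and not part of this patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* illustrative probe target */
};

static int __init rereg_demo_init(void)
{
	int ret;

	ret = register_kprobe(&kp);
	pr_info("first register_kprobe: %d\n", ret);	/* expected: 0 */

	/* Registering the same, still-live kprobe again now fails. */
	ret = register_kprobe(&kp);
	pr_info("second register_kprobe: %d\n", ret);	/* expected: -EINVAL */

	return 0;
}

static void __exit rereg_demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(rereg_demo_init);
module_exit(rereg_demo_exit);
MODULE_LICENSE("GPL");
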
@@ -754,26 +795,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
-/* Check passed kprobe is valid and return kprobe in kprobe_table. */
-static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
-{
-       struct kprobe *old_p, *list_p;
-
-       old_p = get_kprobe(p->addr);
-       if (unlikely(!old_p))
-               return NULL;
-
-       if (p != old_p) {
-               list_for_each_entry_rcu(list_p, &old_p->list, list)
-                       if (list_p == p)
-                       /* kprobe p is a valid probe */
-                               goto valid;
-               return NULL;
-       }
-valid:
-       return old_p;
-}
-
 /*
  * Unregister a kprobe without a scheduler synchronization.
  */
@@ -1141,6 +1162,13 @@ static void __kprobes kill_kprobe(struct kprobe *p)
        arch_remove_kprobe(p);
 }
 
+void __kprobes dump_kprobe(struct kprobe *kp)
+{
+       printk(KERN_WARNING "Dumping kprobe:\n");
+       printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
+              kp->symbol_name, kp->addr, kp->offset);
+}
+
 /* Module notifier call back, checking kprobes on the module */
 static int __kprobes kprobes_module_callback(struct notifier_block *nb,
                                             unsigned long val, void *data)
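
dump_kprobe() gives arch and debug code a one-call way to report a misbehaving probe. The patch adds no caller in kernel/kprobes.c itself; the sketch below is a hypothetical user of the helper, shown only to illustrate the output it produces.

#include <linux/kprobes.h>

/* Hypothetical error path: report the offending probe before bailing out. */
static void report_bad_probe(struct kprobe *cur)
{
	/*
	 * Emits, at KERN_WARNING level:
	 *   Dumping kprobe:
	 *   Name: <symbol_name>
	 *   Address: <addr>
	 *   Offset: <offset>
	 */
	dump_kprobe(cur);
}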