drm/vmwgfx: return -EFAULT for copy_to_user errors
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 38856ba..250e7f9 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -3,6 +3,7 @@
 #include <linux/stringify.h>
 #include <linux/kthread.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 
 static inline int trace_valid_entry(struct trace_entry *entry)
 {
@@ -16,6 +17,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
+       case TRACE_KSYM:
                return 1;
        }
        return 0;
@@ -27,7 +29,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
        struct trace_entry *entry;
        unsigned int loops = 0;
 
-       while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
+       while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);
 
                /*
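For context on the hunk above: in this kernel generation ring_buffer_consume() grew a fourth out-parameter (as far as I can tell, a count of events lost to buffer overwrites), and callers that do not need it, such as this selftest, simply pass NULL. Below is a minimal user-space sketch of that optional-out-parameter consumer pattern; consume_event() and its toy ring are hypothetical stand-ins, not the kernel API.

/* Toy sketch of the "optional out-parameter" consumer loop above.
 * consume_event() is a hypothetical stand-in for ring_buffer_consume(),
 * not the kernel function itself. */
#include <stdio.h>
#include <stddef.h>

struct event { int data; };

static struct event ring[3] = { {1}, {2}, {3} };
static size_t head;

/* Return the next event; optionally report how many were dropped.
 * Callers that do not care pass NULL, exactly as the selftest does. */
static struct event *consume_event(unsigned long *lost)
{
	if (lost)
		*lost = 0;	/* nothing is ever dropped in this toy buffer */
	if (head >= 3)
		return NULL;
	return &ring[head++];
}

int main(void)
{
	struct event *ev;

	while ((ev = consume_event(NULL)))	/* NULL: drop count ignored */
		printf("event %d\n", ev->data);
	return 0;
}
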
@@ -65,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
-       __raw_spin_lock(&ftrace_max_lock);
+       arch_spin_lock(&ftrace_max_lock);
 
        cnt = ring_buffer_entries(tr->buffer);
 
@@ -83,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
                        break;
        }
        tracing_on();
-       __raw_spin_unlock(&ftrace_max_lock);
+       arch_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);
 
        if (count)
@@ -188,6 +190,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 #else
 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 #endif /* CONFIG_DYNAMIC_FTRACE */
+
 /*
  * Simple verification test of ftrace function tracer.
  * Enable ftrace, sleep 1/10 second, and then read the trace
@@ -248,6 +251,29 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST    100000000
+
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+       /* This is harmlessly racy, we want to approximately detect a hang */
+       if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+               ftrace_graph_stop();
+               printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+               if (ftrace_dump_on_oops)
+                       __ftrace_dump(false, DUMP_ALL);
+               return 0;
+       }
+
+       return trace_graph_entry(trace);
+}
+
 /*
  * Pretty much the same as for the function tracer from which the selftest
  * has been borrowed.
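The block added above is the heart of the change for the graph tracer selftest: instead of hooking trace_graph_entry() directly, the test installs a wrapper that counts every invocation and, once GRAPH_MAX_FUNC_TEST calls are exceeded, stops the graph tracer and optionally dumps the buffers, so a misbehaving tracer cannot hang the machine during the boot-time selftest. Here is a minimal user-space sketch of that wrap-and-count watchdog pattern; real_probe(), stop_tracing() and MAX_CALLS_TEST are hypothetical stand-ins for the kernel symbols, and the threshold is deliberately tiny.

/* User-space sketch of the watchdog-wrapper pattern above: count calls
 * into the real callback and bail out once a threshold is crossed.
 * real_probe(), stop_tracing() and MAX_CALLS_TEST are stand-ins, not
 * the kernel symbols used in the patch. */
#include <stdio.h>
#include <stdbool.h>

#define MAX_CALLS_TEST 5	/* the kernel patch uses 100000000 */

static unsigned int hang_thresh;
static bool tracer_stopped;

static int real_probe(int arg)
{
	/* stand-in for trace_graph_entry() */
	return arg;
}

static void stop_tracing(void)
{
	/* stand-in for ftrace_graph_stop() */
	tracer_stopped = true;
	fprintf(stderr, "BUG: callback appears to be stuck, stopping\n");
}

/* Wrapper installed instead of the real probe during the selftest. */
static int probe_watchdog(int arg)
{
	/* racy on purpose: an approximate count is enough to notice
	 * a runaway callback */
	if (++hang_thresh > MAX_CALLS_TEST) {
		stop_tracing();
		return 0;
	}
	return real_probe(arg);
}

int main(void)
{
	for (int i = 0; i < 10 && !tracer_stopped; i++)
		probe_watchdog(i);

	/* mirror of the post-run check in the selftest: if the threshold
	 * was crossed, treat the run as a recovered hang and fail */
	if (hang_thresh > MAX_CALLS_TEST)
		fprintf(stderr, "selftest failed: hang detected\n");
	return 0;
}

As the comment in the patch notes, the counter is left racy on purpose; the selftest only needs a rough count to decide that the tracer is stuck.
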
@@ -259,15 +285,30 @@ trace_selftest_startup_function_graph(struct tracer *trace,
        int ret;
        unsigned long count;
 
-       ret = tracer_init(trace, tr);
+       /*
+        * Simulate the init() callback but we attach a watchdog callback
+        * to detect and recover from possible hangs
+        */
+       tracing_reset_online_cpus(tr);
+       set_graph_array(tr);
+       ret = register_ftrace_graph(&trace_graph_return,
+                                   &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
+       tracing_start_cmdline_record();
 
        /* Sleep for a 1/10 of a second */
        msleep(100);
 
+       /* Have we just recovered from a hang? */
+       if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+               tracing_selftest_disabled = true;
+               ret = -1;
+               goto out;
+       }
+
        tracing_stop();
 
        /* check the trace buffer */
@@ -713,3 +754,57 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        return ret;
 }
 #endif /* CONFIG_BRANCH_TRACER */
+
+#ifdef CONFIG_KSYM_TRACER
+static int ksym_selftest_dummy;
+
+int
+trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
+{
+       unsigned long count;
+       int ret;
+
+       /* start the tracing */
+       ret = tracer_init(trace, tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return ret;
+       }
+
+       ksym_selftest_dummy = 0;
+       /* Register the read-write tracing request */
+
+       ret = process_new_ksym_entry("ksym_selftest_dummy",
+                                    HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+                                       (unsigned long)(&ksym_selftest_dummy));
+
+       if (ret < 0) {
+               printk(KERN_CONT "ksym_trace read-write startup test failed\n");
+               goto ret_path;
+       }
+       /* Perform a read and a write operation over the dummy variable to
+        * trigger the tracer
+        */
+       if (ksym_selftest_dummy == 0)
+               ksym_selftest_dummy++;
+
+       /* stop the tracing. */
+       tracing_stop();
+       /* check the trace buffer */
+       ret = trace_test_buffer(tr, &count);
+       trace->reset(tr);
+       tracing_start();
+
+       /* read & write operations - one each is performed on the dummy variable
+        * triggering two entries in the trace buffer
+        */
+       if (!ret && count != 2) {
+               printk(KERN_CONT "Ksym tracer startup test failed");
+               ret = -1;
+       }
+
+ret_path:
+       return ret;
+}
+#endif /* CONFIG_KSYM_TRACER */
+
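The ksym selftest added above follows the same shape as the other startup tests: initialize the tracer, arm a read/write hardware breakpoint on a static dummy variable, touch that variable once for a read and once for a write, stop tracing, and then expect exactly two entries in the buffer. Below is a minimal user-space sketch of that count-and-compare shape; record_access(), the hand-placed calls to it and the local dummy are hypothetical stand-ins for the breakpoint machinery.

/* User-space sketch of the selftest shape above: perform a known number
 * of traced operations, then compare against what was recorded.
 * record_access() and the dummy variable are stand-ins for the ksym
 * tracer and its hardware breakpoint, not the kernel API. */
#include <stdio.h>

static int selftest_dummy;
static unsigned long recorded;
static int tracing_enabled = 1;

static void record_access(void)
{
	/* stand-in for the breakpoint handler writing a trace entry */
	if (tracing_enabled)
		recorded++;
}

int main(void)
{
	int ret = 0;

	/* the kernel test triggers these entries via a breakpoint armed
	 * with HW_BREAKPOINT_R | HW_BREAKPOINT_W on the dummy variable;
	 * here the "tracer" is simply called by hand next to each access */
	record_access();		/* the read of the dummy ... */
	if (selftest_dummy == 0) {
		record_access();	/* ... and its write */
		selftest_dummy++;
	}

	tracing_enabled = 0;		/* stop the tracing */

	/* exactly two entries are expected, just like in the patch */
	if (recorded != 2) {
		fprintf(stderr, "ksym-style selftest failed\n");
		ret = -1;
	}
	return ret;
}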