sysfs: Update s_iattr on link and unlink.
[safe/jmp/linux-2.6] / kernel / trace / trace_selftest.c
index a2ca6f0..dc98309 100644 (file)
@@ -16,6 +16,8 @@ static inline int trace_valid_entry(struct trace_entry *entry)
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
+       case TRACE_HW_BRANCHES:
+       case TRACE_KSYM:
                return 1;
        }
        return 0;
@@ -188,6 +190,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 #else
 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 #endif /* CONFIG_DYNAMIC_FTRACE */
+
 /*
  * Simple verification test of ftrace function tracer.
  * Enable ftrace, sleep 1/10 second, and then read the trace
@@ -248,6 +251,28 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST    100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+       /* This is harmlessly racy, we want to approximately detect a hang */
+       if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+               ftrace_graph_stop();
+               printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+               if (ftrace_dump_on_oops)
+                       __ftrace_dump(false);
+               return 0;
+       }
+
+       return trace_graph_entry(trace);
+}
+
 /*
  * Pretty much the same than for the function tracer from which the selftest
  * has been borrowed.
@@ -259,15 +284,30 @@ trace_selftest_startup_function_graph(struct tracer *trace,
        int ret;
        unsigned long count;
 
-       ret = tracer_init(trace, tr);
+       /*
+        * Simulate the init() callback but we attach a watchdog callback
+        * to detect and recover from possible hangs
+        */
+       tracing_reset_online_cpus(tr);
+       set_graph_array(tr);
+       ret = register_ftrace_graph(&trace_graph_return,
+                                   &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
+       tracing_start_cmdline_record();
 
        /* Sleep for a 1/10 of a second */
        msleep(100);
 
+       /* Have we just recovered from a hang? */
+       if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+               tracing_selftest_disabled = true;
+               ret = -1;
+               goto out;
+       }
+
        tracing_stop();
 
        /* check the trace buffer */
@@ -315,6 +355,14 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
        local_irq_disable();
        udelay(100);
        local_irq_enable();
+
+       /*
+        * Stop the tracer to avoid a warning subsequent
+        * to buffer flipping failure because tracing_stop()
+        * disables the tr and max buffers, making flipping impossible
+        * in case of parallel max irqs off latencies.
+        */
+       trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
@@ -369,6 +417,14 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        preempt_disable();
        udelay(100);
        preempt_enable();
+
+       /*
+        * Stop the tracer to avoid a warning subsequent
+        * to buffer flipping failure because tracing_stop()
+        * disables the tr and max buffers, making flipping impossible
+        * in case of parallel max preempt off latencies.
+        */
+       trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
@@ -428,6 +484,13 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* reverse the order of preempt vs irqs */
        local_irq_enable();
 
+       /*
+        * Stop the tracer to avoid a warning subsequent
+        * to buffer flipping failure because tracing_stop()
+        * disables the tr and max buffers, making flipping impossible
+        * in case of parallel max irqs/preempt off latencies.
+        */
+       trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
@@ -448,6 +511,8 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
+       trace->start(tr);
+
        preempt_disable();
        local_irq_disable();
        udelay(100);
@@ -455,6 +520,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* reverse the order of preempt vs irqs */
        local_irq_enable();
 
+       trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
@@ -687,3 +753,113 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        return ret;
 }
 #endif /* CONFIG_BRANCH_TRACER */
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+int
+trace_selftest_startup_hw_branches(struct tracer *trace,
+                                  struct trace_array *tr)
+{
+       struct trace_iterator *iter;
+       struct tracer tracer;
+       unsigned long count;
+       int ret;
+
+       if (!trace->open) {
+               printk(KERN_CONT "missing open function...");
+               return -1;
+       }
+
+       ret = tracer_init(trace, tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return ret;
+       }
+
+       /*
+        * The hw-branch tracer needs to collect the trace from the various
+        * cpu trace buffers - before tracing is stopped.
+        */
+       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter)
+               return -ENOMEM;
+
+       memcpy(&tracer, trace, sizeof(tracer));
+
+       iter->trace = &tracer;
+       iter->tr = tr;
+       iter->pos = -1;
+       mutex_init(&iter->mutex);
+
+       trace->open(iter);
+
+       mutex_destroy(&iter->mutex);
+       kfree(iter);
+
+       tracing_stop();
+
+       ret = trace_test_buffer(tr, &count);
+       trace->reset(tr);
+       tracing_start();
+
+       if (!ret && !count) {
+               printk(KERN_CONT "no entries found..");
+               ret = -1;
+       }
+
+       return ret;
+}
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
+#ifdef CONFIG_KSYM_TRACER
+static int ksym_selftest_dummy;
+
+int
+trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
+{
+       unsigned long count;
+       int ret;
+
+       /* start the tracing */
+       ret = tracer_init(trace, tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return ret;
+       }
+
+       ksym_selftest_dummy = 0;
+       /* Register the read-write tracing request */
+
+       ret = process_new_ksym_entry("ksym_selftest_dummy",
+                                    HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+                                       (unsigned long)(&ksym_selftest_dummy));
+
+       if (ret < 0) {
+               printk(KERN_CONT "ksym_trace read-write startup test failed\n");
+               goto ret_path;
+       }
+       /* Perform a read and a write operation over the dummy variable to
+        * trigger the tracer
+        */
+       if (ksym_selftest_dummy == 0)
+               ksym_selftest_dummy++;
+
+       /* stop the tracing. */
+       tracing_stop();
+       /* check the trace buffer */
+       ret = trace_test_buffer(tr, &count);
+       trace->reset(tr);
+       tracing_start();
+
+       /* read & write operations - one each is performed on the dummy variable
+        * triggering two entries in the trace buffer
+        */
+       if (!ret && count != 2) {
+               printk(KERN_CONT "Ksym tracer startup test failed");
+               ret = -1;
+       }
+
+ret_path:
+       return ret;
+}
+#endif /* CONFIG_KSYM_TRACER */
+