diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 297deb2..8d3538b 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -30,10 +30,12 @@ static struct trace_array *branch_tracer;
 static void
 probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 {
+       struct ftrace_event_call *call = &event_branch;
        struct trace_array *tr = branch_tracer;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
-       unsigned long flags, irq_flags;
+       struct ring_buffer *buffer;
+       unsigned long flags;
        int cpu, pc;
        const char *p;
 
@@ -52,15 +54,14 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       pc = preempt_count();
+       buffer = tr->buffer;
+       event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
+                                         sizeof(*entry), flags, pc);
        if (!event)
                goto out;
 
-       pc = preempt_count();
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type         = TRACE_BRANCH;
 
        /* Strip off the path, only save the file */
        p = f->file + strlen(f->file);
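The two hunks above are the reserve side of this conversion. trace_buffer_lock_reserve() folds three steps of the old open-coded sequence into one helper: it reserves the ring-buffer event, stamps the common entry header from `flags` and the preempt count, and records the event type, which is why the separate tracing_generic_entry_update() call and `entry->ent.type` assignment disappear (and why `pc` is now computed before the reserve). Note that `flags` is no longer written by the reserve call; it is set by the irq-save earlier in the function, outside this hunk. A minimal before/after sketch of the pattern, error handling elided:

        /* Old: reserve, then stamp the header and type by hand. */
        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags);
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type = TRACE_BRANCH;

        /* New: one call reserves and fills the common header. */
        event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                          sizeof(*entry), flags, pc);
        entry = ring_buffer_event_data(event);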
@@ -75,7 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        entry->line = f->line;
        entry->correct = val == expect;
 
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       if (!filter_check_discard(call, entry, buffer, event))
+               ring_buffer_unlock_commit(buffer, event);
 
  out:
        atomic_dec(&tr->data[cpu]->disabled);
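On the commit side, filter_check_discard() is the hook for run-time event filters: if a filter is attached to `call` and rejects the just-filled record, it discards the reserved event and returns nonzero, so the commit is skipped and the entry never becomes visible to readers. ring_buffer_unlock_commit() also dropped its `irq_flags` argument in this API generation. The resulting commit idiom, consolidated from the hunk above:

        /* Fill the type-specific fields, then commit only if the
         * event filter (if any) accepts the record. */
        entry->line    = f->line;
        entry->correct = val == expect;

        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);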
@@ -93,8 +95,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
 int enable_branch_tracing(struct trace_array *tr)
 {
-       int ret = 0;
-
        mutex_lock(&branch_tracing_mutex);
        branch_tracer = tr;
        /*
@@ -105,7 +105,7 @@ int enable_branch_tracing(struct trace_array *tr)
        branch_tracing_enabled++;
        mutex_unlock(&branch_tracing_mutex);
 
-       return ret;
+       return 0;
 }
 
 void disable_branch_tracing(void)
@@ -133,7 +133,6 @@ static void stop_branch_trace(struct trace_array *tr)
 
 static int branch_trace_init(struct trace_array *tr)
 {
-       tracing_reset_online_cpus(tr);
        start_branch_trace(tr);
        return 0;
 }
@@ -143,25 +142,8 @@ static void branch_trace_reset(struct trace_array *tr)
        stop_branch_trace(tr);
 }
 
-static int
-trace_print_print(struct trace_seq *s, struct trace_entry *entry, int flags)
-{
-       struct print_entry *field;
-
-       trace_assign_type(field, entry);
-
-       if (seq_print_ip_sym(s, field->ip, flags))
-               goto partial;
-
-       if (trace_seq_printf(s, ": %s", field->buf))
-               goto partial;
-
- partial:
-       return TRACE_TYPE_PARTIAL_LINE;
-}
-
 static enum print_line_t trace_branch_print(struct trace_iterator *iter,
-                                           int flags)
+                                           int flags, struct trace_event *event)
 {
        struct trace_branch *field;
 
@@ -177,11 +159,21 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,
        return TRACE_TYPE_HANDLED;
 }
 
+static void branch_print_header(struct seq_file *s)
+{
+       seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
+               "  FUNC:FILE:LINE\n");
+       seq_puts(s, "#              | |       |          |         |   "
+               "    |\n");
+}
 
-static struct trace_event trace_branch_event = {
-       .type           = TRACE_BRANCH,
+static struct trace_event_functions trace_branch_funcs = {
        .trace          = trace_branch_print,
-       .latency_trace  = trace_branch_print,
+};
+
+static struct trace_event trace_branch_event = {
+       .type           = TRACE_BRANCH,
+       .funcs          = &trace_branch_funcs,
 };
 
 static struct tracer branch_trace __read_mostly =
@@ -192,6 +184,7 @@ static struct tracer branch_trace __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_branch,
 #endif /* CONFIG_FTRACE_SELFTEST */
+       .print_header   = branch_print_header,
 };
 
 __init static int init_branch_tracer(void)
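Three related changes land in the hunks above. First, trace_print_print() was unused, and broken besides: every path ends at the `partial:` label (its checks were also inverted relative to the usual `if (!trace_seq_printf(...))` idiom), so it returned TRACE_TYPE_PARTIAL_LINE unconditionally. Second, the output callback gains a `struct trace_event *event` parameter, so one callback body can serve several registered events by inspecting which event invoked it. Third, the function pointers move out of `struct trace_event` into a shared `struct trace_event_functions` table referenced via `.funcs` (the old `.latency_trace` slot is gone entirely), so identical callback sets are no longer duplicated per event. With `.print_header` wired into branch_trace above, the tracer's trace file begins with this header, reconstructed here by concatenating the seq_puts() strings:

        #           TASK-PID    CPU#    TIMESTAMP  CORRECT  FUNC:FILE:LINE
        #              | |       |          |         |       |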
@@ -286,7 +279,7 @@ static int branch_stat_show(struct seq_file *m, void *v)
        return 0;
 }
 
-static void *annotated_branch_stat_start(void)
+static void *annotated_branch_stat_start(struct tracer_stat *trace)
 {
        return __start_annotated_branch_profile;
 }
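Here, and in the matching all_branch_stat_start() hunk at the end of the diff, the stat_start callback grows a `struct tracer_stat *` argument: the stat framework passes the registered tracer_stat through, so a single callback implementation can tell which stat session it is serving. Both branch callbacks ignore it and still return their linker-section start symbol. For orientation, a sketch of the callback table this file fills in, assuming the kernel/trace/trace_stat.h of this era (field set abridged):

        struct tracer_stat {
                const char      *name;
                /* First entity of the stat list; now receives its owner */
                void            *(*stat_start)(struct tracer_stat *trace);
                void            *(*stat_next)(void *prev, int idx);
                int             (*stat_cmp)(void *p1, void *p2);
                int             (*stat_show)(struct seq_file *s, void *p);
                int             (*stat_headers)(struct seq_file *s);
        };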
@@ -318,8 +311,23 @@ static int annotated_branch_stat_cmp(void *p1, void *p2)
                return -1;
        if (percent_a > percent_b)
                return 1;
-       else
-               return 0;
+
+       if (a->incorrect < b->incorrect)
+               return -1;
+       if (a->incorrect > b->incorrect)
+               return 1;
+
+       /*
+        * Since the above shows worse (incorrect) cases
+        * first, we continue that by showing best (correct)
+        * cases last.
+        */
+       if (a->correct > b->correct)
+               return -1;
+       if (a->correct < b->correct)
+               return 1;
+
+       return 0;
 }
 
 static struct tracer_stat annotated_branch_stats = {
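The comparator above no longer treats equal incorrect-percentages as a tie: ties break first by absolute incorrect count (more mispredicted hits rank higher, i.e. print earlier, since the stat file lists worst cases first), then by correct count in the opposite direction, so the best-behaved annotations sink to the bottom. A hypothetical user-space replica to make the ordering concrete (get_incorrect_percent() approximated; not kernel code):

        #include <stdio.h>

        struct branch_stat { unsigned long correct, incorrect; };

        static long percent(const struct branch_stat *s)
        {
                unsigned long total = s->correct + s->incorrect;
                return total ? (long)(s->incorrect * 100 / total) : -1;
        }

        /* Positive return ranks `a` ahead of `b` in the stat output. */
        static int stat_cmp(const struct branch_stat *a,
                            const struct branch_stat *b)
        {
                long pa = percent(a), pb = percent(b);

                if (pa != pb)
                        return pa > pb ? 1 : -1;
                if (a->incorrect != b->incorrect)
                        return a->incorrect > b->incorrect ? 1 : -1;
                if (a->correct != b->correct)
                        return a->correct > b->correct ? -1 : 1; /* best last */
                return 0;
        }

        int main(void)
        {
                struct branch_stat x = { 1000, 1000 }, y = { 10, 10 };

                /* Both are 50% incorrect; x has more raw mispredictions,
                 * so it ranks ahead of y. Prints 1. */
                printf("%d\n", stat_cmp(&x, &y));
                return 0;
        }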
@@ -361,7 +369,7 @@ static int all_branch_stat_headers(struct seq_file *m)
        return 0;
 }
 
-static void *all_branch_stat_start(void)
+static void *all_branch_stat_start(struct tracer_stat *trace)
 {
        return __start_branch_profile;
 }