kfifo: move out spinlock
[safe/jmp/linux-2.6] kernel/trace/trace_branch.c
index 8333715..4a194f0 100644
@@ -30,9 +30,11 @@ static struct trace_array *branch_tracer;
 static void
 probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 {
+       struct ftrace_event_call *call = &event_branch;
        struct trace_array *tr = branch_tracer;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
+       struct ring_buffer *buffer;
        unsigned long flags;
        int cpu, pc;
        const char *p;
@@ -53,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
                goto out;
 
        pc = preempt_count();
-       event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+       buffer = tr->buffer;
+       event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                          sizeof(*entry), flags, pc);
        if (!event)
                goto out;
@@ -73,7 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        entry->line = f->line;
        entry->correct = val == expect;
 
-       ring_buffer_unlock_commit(tr->buffer, event);
+       if (!filter_check_discard(call, entry, buffer, event))
+               ring_buffer_unlock_commit(buffer, event);
 
  out:
        atomic_dec(&tr->data[cpu]->disabled);
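
The three hunks above make one structural change: the struct ring_buffer pointer is read out of the trace_array once, passed directly to the reserve and commit helpers, and the commit is now gated on the per-event filter. A minimal sketch of the resulting pattern, assuming the internal tracing API of this tree (kernel/trace/trace.h); the helper name sketch_record_branch is hypothetical, and the locking, recursion guards, and disabled-counter handling of probe_likely_condition() are omitted:

	/* Hedged sketch, not a drop-in replacement for probe_likely_condition(). */
	static void sketch_record_branch(struct trace_array *tr,
					 struct ftrace_event_call *call,
					 unsigned long flags, int pc)
	{
		struct ring_buffer *buffer = tr->buffer;	/* fetched once */
		struct ring_buffer_event *event;
		struct trace_branch *entry;

		event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
						  sizeof(*entry), flags, pc);
		if (!event)
			return;

		entry = ring_buffer_event_data(event);
		/* ... fill entry->func, entry->file, entry->line, entry->correct ... */

		/* Commit only if the event filter did not discard the record. */
		if (!filter_check_discard(call, entry, buffer, event))
			ring_buffer_unlock_commit(buffer, event);
	}
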
@@ -271,7 +275,7 @@ static int branch_stat_show(struct seq_file *m, void *v)
        return 0;
 }
 
-static void *annotated_branch_stat_start(void)
+static void *annotated_branch_stat_start(struct tracer_stat *trace)
 {
        return __start_annotated_branch_profile;
 }
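
This hunk, and the matching all_branch_stat_start() hunk below, adapt the stat_start callbacks to the tracer_stat interface, which now passes the owning struct tracer_stat to the callback so a single function can serve more than one stat file. A sketch of how such a callback is wired up, assuming the struct tracer_stat field names of this era's kernel/trace/trace_stat.h; the *_stat_next, *_stat_cmp, and *_stat_headers helpers are assumed from context, since only branch_stat_show and annotated_branch_stat_start appear in the hunks shown:

	static struct tracer_stat annotated_branch_stats = {
		.name		= "branch_annotated",
		.stat_start	= annotated_branch_stat_start,	/* now takes struct tracer_stat * */
		.stat_next	= annotated_branch_stat_next,
		.stat_cmp	= annotated_branch_stat_cmp,
		.stat_headers	= annotated_branch_stat_headers,
		.stat_show	= branch_stat_show,
	};
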
@@ -346,7 +350,7 @@ static int all_branch_stat_headers(struct seq_file *m)
        return 0;
 }
 
-static void *all_branch_stat_start(void)
+static void *all_branch_stat_start(struct tracer_stat *trace)
 {
        return __start_branch_profile;
 }
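
For completeness, a registration sketch: a tracer_stat instance like the one above is handed to register_stat_tracer(), the entry point from kernel/trace/trace_stat.c. The initcall wrapper here is illustrative, not quoted from this commit:

	static __init int init_annotated_branch_stats(void)
	{
		int ret;

		ret = register_stat_tracer(&annotated_branch_stats);
		if (ret)
			printk(KERN_WARNING
			       "Could not register annotated branch stats\n");
		return ret;
	}
	fs_initcall(init_annotated_branch_stats);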