diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index fff3545..7b97000 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -1,125 +1,133 @@
 /*
- * h/w branch tracer for x86 based on bts
+ * h/w branch tracer for x86 based on BTS
  *
  * Copyright (C) 2008-2009 Intel Corporation.
  * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
- *
  */
-
-#include <linux/module.h>
-#include <linux/fs.h>
+#include <linux/kallsyms.h>
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
-#include <linux/kallsyms.h>
-#include <linux/mutex.h>
+#include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/fs.h>
 
 #include <asm/ds.h>
 
-#include "trace.h"
 #include "trace_output.h"
+#include "trace.h"
 
-#define SIZEOF_BTS (1 << 13)
+#define BTS_BUFFER_SIZE (1 << 13)
 
-/* The tracer mutex protects the below per-cpu tracer array.
-   It needs to be held to:
-   - start tracing on all cpus
-   - stop tracing on all cpus
-   - start tracing on a single hotplug cpu
-   - stop tracing on a single hotplug cpu
-   - read the trace from all cpus
-   - read the trace from a single cpu
-*/
-static DEFINE_MUTEX(bts_tracer_mutex);
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
-#define this_buffer per_cpu(buffer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
-static int __read_mostly trace_hw_branches_enabled;
+static int trace_hw_branches_enabled __read_mostly;
+static int trace_hw_branches_suspended __read_mostly;
 
 static struct trace_array *hw_branch_trace __read_mostly;
 
-/*
- * Start tracing on the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_mutex must be locked.
- */
-static void bts_trace_start_cpu(void *arg)
+static void bts_trace_init_cpu(int cpu)
 {
-        if (this_tracer)
-                ds_release_bts(this_tracer);
-
-        this_tracer =
-                ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
-                               /* ovfl = */ NULL, /* th = */ (size_t)-1,
-                               BTS_KERNEL);
-        if (IS_ERR(this_tracer)) {
-                this_tracer = NULL;
-                return;
-        }
+        per_cpu(hwb_tracer, cpu) =
+                ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+                                   BTS_BUFFER_SIZE, NULL, (size_t)-1,
+                                   BTS_KERNEL);
+
+        if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+                per_cpu(hwb_tracer, cpu) = NULL;
 }
 
-static void bts_trace_start(struct trace_array *tr)
+static int bts_trace_init(struct trace_array *tr)
 {
-        mutex_lock(&bts_tracer_mutex);
+        int cpu;
 
-        on_each_cpu(bts_trace_start_cpu, NULL, 1);
-        trace_hw_branches_enabled = 1;
+        hw_branch_trace = tr;
+        trace_hw_branches_enabled = 0;
 
-        mutex_unlock(&bts_tracer_mutex);
+        get_online_cpus();
+        for_each_online_cpu(cpu) {
+                bts_trace_init_cpu(cpu);
+
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        trace_hw_branches_enabled = 1;
+        }
+        trace_hw_branches_suspended = 0;
+        put_online_cpus();
+
+        /* If we could not enable tracing on a single cpu, we fail. */
+        return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
 }
 
-/*
- * Start tracing on the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_mutex must be locked.
- */
-static void bts_trace_stop_cpu(void *arg)
+static void bts_trace_reset(struct trace_array *tr)
 {
-        if (this_tracer) {
-                ds_release_bts(this_tracer);
-                this_tracer = NULL;
+        int cpu;
+
+        get_online_cpus();
+        for_each_online_cpu(cpu) {
+                if (likely(per_cpu(hwb_tracer, cpu))) {
+                        ds_release_bts(per_cpu(hwb_tracer, cpu));
+                        per_cpu(hwb_tracer, cpu) = NULL;
+                }
         }
+        trace_hw_branches_enabled = 0;
+        trace_hw_branches_suspended = 0;
+        put_online_cpus();
 }
 
-static void bts_trace_stop(struct trace_array *tr)
+static void bts_trace_start(struct trace_array *tr)
 {
-        mutex_lock(&bts_tracer_mutex);
+        int cpu;
 
-        trace_hw_branches_enabled = 0;
-        on_each_cpu(bts_trace_stop_cpu, NULL, 1);
+        get_online_cpus();
+        for_each_online_cpu(cpu)
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_resume_bts(per_cpu(hwb_tracer, cpu));
+        trace_hw_branches_suspended = 0;
+        put_online_cpus();
+}
 
-        mutex_unlock(&bts_tracer_mutex);
+static void bts_trace_stop(struct trace_array *tr)
+{
+        int cpu;
+
+        get_online_cpus();
+        for_each_online_cpu(cpu)
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
+        trace_hw_branches_suspended = 1;
+        put_online_cpus();
 }
 
 static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
-        unsigned int cpu = (unsigned long)hcpu;
-
-        mutex_lock(&bts_tracer_mutex);
-
-        if (!trace_hw_branches_enabled)
-                goto out;
+        int cpu = (long)hcpu;
 
         switch (action) {
         case CPU_ONLINE:
         case CPU_DOWN_FAILED:
-                smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+                /* The notification is sent with interrupts enabled. */
+                if (trace_hw_branches_enabled) {
+                        bts_trace_init_cpu(cpu);
+
+                        if (trace_hw_branches_suspended &&
+                            likely(per_cpu(hwb_tracer, cpu)))
+                                ds_suspend_bts(per_cpu(hwb_tracer, cpu));
+                }
                 break;
+
         case CPU_DOWN_PREPARE:
-                smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
-                break;
+                /* The notification is sent with interrupts enabled. */
+                if (likely(per_cpu(hwb_tracer, cpu))) {
+                        ds_release_bts(per_cpu(hwb_tracer, cpu));
+                        per_cpu(hwb_tracer, cpu) = NULL;
+                }
         }
 
- out:
-        mutex_unlock(&bts_tracer_mutex);
         return NOTIFY_DONE;
 }
 
@@ -127,23 +135,6 @@ static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
         .notifier_call = bts_hotcpu_handler
 };
 
-static int bts_trace_init(struct trace_array *tr)
-{
-        hw_branch_trace = tr;
-
-        register_hotcpu_notifier(&bts_hotcpu_notifier);
-        tracing_reset_online_cpus(tr);
-        bts_trace_start(tr);
-
-        return 0;
-}
-
-static void bts_trace_reset(struct trace_array *tr)
-{
-        bts_trace_stop(tr);
-        unregister_hotcpu_notifier(&bts_hotcpu_notifier);
-}
-
 static void bts_trace_print_header(struct seq_file *m)
 {
         seq_puts(m, "# CPU#        TO  <-  FROM\n");
@@ -151,31 +142,33 @@ static void bts_trace_print_header(struct seq_file *m)
 
 static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 {
+        unsigned long symflags = TRACE_ITER_SYM_OFFSET;
         struct trace_entry *entry = iter->ent;
         struct trace_seq *seq = &iter->seq;
         struct hw_branch_entry *it;
-        unsigned long symflags = TRACE_ITER_SYM_OFFSET;
 
         trace_assign_type(it, entry);
 
         if (entry->type == TRACE_HW_BRANCHES) {
-                if (trace_seq_printf(seq, "%4d  ", entry->cpu) &&
+                if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
                     seq_print_ip_sym(seq, it->to, symflags) &&
                     trace_seq_printf(seq, "\t  <-  ") &&
                     seq_print_ip_sym(seq, it->from, symflags) &&
                     trace_seq_printf(seq, "\n"))
                         return TRACE_TYPE_HANDLED;
-                return TRACE_TYPE_PARTIAL_LINE;;
+                return TRACE_TYPE_PARTIAL_LINE;
         }
         return TRACE_TYPE_UNHANDLED;
 }
 
 void trace_hw_branch(u64 from, u64 to)
 {
+        struct ftrace_event_call *call = &event_hw_branch;
         struct trace_array *tr = hw_branch_trace;
         struct ring_buffer_event *event;
+        struct ring_buffer *buf;
         struct hw_branch_entry *entry;
-        unsigned long irq1, irq2;
+        unsigned long irq1;
         int cpu;
 
         if (unlikely(!tr))
@@ -189,16 +182,18 @@ void trace_hw_branch(u64 from, u64 to)
         if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                 goto out;
 
-        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
+        buf = tr->buffer;
+        event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
+                                          sizeof(*entry), 0, 0);
         if (!event)
                 goto out;
         entry = ring_buffer_event_data(event);
         tracing_generic_entry_update(&entry->ent, 0, from);
         entry->ent.type = TRACE_HW_BRANCHES;
-        entry->ent.cpu = cpu;
         entry->from = from;
         entry->to   = to;
-        ring_buffer_unlock_commit(tr->buffer, event, irq2);
+        if (!filter_check_discard(call, entry, buf, event))
+                trace_buffer_unlock_commit(buf, event, 0, 0);
 
  out:
         atomic_dec(&tr->data[cpu]->disabled);
@@ -228,11 +223,11 @@ static void trace_bts_at(const struct bts_trace *trace, void *at)
 
 /*
  * Collect the trace on the current cpu and write it into the ftrace buffer.
  *
- * pre: bts_tracer_mutex must be locked
+ * pre: tracing must be suspended on the current cpu
  */
 static void trace_bts_cpu(void *arg)
 {
-        struct trace_array *tr = (struct trace_array *) arg;
+        struct trace_array *tr = (struct trace_array *)arg;
         const struct bts_trace *trace;
         unsigned char *at;
 
@@ -245,10 +240,9 @@ static void trace_bts_cpu(void *arg)
         if (unlikely(!this_tracer))
                 return;
 
-        ds_suspend_bts(this_tracer);
         trace = ds_read_bts(this_tracer);
         if (!trace)
-                goto out;
+                return;
 
         for (at = trace->ds.top; (void *)at < trace->ds.end;
              at += trace->ds.size)
@@ -257,18 +251,27 @@ static void trace_bts_cpu(void *arg)
         for (at = trace->ds.begin; (void *)at < trace->ds.top;
              at += trace->ds.size)
                 trace_bts_at(trace, at);
-
-out:
-        ds_resume_bts(this_tracer);
 }
 
 static void trace_bts_prepare(struct trace_iterator *iter)
 {
-        mutex_lock(&bts_tracer_mutex);
+        int cpu;
 
+        get_online_cpus();
+        for_each_online_cpu(cpu)
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
+        /*
+         * We need to collect the trace on the respective cpu since ftrace
+         * implicitly adds the record for the current cpu.
+         * Once that is more flexible, we could collect the data from any cpu.
+         */
         on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
-        mutex_unlock(&bts_tracer_mutex);
+        for_each_online_cpu(cpu)
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_resume_bts(per_cpu(hwb_tracer, cpu));
+        put_online_cpus();
 }
 
 static void trace_bts_close(struct trace_iterator *iter)
@@ -278,11 +281,11 @@ static void trace_bts_close(struct trace_iterator *iter)
 
 void trace_hw_branch_oops(void)
 {
-        mutex_lock(&bts_tracer_mutex);
-
-        trace_bts_cpu(hw_branch_trace);
-
-        mutex_unlock(&bts_tracer_mutex);
+        if (this_tracer) {
+                ds_suspend_bts_noirq(this_tracer);
+                trace_bts_cpu(hw_branch_trace);
+                ds_resume_bts_noirq(this_tracer);
+        }
 }
 
 struct tracer bts_tracer __read_mostly =
@@ -295,11 +298,15 @@ struct tracer bts_tracer __read_mostly =
         .start          = bts_trace_start,
         .stop           = bts_trace_stop,
         .open           = trace_bts_prepare,
-        .close          = trace_bts_close
+        .close          = trace_bts_close,
+#ifdef CONFIG_FTRACE_SELFTEST
+        .selftest       = trace_selftest_startup_hw_branches,
+#endif /* CONFIG_FTRACE_SELFTEST */
 };
 
 __init static int init_bts_trace(void)
 {
+        register_hotcpu_notifier(&bts_hotcpu_notifier);
         return register_tracer(&bts_tracer);
 }
 device_initcall(init_bts_trace);
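
Reviewer's note: the core of this patch is a locking change. The global bts_tracer_mutex and the on_each_cpu() IPI helpers are gone; per-cpu tracer pointers are now walked directly under get_online_cpus()/put_online_cpus(), and the hotplug notifier covers CPUs that come or go while the tracer is active. The sketch below shows that resource-per-cpu pattern in isolation. It is a minimal, hypothetical module (the demo_* names are invented here; the hotplug API is the 2.6.3x-era one this patch targets), not code taken from the patch:

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cpu.h>

/* One resource per cpu, mirroring hwb_tracer in the patch (hypothetical). */
static DEFINE_PER_CPU(void *, demo_state);

static void demo_init_cpu(int cpu)
{
        /* Allocate the per-cpu resource for @cpu. */
        per_cpu(demo_state, cpu) = kzalloc(64, GFP_KERNEL);
}

static int __init demo_init(void)
{
        int cpu;

        /* Hold off cpu hotplug while walking the online mask. */
        get_online_cpus();
        for_each_online_cpu(cpu)
                demo_init_cpu(cpu);
        put_online_cpus();

        return 0;
}

static void __exit demo_exit(void)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                kfree(per_cpu(demo_state, cpu));
                per_cpu(demo_state, cpu) = NULL;
        }
        put_online_cpus();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

As in bts_trace_init() above, holding get_online_cpus() across the walk keeps the online mask stable, so no CPU can disappear between the for_each_online_cpu() iteration and the per-cpu store; CPUs that come online later are picked up by a notifier, as bts_hotcpu_handler does in the patch.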