/*
* trace stack traces
*
- * Copyright (C) 2004, 2005, Soeren Sandmann
+ * Copyright (C) 2004-2008, Soeren Sandmann
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
*/
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include <asm/stacktrace.h>

#include "trace.h"
/*
 * (The backtrace_warning(), backtrace_warning_symbol(), backtrace_stack()
 *  and backtrace_address() callbacks are not shown in this diff.)
 */
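
/*
 * For context (not part of this diff): the file-scope state the code below
 * relies on, and the helper that reads one stack frame from user memory,
 * look roughly like this in kernels of this era:
 */
static struct trace_array	*sysprof_trace;
static int __read_mostly	tracer_enabled;

/*
 * 1 msec sample interval by default:
 */
static unsigned long sample_period = 1000000;
static const unsigned int sample_max_depth = 512;

static DEFINE_MUTEX(sample_timer_lock);
/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	/* Refuse addresses that are not valid user memory: */
	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	/* Copy without sleeping; a fault simply ends the walk: */
	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}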
-const static struct stacktrace_ops backtrace_ops = {
+static const struct stacktrace_ops backtrace_ops = {
.warning = backtrace_warning,
.warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
.address = backtrace_address,
};
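
/*
 * dump_trace() walks the kernel stack and reports what it finds through
 * this callback table: .address is invoked for every return address, and
 * backtrace_address() turns each one into a trace entry while counting
 * frames in info->pos. The other callbacks mostly exist to satisfy the
 * x86 stacktrace_ops interface.
 */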
-static struct pt_regs *
+static int
trace_kernel(struct pt_regs *regs, struct trace_array *tr,
struct trace_array_cpu *data)
{
struct backtrace_info info;
unsigned long bp;
- char *user_stack;
char *stack;
	info.tr = tr;
	info.data = data;
	info.pos = 1;

	__trace_special(info.tr, info.data, 1, regs->ip, 0);

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = regs->bp;
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);

- /* Now trace the user stack */
- user_stack = ((char *)current->thread.sp0 - sizeof(struct pt_regs));
-
- return (struct pt_regs *)user_stack;
+ return info.pos;
}
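
/*
 * trace_kernel() now reports how many entries it recorded, so the caller
 * can continue numbering user-stack frames from the same counter instead
 * of restarting at zero.
 */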
static void timer_notify(struct pt_regs *regs, int cpu)
{
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	const void __user *fp;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	__trace_special(tr, data, 0, 0, current->pid);

	if (!is_user)
-		regs = trace_kernel(regs, tr, data);
+		i = trace_kernel(regs, tr, data);
+	else
+		i = 0;

-	fp = (void __user *)regs->bp;
+	/*
+	 * Trace user stack if we are not a kernel thread
+	 */
+	if (current->mm && i < sample_max_depth) {
+		regs = (struct pt_regs *)current->thread.sp0 - 1;

-	__trace_special(tr, data, 2, regs->ip, 0);
+		fp = (void __user *)regs->bp;

-	for (i = 0; i < sample_max_depth; i++) {
-		frame.next_fp = 0;
-		frame.return_address = 0;
-		if (!copy_stack_frame(fp, &frame))
-			break;
-		if ((unsigned long)fp < regs->sp)
-			break;
+		__trace_special(tr, data, 2, regs->ip, 0);

-		__trace_special(tr, data, 2, frame.return_address,
-				(unsigned long)fp);
-		fp = frame.next_fp;
-	}
+		while (i < sample_max_depth) {
+			frame.next_fp = NULL;
+			frame.return_address = 0;
+			if (!copy_stack_frame(fp, &frame))
+				break;
+			if ((unsigned long)fp < regs->sp)
+				break;
+
+			__trace_special(tr, data, 2, frame.return_address,
+					(unsigned long)fp);
+			fp = frame.next_fp;
+
+			i++;
+		}
+
+	}

-	__trace_special(tr, data, 3, current->pid, i);
	/*
	 * Special trace entry if we overflow the max depth:
	 */
	if (i == sample_max_depth)
		__trace_special(tr, data, -1, -1, -1);
+
+	__trace_special(tr, data, 3, current->pid, i);
}
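
/*
 * The user-mode registers of the interrupted task sit at the top of its
 * kernel stack, one pt_regs below thread.sp0; that is what the
 * "(struct pt_regs *)current->thread.sp0 - 1" above recovers, and its
 * saved frame pointer seeds the user-stack walk.
 */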
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* trace here per cpu: */
	timer_notify(get_irq_regs(), smp_processor_id());

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}
-static void start_stack_timer(int cpu)
+static void start_stack_timer(void *unused)
{
- struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+ struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = stack_trace_timer_fn;
- hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
- hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
+ hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+ HRTIMER_MODE_REL_PINNED);
}
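
/*
 * HRTIMER_MODE_REL_PINNED keeps each sampling timer on the CPU it was
 * started on, so every CPU keeps profiling itself even when unpinned
 * timers would be migrated away from an idle core.
 */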
static void start_stack_timers(void)
{
- cpumask_t saved_mask = current->cpus_allowed;
- int cpu;
-
- for_each_online_cpu(cpu) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
- start_stack_timer(cpu);
- }
- set_cpus_allowed_ptr(current, &saved_mask);
+ on_each_cpu(start_stack_timer, NULL, 1);
}
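
/*
 * on_each_cpu() runs start_stack_timer() on every online CPU (via IPI on
 * the remote ones) and, with the final argument set to 1, waits for all
 * of them to finish; this replaces the old trick of migrating the current
 * task from CPU to CPU with set_cpus_allowed_ptr().
 */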
static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}
-static void stack_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr->data[cpu]);
-}
-
-static void start_stack_trace(struct trace_array *tr)
-{
- mutex_lock(&sample_timer_lock);
- stack_reset(tr);
- start_stack_timers();
- tracer_enabled = 1;
- mutex_unlock(&sample_timer_lock);
-}
-
static void stop_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	tracer_enabled = 0;
	mutex_unlock(&sample_timer_lock);
}
-static void stack_trace_init(struct trace_array *tr)
+static int stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

-	if (tr->ctrl)
-		start_stack_trace(tr);
+	tracing_start_cmdline_record();
+
+	mutex_lock(&sample_timer_lock);
+	start_stack_timers();
+	tracer_enabled = 1;
+	mutex_unlock(&sample_timer_lock);
+	return 0;
}

static void stack_trace_reset(struct trace_array *tr)
{
-	if (tr->ctrl)
-		stop_stack_trace(tr);
+	tracing_stop_cmdline_record();
+	stop_stack_trace(tr);
}

-static void stack_trace_ctrl_update(struct trace_array *tr)
-{
-	/* When starting a new trace, reset the buffers */
-	if (tr->ctrl)
-		start_stack_trace(tr);
-	else
-		stop_stack_trace(tr);
-}
static struct tracer stack_trace __read_mostly =
{
	.name = "sysprof",
.init = stack_trace_init,
.reset = stack_trace_reset,
- .ctrl_update = stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_sysprof,
#endif
};

__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);

/*
 * (The sysprof_sample_read()/sysprof_sample_write() debugfs handlers are
 *  not shown in this diff; sysprof_sample_write() ends:)
 */
	return cnt;
}
-static struct file_operations sysprof_sample_fops = {
+static const struct file_operations sysprof_sample_fops = {
.read = sysprof_sample_read,
.write = sysprof_sample_write,
};
void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
{
-	struct dentry *entry;
-
-	entry = debugfs_create_file("sysprof_sample_period", 0644,
+	trace_create_file("sysprof_sample_period", 0644,
			d_tracer, NULL, &sysprof_sample_fops);
-	if (entry)
-		return;
-
-	pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n");
}
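
/*
 * trace_create_file() is the tracing core's wrapper around
 * debugfs_create_file(); it warns on failure itself, which is why the
 * open-coded error handling (with its copy-pasted 'dyn_ftrace_total_info'
 * message) could go.
 */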