#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
+#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
+static DEFINE_MUTEX(stack_sysctl_mutex);
+
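+/*
+ * stack_tracer_enabled can be toggled at run time via the
+ * kernel.stack_tracer_enabled sysctl (see stack_trace_sysctl() below)
+ * or set at boot with the "stacktrace" command line parameter.
+ */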
+int stack_tracer_enabled;
+static int last_stack_tracer_enabled;
static inline void check_stack(void)
{
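/* we do not handle interrupt stacks yet */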
if (!object_is_on_stack(&this_size))
return;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
/* a race could have already updated it */
if (this_size <= max_stack_size)
goto out;
/*
 * This code only takes place
 * on a new max, so it is far from a fast path.
 */
while (i < max_stack_trace.nr_entries) {
+ int found = 0;
stack_dump_index[i] = this_size;
p = start;
/* scan the remaining stack words for this trace entry */
for (; p < top && i < max_stack_trace.nr_entries; p++) {
if (*p == stack_dump_trace[i]) {
this_size = stack_dump_index[i++] =
(top - p) * sizeof(unsigned long);
+ found = 1;
/* Start the search from here */
start = p + 1;
}
}
- i++;
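+ /* the entry could not be found on the stack; skip it */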
+ if (!found)
+ i++;
}
out:
__raw_spin_unlock(&max_stack_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
int cpu, resched;

if (unlikely(!ftrace_enabled || stack_trace_disabled))
return;
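+ /* ftrace_preempt_disable() records whether a reschedule was pending */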
- resched = need_resched();
- preempt_disable_notrace();
+ resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
/* no atomic needed, we only modify this variable by this cpu */
if (per_cpu(trace_active, cpu)++ != 0)
goto out;

check_stack();

out:
per_cpu(trace_active, cpu)--;
/* prevent recursion in schedule */
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
};
if (ret < 0)
return ret;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
*ptr = val;
__raw_spin_unlock(&max_stack_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
return count;
}
-static struct file_operations stack_max_size_fops = {
+static const struct file_operations stack_max_size_fops = {
.open = tracing_open_generic,
.read = stack_max_size_read,
.write = stack_max_size_write,
#endif
}
+static void print_disabled(struct seq_file *m)
+{
+ seq_puts(m, "#\n"
+ "# Stack tracer disabled\n"
+ "#\n"
+ "# To enable the stack tracer, either add 'stacktrace' to the\n"
+ "# kernel command line\n"
+ "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
+ "#\n");
+}
+
static int t_show(struct seq_file *m, void *v)
{
long i;
int size;
if (v == SEQ_START_TOKEN) {
- seq_printf(m, "        Depth   Size      Location"
+ seq_printf(m, "        Depth    Size   Location"
"    (%d entries)\n"
- "        -----   ----      --------\n",
+ "        -----    ----   --------\n",
max_stack_trace.nr_entries);
+
+ if (!stack_tracer_enabled && !max_stack_size)
+ print_disabled(m);
+
return 0;
}
return 0;
}
-static struct seq_operations stack_trace_seq_ops = {
+static const struct seq_operations stack_trace_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
return ret;
}
-static struct file_operations stack_trace_fops = {
+static const struct file_operations stack_trace_fops = {
.open = stack_trace_open,
.read = seq_read,
.llseek = seq_lseek,
};
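+
+/*
+ * Handler for the kernel.stack_tracer_enabled sysctl: register or
+ * unregister the ftrace callback only when the value actually changes.
+ * stack_sysctl_mutex serializes concurrent updates.
+ */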
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+ struct file *file, void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+
+ mutex_lock(&stack_sysctl_mutex);
+
+ ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+
+ if (ret || !write ||
+ (last_stack_tracer_enabled == stack_tracer_enabled))
+ goto out;
+
+ last_stack_tracer_enabled = stack_tracer_enabled;
+
+ if (stack_tracer_enabled)
+ register_ftrace_function(&trace_ops);
+ else
+ unregister_ftrace_function(&trace_ops);
+
+ out:
+ mutex_unlock(&stack_sysctl_mutex);
+ return ret;
+}
+
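+/* enable the stack tracer from the "stacktrace" kernel command line */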
+static __init int enable_stacktrace(char *str)
+{
+ stack_tracer_enabled = 1;
+ last_stack_tracer_enabled = 1;
+ return 1;
+}
+__setup("stacktrace", enable_stacktrace);
+
static __init int stack_trace_init(void)
{
struct dentry *d_tracer;
struct dentry *entry;

d_tracer = tracing_init_dentry();

entry = debugfs_create_file("stack_trace", 0444, d_tracer,
NULL, &stack_trace_fops);
if (!entry)
pr_warning("Could not create debugfs 'stack_trace' entry\n");
- register_ftrace_function(&trace_ops);
+ if (stack_tracer_enabled)
+ register_ftrace_function(&trace_ops);
return 0;
}