#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
-#include <linux/kthread.h>
#include <linux/hardirq.h>
-#include <linux/ftrace.h>
+#include <linux/kthread.h>
#include <linux/uaccess.h>
+#include <linux/kprobes.h>
+#include <linux/ftrace.h>
#include <linux/sysctl.h>
-#include <linux/hash.h>
#include <linux/ctype.h>
+#include <linux/hash.h>
#include <linux/list.h>
+#include <asm/ftrace.h>
+
#include "trace.h"
-int ftrace_enabled;
+/* ftrace_enabled is the user-visible switch that turns ftrace on or off */
+int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
+/*
+ * ftrace_disabled is set when an anomaly is discovered.
+ * It overrides ftrace_enabled and, once set, is never cleared.
+ */
+static int ftrace_disabled __read_mostly;
+
static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-/* mcount is defined per arch in assembly */
-EXPORT_SYMBOL(mcount);
-
-notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_ops *op = ftrace_list;
ftrace_trace_function = ftrace_stub;
}
-static int notrace __register_ftrace_function(struct ftrace_ops *ops)
+static int __register_ftrace_function(struct ftrace_ops *ops)
{
- /* Should never be called by interrupts */
+ /* should not be called from interrupt context */
spin_lock(&ftrace_lock);
ops->next = ftrace_list;
return 0;
}
-static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
struct ftrace_ops **p;
int ret = 0;
+ /* should not be called from interrupt context */
spin_lock(&ftrace_lock);
/*
#ifdef CONFIG_DYNAMIC_FTRACE
+#ifndef CONFIG_FTRACE_MCOUNT_RECORD
+/*
+ * The hash lock is only needed when the recording of the mcount
+ * callers is dynamic, i.e. the call sites register themselves at
+ * run time instead of being recorded at compile time.
+ */
+static DEFINE_SPINLOCK(ftrace_hash_lock);
+#define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags)
+#define ftrace_hash_unlock(flags) spin_unlock_irqrestore(&ftrace_hash_lock, flags)
+#else
+/* With CONFIG_FTRACE_MCOUNT_RECORD the hash is protected by ftrace_lock. */
+#define ftrace_hash_lock(flags) do { (void)flags; } while (0)
+#define ftrace_hash_unlock(flags) do { } while (0)
+#endif
+
static struct task_struct *ftraced_task;
-static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
-static unsigned long ftraced_iteration_counter;
enum {
FTRACE_ENABLE_CALLS = (1 << 0),
};
static int ftrace_filtered;
+static int tracing_on;		/* non-zero while mcount call sites are patched in */
+static int frozen_record_count;	/* records currently frozen under a kprobe */
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
-static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
-static DEFINE_MUTEX(ftrace_filter_lock);
+static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
struct ftrace_page *next;
- int index;
+ unsigned long index;
struct dyn_ftrace records[];
-} __attribute__((packed));
+};
#define ENTRIES_PER_PAGE \
((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
static int ftraced_trigger;
static int ftraced_suspend;
+static int ftraced_stop;
static int ftrace_record_suspend;
static struct dyn_ftrace *ftrace_free_records;
+
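+/*
+ * A record whose mcount site has a live kprobe on it is "frozen":
+ * ftrace must not modify that text until the kprobe is removed.
+ * frozen_record_count lets skip_trace() bail out cheaply when
+ * nothing is frozen.
+ */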
+#ifdef CONFIG_KPROBES
+static inline void freeze_record(struct dyn_ftrace *rec)
+{
+ if (!(rec->flags & FTRACE_FL_FROZEN)) {
+ rec->flags |= FTRACE_FL_FROZEN;
+ frozen_record_count++;
+ }
+}
+
+static inline void unfreeze_record(struct dyn_ftrace *rec)
+{
+ if (rec->flags & FTRACE_FL_FROZEN) {
+ rec->flags &= ~FTRACE_FL_FROZEN;
+ frozen_record_count--;
+ }
+}
+
+static inline int record_frozen(struct dyn_ftrace *rec)
+{
+ return rec->flags & FTRACE_FL_FROZEN;
+}
+#else
+# define freeze_record(rec) ({ 0; })
+# define unfreeze_record(rec) ({ 0; })
+# define record_frozen(rec) ({ 0; })
+#endif /* CONFIG_KPROBES */
+
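+/*
+ * skip_trace() - return non-zero if a hit at @ip should be ignored.
+ * A frozen record still has its mcount call in place, so replay the
+ * checks that would otherwise have patched the site to a nop.
+ */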
+int skip_trace(unsigned long ip)
+{
+ unsigned long fl;
+ struct dyn_ftrace *rec;
+ struct hlist_node *t;
+ struct hlist_head *head;
+
+ if (frozen_record_count == 0)
+ return 0;
+
+ head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
+ hlist_for_each_entry_rcu(rec, t, head, node) {
+ if (rec->ip == ip) {
+ if (record_frozen(rec)) {
+ if (rec->flags & FTRACE_FL_FAILED)
+ return 1;
+
+ if (!(rec->flags & FTRACE_FL_CONVERTED))
+ return 1;
+
+ if (!tracing_on || !ftrace_enabled)
+ return 1;
+
+ if (ftrace_filtered) {
+ fl = rec->flags & (FTRACE_FL_FILTER |
+ FTRACE_FL_NOTRACE);
+ if (!fl || (fl & FTRACE_FL_NOTRACE))
+ return 1;
+ }
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
static inline int
-notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
+ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
struct dyn_ftrace *p;
struct hlist_node *t;
int found = 0;
- hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
+ hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
if (p->ip == ip) {
found = 1;
break;
return found;
}
-static inline void notrace
+static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
- hlist_add_head(&node->node, &ftrace_hash[key]);
+ hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}
-static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
+/* called from kstop_machine */
+static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
- /* no locking, only called from kstop_machine */
+ hlist_del(&node->node);
+}
+static void ftrace_free_rec(struct dyn_ftrace *rec)
+{
rec->ip = (unsigned long)ftrace_free_records;
ftrace_free_records = rec;
rec->flags |= FTRACE_FL_FREE;
}
-static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
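+/*
+ * ftrace_release - hand back every record whose ip lies inside
+ * [start, start + size), e.g. the text of a module being unloaded.
+ */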
+void ftrace_release(void *start, unsigned long size)
+{
+ struct dyn_ftrace *rec;
+ struct ftrace_page *pg;
+ unsigned long s = (unsigned long)start;
+ unsigned long e = s + size;
+ int i;
+
+ if (ftrace_disabled || !start)
+ return;
+
+ /* should not be called from interrupt context */
+ spin_lock(&ftrace_lock);
+
+ for (pg = ftrace_pages_start; pg; pg = pg->next) {
+ for (i = 0; i < pg->index; i++) {
+ rec = &pg->records[i];
+
+ if ((rec->ip >= s) && (rec->ip < e))
+ ftrace_free_rec(rec);
+ }
+ }
+	spin_unlock(&ftrace_lock);
+}
+
+static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
struct dyn_ftrace *rec;
if (ftrace_free_records) {
rec = ftrace_free_records;
- /* todo, disable tracing altogether on this warning */
if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
WARN_ON_ONCE(1);
ftrace_free_records = NULL;
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
return NULL;
}
return &ftrace_pages->records[ftrace_pages->index++];
}
-static void notrace
+static void
ftrace_record_ip(unsigned long ip)
{
struct dyn_ftrace *node;
unsigned long flags;
unsigned long key;
int resched;
- int atomic;
+ int cpu;
- if (!ftrace_enabled)
+ if (!ftrace_enabled || ftrace_disabled)
return;
resched = need_resched();
preempt_disable_notrace();
- /* We simply need to protect against recursion */
- __get_cpu_var(ftrace_shutdown_disable_cpu)++;
- if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
+ /*
+ * We simply need to protect against recursion.
+	 * Use the raw version of smp_processor_id and not
+ * __get_cpu_var which can call debug hooks that can
+ * cause a recursive crash here.
+ */
+ cpu = raw_smp_processor_id();
+ per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
+ if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
goto out;
if (unlikely(ftrace_record_suspend))
if (ftrace_ip_in_hash(ip, key))
goto out;
- atomic = irqs_disabled();
-
- spin_lock_irqsave(&ftrace_shutdown_lock, flags);
+ ftrace_hash_lock(flags);
/* This ip may have hit the hash before the lock */
if (ftrace_ip_in_hash(ip, key))
goto out_unlock;
- /*
- * There's a slight race that the ftraced will update the
- * hash and reset here. If it is already converted, skip it.
- */
- if (ftrace_ip_converted(ip))
- goto out_unlock;
-
node = ftrace_alloc_dyn_node(ip);
if (!node)
goto out_unlock;
ftraced_trigger = 1;
out_unlock:
- spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
+ ftrace_hash_unlock(flags);
out:
- __get_cpu_var(ftrace_shutdown_disable_cpu)--;
+ per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
/* prevent recursion with scheduler */
if (resched)
preempt_enable_notrace();
}
-#define FTRACE_ADDR ((long)(&ftrace_caller))
-#define MCOUNT_ADDR ((long)(&mcount))
+#define FTRACE_ADDR ((long)(ftrace_caller))
-static void notrace
+static int
__ftrace_replace_code(struct dyn_ftrace *rec,
unsigned char *old, unsigned char *new, int enable)
{
- unsigned long ip;
- int failed;
+ unsigned long ip, fl;
ip = rec->ip;
if (ftrace_filtered && enable) {
- unsigned long fl;
/*
* If filtering is on:
*
* If this record is not set to be filtered
* and it is not enabled do nothing.
*
+ * If this record is set not to trace then
+ * do nothing.
+ *
+ * If this record is set not to trace and
+ * it is enabled then disable it.
+ *
* If this record is not set to be filtered and
* it is enabled, disable it.
*/
- fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
+
+ fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
+ FTRACE_FL_ENABLED);
if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
- (fl == 0))
- return;
+ (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
+ !fl || (fl == FTRACE_FL_NOTRACE))
+ return 0;
/*
* If it is enabled disable it,
* otherwise enable it!
*/
- if (fl == FTRACE_FL_ENABLED) {
+ if (fl & FTRACE_FL_ENABLED) {
/* swap new and old */
new = old;
old = ftrace_call_replace(ip, FTRACE_ADDR);
}
} else {
- if (enable)
+ if (enable) {
+ /*
+ * If this record is set not to trace and is
+ * not enabled, do nothing.
+ */
+ fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
+ if (fl == FTRACE_FL_NOTRACE)
+ return 0;
+
new = ftrace_call_replace(ip, FTRACE_ADDR);
- else
+ } else
old = ftrace_call_replace(ip, FTRACE_ADDR);
if (enable) {
if (rec->flags & FTRACE_FL_ENABLED)
- return;
+ return 0;
rec->flags |= FTRACE_FL_ENABLED;
} else {
if (!(rec->flags & FTRACE_FL_ENABLED))
- return;
+ return 0;
rec->flags &= ~FTRACE_FL_ENABLED;
}
}
- failed = ftrace_modify_code(ip, old, new);
- if (failed) {
- unsigned long key;
- /* It is possible that the function hasn't been converted yet */
- key = hash_long(ip, FTRACE_HASHBITS);
- if (!ftrace_ip_in_hash(ip, key)) {
- rec->flags |= FTRACE_FL_FAILED;
- ftrace_free_rec(rec);
- }
-
- }
+ return ftrace_modify_code(ip, old, new);
}
-static void notrace ftrace_replace_code(int enable)
+static void ftrace_replace_code(int enable)
{
+ int i, failed;
unsigned char *new = NULL, *old = NULL;
struct dyn_ftrace *rec;
struct ftrace_page *pg;
- int i;
if (enable)
old = ftrace_nop_replace();
if (rec->flags & FTRACE_FL_FAILED)
continue;
- __ftrace_replace_code(rec, old, new, enable);
+	/* leave the mcount site untouched while a kprobe is registered on it */
+ if (get_kprobe((void *)rec->ip)) {
+ freeze_record(rec);
+ continue;
+ } else {
+ unfreeze_record(rec);
+ }
+
+ failed = __ftrace_replace_code(rec, old, new, enable);
+ if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
+ rec->flags |= FTRACE_FL_FAILED;
+ if ((system_state == SYSTEM_BOOTING) ||
+ !core_kernel_text(rec->ip)) {
+ ftrace_del_hash(rec);
+ ftrace_free_rec(rec);
+ }
+ }
}
}
}
-static notrace void ftrace_shutdown_replenish(void)
+static void ftrace_shutdown_replenish(void)
{
if (ftrace_pages->next)
return;
ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
-static notrace void
+static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
unsigned long ip;
failed = ftrace_modify_code(ip, call, nop);
if (failed) {
rec->flags |= FTRACE_FL_FAILED;
- ftrace_free_rec(rec);
+ return 0;
}
+ return 1;
}
-static int notrace __ftrace_modify_code(void *data)
+static int __ftrace_update_code(void *ignore);
+
+static int __ftrace_modify_code(void *data)
{
unsigned long addr;
int *command = data;
- if (*command & FTRACE_ENABLE_CALLS)
+ if (*command & FTRACE_ENABLE_CALLS) {
+ /*
+ * Update any recorded ips now that we have the
+ * machine stopped
+ */
+ __ftrace_update_code(NULL);
ftrace_replace_code(1);
- else if (*command & FTRACE_DISABLE_CALLS)
+ tracing_on = 1;
+ } else if (*command & FTRACE_DISABLE_CALLS) {
ftrace_replace_code(0);
+ tracing_on = 0;
+ }
if (*command & FTRACE_UPDATE_TRACE_FUNC)
ftrace_update_ftrace_func(ftrace_trace_function);
return 0;
}
-static void notrace ftrace_run_update_code(int command)
+static void ftrace_run_update_code(int command)
{
- stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
+ stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
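+/*
+ * The ftraced daemon periodically runs kstop_machine; these helpers
+ * keep it from doing so (flushing pending updates one last time) or
+ * let it resume.
+ */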
+void ftrace_disable_daemon(void)
+{
+ /* Stop the daemon from calling kstop_machine */
+ mutex_lock(&ftraced_lock);
+ ftraced_stop = 1;
+ mutex_unlock(&ftraced_lock);
+
+ ftrace_force_update();
+}
+
+void ftrace_enable_daemon(void)
+{
+ mutex_lock(&ftraced_lock);
+ ftraced_stop = 0;
+ mutex_unlock(&ftraced_lock);
+
+ ftrace_force_update();
}
static ftrace_func_t saved_ftrace_func;
-static void notrace ftrace_startup(void)
+static void ftrace_startup(void)
{
int command = 0;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
ftraced_suspend++;
if (ftraced_suspend == 1)
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_shutdown(void)
+static void ftrace_shutdown(void)
{
int command = 0;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
ftraced_suspend--;
if (!ftraced_suspend)
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_startup_sysctl(void)
+static void ftrace_startup_sysctl(void)
{
int command = FTRACE_ENABLE_MCOUNT;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
/* Force update next time */
saved_ftrace_func = NULL;
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_shutdown_sysctl(void)
+static void ftrace_shutdown_sysctl(void)
{
int command = FTRACE_DISABLE_MCOUNT;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
/* ftraced_suspend is true if ftrace is running */
if (ftraced_suspend)
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int notrace __ftrace_update_code(void *ignore)
+static int __ftrace_update_code(void *ignore)
{
- struct dyn_ftrace *p;
- struct hlist_head head;
- struct hlist_node *t;
- int save_ftrace_enabled;
+ int i, save_ftrace_enabled;
cycle_t start, stop;
- int i;
+ struct dyn_ftrace *p;
+ struct hlist_node *t, *n;
+ struct hlist_head *head, temp_list;
/* Don't be recording funcs now */
+ ftrace_record_suspend++;
save_ftrace_enabled = ftrace_enabled;
ftrace_enabled = 0;
/* No locks needed, the machine is stopped! */
for (i = 0; i < FTRACE_HASHSIZE; i++) {
- if (hlist_empty(&ftrace_hash[i]))
- continue;
-
- head = ftrace_hash[i];
- INIT_HLIST_HEAD(&ftrace_hash[i]);
+ INIT_HLIST_HEAD(&temp_list);
+ head = &ftrace_hash[i];
/* all CPUS are stopped, we are safe to modify code */
- hlist_for_each_entry(p, t, &head, node) {
- ftrace_code_disable(p);
- ftrace_update_cnt++;
+ hlist_for_each_entry_safe(p, t, n, head, node) {
+ /* Skip over failed records which have not been
+ * freed. */
+ if (p->flags & FTRACE_FL_FAILED)
+ continue;
+
+ /* Unconverted records are always at the head of the
+ * hash bucket. Once we encounter a converted record,
+ * simply skip over to the next bucket. Saves ftraced
+			 * some processor cycles (ftrace does its bit for
+ * global warming :-p ). */
+ if (p->flags & (FTRACE_FL_CONVERTED))
+ break;
+
+ /* Ignore updates to this record's mcount site.
+ * Reintroduce this record at the head of this
+ * bucket to attempt to "convert" it again if
+ * the kprobe on it is unregistered before the
+ * next run. */
+ if (get_kprobe((void *)p->ip)) {
+ ftrace_del_hash(p);
+ INIT_HLIST_NODE(&p->node);
+ hlist_add_head(&p->node, &temp_list);
+ freeze_record(p);
+ continue;
+ } else {
+ unfreeze_record(p);
+ }
+
+ /* convert record (i.e, patch mcount-call with NOP) */
+ if (ftrace_code_disable(p)) {
+ p->flags |= FTRACE_FL_CONVERTED;
+ ftrace_update_cnt++;
+ } else {
+ if ((system_state == SYSTEM_BOOTING) ||
+ !core_kernel_text(p->ip)) {
+ ftrace_del_hash(p);
+ ftrace_free_rec(p);
+ }
+ }
}
+ hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
+ hlist_del(&p->node);
+ INIT_HLIST_NODE(&p->node);
+ hlist_add_head(&p->node, head);
+ }
}
stop = ftrace_now(raw_smp_processor_id());
ftrace_update_time = stop - start;
ftrace_update_tot_cnt += ftrace_update_cnt;
+ ftraced_trigger = 0;
ftrace_enabled = save_ftrace_enabled;
+ ftrace_record_suspend--;
return 0;
}
-static void notrace ftrace_update_code(void)
-{
- stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
-}
-
-static int notrace ftraced(void *ignore)
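+/* Return 1 if a kstop_machine update ran, 0 if there was nothing to do. */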
+static int ftrace_update_code(void)
{
- unsigned long usecs;
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (!kthread_should_stop()) {
-
- /* check once a second */
- schedule_timeout(HZ);
-
- mutex_lock(&ftrace_sysctl_lock);
- mutex_lock(&ftraced_lock);
- if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
- ftrace_record_suspend++;
- ftrace_update_code();
- usecs = nsecs_to_usecs(ftrace_update_time);
- if (ftrace_update_tot_cnt > 100000) {
- ftrace_update_tot_cnt = 0;
- pr_info("hm, dftrace overflow: %lu change%s"
- " (%lu total) in %lu usec%s\n",
- ftrace_update_cnt,
- ftrace_update_cnt != 1 ? "s" : "",
- ftrace_update_tot_cnt,
- usecs, usecs != 1 ? "s" : "");
- WARN_ON_ONCE(1);
- }
- ftraced_trigger = 0;
- ftrace_record_suspend--;
- }
- ftraced_iteration_counter++;
- mutex_unlock(&ftraced_lock);
- mutex_unlock(&ftrace_sysctl_lock);
-
- wake_up_interruptible(&ftraced_waiters);
+ if (unlikely(ftrace_disabled) ||
+ !ftrace_enabled || !ftraced_trigger)
+ return 0;
- ftrace_shutdown_replenish();
+ stop_machine(__ftrace_update_code, NULL, NULL);
- set_current_state(TASK_INTERRUPTIBLE);
- }
- __set_current_state(TASK_RUNNING);
- return 0;
+ return 1;
}
-static int __init ftrace_dyn_table_alloc(void)
+static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
struct ftrace_page *pg;
int cnt;
pg = ftrace_pages = ftrace_pages_start;
- cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
+ cnt = num_to_init / ENTRIES_PER_PAGE;
+ pr_info("ftrace: allocating %ld hash entries in %d pages\n",
+ num_to_init, cnt);
for (i = 0; i < cnt; i++) {
pg->next = (void *)get_zeroed_page(GFP_KERNEL);
enum {
FTRACE_ITER_FILTER = (1 << 0),
FTRACE_ITER_CONT = (1 << 1),
+ FTRACE_ITER_NOTRACE = (1 << 2),
+ FTRACE_ITER_FAILURES = (1 << 3),
};
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
unsigned filtered;
};
-static void notrace *
+static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
(*pos)++;
+ /* should not be called from interrupt context */
+ spin_lock(&ftrace_lock);
retry:
if (iter->idx >= iter->pg->index) {
if (iter->pg->next) {
}
} else {
rec = &iter->pg->records[iter->idx++];
- if ((rec->flags & FTRACE_FL_FAILED) ||
- ((iter->flags & FTRACE_ITER_FILTER) &&
- !(rec->flags & FTRACE_FL_FILTER))) {
+ if ((rec->flags & FTRACE_FL_FREE) ||
+
+ (!(iter->flags & FTRACE_ITER_FAILURES) &&
+ (rec->flags & FTRACE_FL_FAILED)) ||
+
+ ((iter->flags & FTRACE_ITER_FAILURES) &&
+ !(rec->flags & FTRACE_FL_FAILED)) ||
+
+ ((iter->flags & FTRACE_ITER_NOTRACE) &&
+ !(rec->flags & FTRACE_FL_NOTRACE))) {
rec = NULL;
goto retry;
}
}
+ spin_unlock(&ftrace_lock);
iter->pos = *pos;
.show = t_show,
};
-static int notrace
+static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
int ret;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
return 0;
}
-static void notrace ftrace_filter_reset(void)
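+/* "failures" reuses the avail iterator, restricted to FAILED records. */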
+static int
+ftrace_failures_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct seq_file *m;
+ struct ftrace_iterator *iter;
+
+ ret = ftrace_avail_open(inode, file);
+ if (!ret) {
+ m = (struct seq_file *)file->private_data;
+ iter = (struct ftrace_iterator *)m->private;
+ iter->flags = FTRACE_ITER_FAILURES;
+ }
+
+ return ret;
+}
+
+
+static void ftrace_filter_reset(int enable)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec;
+ unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
unsigned i;
- /* keep kstop machine from running */
- preempt_disable();
- ftrace_filtered = 0;
+ /* should not be called from interrupt context */
+ spin_lock(&ftrace_lock);
+ if (enable)
+ ftrace_filtered = 0;
pg = ftrace_pages_start;
while (pg) {
for (i = 0; i < pg->index; i++) {
rec = &pg->records[i];
if (rec->flags & FTRACE_FL_FAILED)
continue;
- rec->flags &= ~FTRACE_FL_FILTER;
+ rec->flags &= ~type;
}
pg = pg->next;
}
- preempt_enable();
+ spin_unlock(&ftrace_lock);
}
-static int notrace
-ftrace_filter_open(struct inode *inode, struct file *file)
+static int
+ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
struct ftrace_iterator *iter;
int ret = 0;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
- mutex_lock(&ftrace_filter_lock);
+ mutex_lock(&ftrace_regex_lock);
if ((file->f_mode & FMODE_WRITE) &&
!(file->f_flags & O_APPEND))
- ftrace_filter_reset();
+ ftrace_filter_reset(enable);
if (file->f_mode & FMODE_READ) {
iter->pg = ftrace_pages_start;
iter->pos = -1;
- iter->flags = FTRACE_ITER_FILTER;
+ iter->flags = enable ? FTRACE_ITER_FILTER :
+ FTRACE_ITER_NOTRACE;
ret = seq_open(file, &show_ftrace_seq_ops);
if (!ret) {
kfree(iter);
} else
file->private_data = iter;
- mutex_unlock(&ftrace_filter_lock);
+ mutex_unlock(&ftrace_regex_lock);
return ret;
}
-static ssize_t notrace
-ftrace_filter_read(struct file *file, char __user *ubuf,
+static int
+ftrace_filter_open(struct inode *inode, struct file *file)
+{
+ return ftrace_regex_open(inode, file, 1);
+}
+
+static int
+ftrace_notrace_open(struct inode *inode, struct file *file)
+{
+ return ftrace_regex_open(inode, file, 0);
+}
+
+static ssize_t
+ftrace_regex_read(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
if (file->f_mode & FMODE_READ)
return -EPERM;
}
-static loff_t notrace
-ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
+static loff_t
+ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
loff_t ret;
MATCH_END_ONLY,
};
-static void notrace
-ftrace_match(unsigned char *buff, int len)
+static void
+ftrace_match(unsigned char *buff, int len, int enable)
{
char str[KSYM_SYMBOL_LEN];
char *search = NULL;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
int type = MATCH_FULL;
+ unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
unsigned i, match = 0, search_len = 0;
for (i = 0; i < len; i++) {
}
}
- /* keep kstop machine from running */
- preempt_disable();
- ftrace_filtered = 1;
+ /* should not be called from interrupt context */
+ spin_lock(&ftrace_lock);
+ if (enable)
+ ftrace_filtered = 1;
pg = ftrace_pages_start;
while (pg) {
for (i = 0; i < pg->index; i++) {
break;
}
if (matched)
- rec->flags |= FTRACE_FL_FILTER;
+ rec->flags |= flag;
}
pg = pg->next;
}
- preempt_enable();
+ spin_unlock(&ftrace_lock);
}
-static ssize_t notrace
-ftrace_filter_write(struct file *file, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static ssize_t
+ftrace_regex_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, int enable)
{
struct ftrace_iterator *iter;
char ch;
if (!cnt || cnt < 0)
return 0;
- mutex_lock(&ftrace_filter_lock);
+ mutex_lock(&ftrace_regex_lock);
if (file->f_mode & FMODE_READ) {
struct seq_file *m = file->private_data;
cnt--;
}
-
if (isspace(ch)) {
file->f_pos += read;
ret = read;
if (isspace(ch)) {
iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
- ftrace_match(iter->buffer, iter->buffer_idx);
+ ftrace_match(iter->buffer, iter->buffer_idx, enable);
iter->buffer_idx = 0;
} else
iter->flags |= FTRACE_ITER_CONT;
ret = read;
out:
- mutex_unlock(&ftrace_filter_lock);
+ mutex_unlock(&ftrace_regex_lock);
return ret;
}
+static ssize_t
+ftrace_filter_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t
+ftrace_notrace_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
+}
+
+static void
+ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
+{
+ if (unlikely(ftrace_disabled))
+ return;
+
+ mutex_lock(&ftrace_regex_lock);
+ if (reset)
+ ftrace_filter_reset(enable);
+ if (buf)
+ ftrace_match(buf, len, enable);
+ mutex_unlock(&ftrace_regex_lock);
+}
+
/**
* ftrace_set_filter - set a function to filter on in ftrace
* @buf - the string that holds the function filter text.
* Filters denote which functions should be enabled when tracing is enabled.
* If @buf is NULL and reset is set, all functions will be enabled for tracing.
*/
-notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
+void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
- mutex_lock(&ftrace_filter_lock);
- if (reset)
- ftrace_filter_reset();
- if (buf)
- ftrace_match(buf, len);
- mutex_unlock(&ftrace_filter_lock);
+ ftrace_set_regex(buf, len, reset, 1);
}
-static int notrace
-ftrace_filter_release(struct inode *inode, struct file *file)
+/**
+ * ftrace_set_notrace - set a function to not trace in ftrace
+ * @buf - the string that holds the function notrace text.
+ * @len - the length of the string.
+ * @reset - non zero to reset all filters before applying this filter.
+ *
+ * Notrace filters denote which functions should not be traced when
+ * tracing is enabled. If @buf is NULL and reset is set, all functions
+ * will be traceable again.
+ */
+void ftrace_set_notrace(unsigned char *buf, int len, int reset)
+{
+ ftrace_set_regex(buf, len, reset, 0);
+}
+
+static int
+ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
struct seq_file *m = (struct seq_file *)file->private_data;
struct ftrace_iterator *iter;
- mutex_lock(&ftrace_filter_lock);
+ mutex_lock(&ftrace_regex_lock);
if (file->f_mode & FMODE_READ) {
iter = m->private;
if (iter->buffer_idx) {
iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
- ftrace_match(iter->buffer, iter->buffer_idx);
+ ftrace_match(iter->buffer, iter->buffer_idx, enable);
}
mutex_lock(&ftrace_sysctl_lock);
mutex_unlock(&ftrace_sysctl_lock);
kfree(iter);
- mutex_unlock(&ftrace_filter_lock);
+ mutex_unlock(&ftrace_regex_lock);
return 0;
}
+static int
+ftrace_filter_release(struct inode *inode, struct file *file)
+{
+ return ftrace_regex_release(inode, file, 1);
+}
+
+static int
+ftrace_notrace_release(struct inode *inode, struct file *file)
+{
+ return ftrace_regex_release(inode, file, 0);
+}
+
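+/*
+ * debugfs handlers for "ftraced_enabled": reads report the daemon
+ * state, writes of "enable"/"disable" (or a numeric 0/1) toggle it.
+ */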
+static ssize_t
+ftraced_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ /* don't worry about races */
+ char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
+ int r = strlen(buf);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftraced_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+	unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	/* NUL-terminate before the strncmp()s so they never read past cnt */
+	buf[cnt] = 0;
+
+	if (strncmp(buf, "enable", 6) == 0)
+		val = 1;
+	else if (strncmp(buf, "disable", 7) == 0)
+		val = 0;
+	else {
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ val = !!val;
+ }
+
+ if (val)
+ ftrace_enable_daemon();
+ else
+ ftrace_disable_daemon();
+
+ filp->f_pos += cnt;
+
+ return cnt;
+}
+
static struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
.read = seq_read,
.release = ftrace_avail_release,
};
+static struct file_operations ftrace_failures_fops = {
+ .open = ftrace_failures_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = ftrace_avail_release,
+};
+
static struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
- .read = ftrace_filter_read,
+ .read = ftrace_regex_read,
.write = ftrace_filter_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = ftrace_regex_lseek,
.release = ftrace_filter_release,
};
+static struct file_operations ftrace_notrace_fops = {
+ .open = ftrace_notrace_open,
+ .read = ftrace_regex_read,
+ .write = ftrace_notrace_write,
+ .llseek = ftrace_regex_lseek,
+ .release = ftrace_notrace_release,
+};
+
+static struct file_operations ftraced_fops = {
+ .open = tracing_open_generic,
+ .read = ftraced_read,
+ .write = ftraced_write,
+};
+
/**
* ftrace_force_update - force an update to all recording ftrace functions
- *
- * The ftrace dynamic update daemon only wakes up once a second.
- * There may be cases where an update needs to be done immediately
- * for tests or internal kernel tracing to begin. This function
- * wakes the daemon to do an update and will not return until the
- * update is complete.
*/
int ftrace_force_update(void)
{
- unsigned long last_counter;
- DECLARE_WAITQUEUE(wait, current);
int ret = 0;
- if (!ftraced_task)
+ if (unlikely(ftrace_disabled))
return -ENODEV;
+ mutex_lock(&ftrace_sysctl_lock);
mutex_lock(&ftraced_lock);
- last_counter = ftraced_iteration_counter;
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&ftraced_waiters, &wait);
-
- do {
- mutex_unlock(&ftraced_lock);
- wake_up_process(ftraced_task);
- schedule();
- mutex_lock(&ftraced_lock);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- set_current_state(TASK_INTERRUPTIBLE);
- } while (last_counter == ftraced_iteration_counter);
+ /*
+ * If ftraced_trigger is not set, then there is nothing
+ * to update.
+ */
+ if (ftraced_trigger && !ftrace_update_code())
+ ret = -EBUSY;
mutex_unlock(&ftraced_lock);
- remove_wait_queue(&ftraced_waiters, &wait);
- set_current_state(TASK_RUNNING);
+ mutex_unlock(&ftrace_sysctl_lock);
return ret;
}
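+
+/*
+ * Stop the daemon and patch every call site back to a nop; the last
+ * resort used by ftrace_kill() when ftrace can no longer be trusted.
+ */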
+static void ftrace_force_shutdown(void)
+{
+ struct task_struct *task;
+ int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
+
+ mutex_lock(&ftraced_lock);
+ task = ftraced_task;
+ ftraced_task = NULL;
+ ftraced_suspend = -1;
+ ftrace_run_update_code(command);
+ mutex_unlock(&ftraced_lock);
+
+ if (task)
+ kthread_stop(task);
+}
+
static __init int ftrace_init_debugfs(void)
{
struct dentry *d_tracer;
pr_warning("Could not create debugfs "
"'available_filter_functions' entry\n");
+ entry = debugfs_create_file("failures", 0444,
+ d_tracer, NULL, &ftrace_failures_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'failures' entry\n");
+
entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
NULL, &ftrace_filter_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'set_ftrace_filter' entry\n");
+
+ entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
+ NULL, &ftrace_notrace_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'set_ftrace_notrace' entry\n");
+
+ entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
+ NULL, &ftraced_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'ftraced_enabled' entry\n");
return 0;
}
fs_initcall(ftrace_init_debugfs);
-static int __init notrace ftrace_dynamic_init(void)
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
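+/*
+ * Record every call site listed in [start, end) (a __mcount_loc
+ * table) and convert them all to nops. Disabling interrupts is
+ * sufficient here: this text cannot be executing yet.
+ */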
+static int ftrace_convert_nops(unsigned long *start,
+ unsigned long *end)
+{
+ unsigned long *p;
+ unsigned long addr;
+ unsigned long flags;
+
+ p = start;
+ while (p < end) {
+ addr = ftrace_call_adjust(*p++);
+ /* should not be called from interrupt context */
+ spin_lock(&ftrace_lock);
+ ftrace_record_ip(addr);
+ spin_unlock(&ftrace_lock);
+ ftrace_shutdown_replenish();
+ }
+
+	/* the argument to __ftrace_update_code() is ignored */
+ local_irq_save(flags);
+ __ftrace_update_code(p);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+void ftrace_init_module(unsigned long *start, unsigned long *end)
+{
+ if (ftrace_disabled || start == end)
+ return;
+ ftrace_convert_nops(start, end);
+}
+
+extern unsigned long __start_mcount_loc[];
+extern unsigned long __stop_mcount_loc[];
+
+void __init ftrace_init(void)
+{
+ unsigned long count, addr, flags;
+ int ret;
+
+ /* Keep the ftrace pointer to the stub */
+ addr = (unsigned long)ftrace_stub;
+
+ local_irq_save(flags);
+ ftrace_dyn_arch_init(&addr);
+ local_irq_restore(flags);
+
+ /* ftrace_dyn_arch_init places the return code in addr */
+ if (addr)
+ goto failed;
+
+ count = __stop_mcount_loc - __start_mcount_loc;
+
+ ret = ftrace_dyn_table_alloc(count);
+ if (ret)
+ goto failed;
+
+ last_ftrace_enabled = ftrace_enabled = 1;
+
+ ret = ftrace_convert_nops(__start_mcount_loc,
+ __stop_mcount_loc);
+
+ return;
+ failed:
+ ftrace_disabled = 1;
+}
+#else /* CONFIG_FTRACE_MCOUNT_RECORD */
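+/*
+ * Without build-time mcount records, a kernel thread wakes up once a
+ * second and converts any call sites hashed since its last run.
+ */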
+static int ftraced(void *ignore)
+{
+ unsigned long usecs;
+
+ while (!kthread_should_stop()) {
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* check once a second */
+ schedule_timeout(HZ);
+
+ if (unlikely(ftrace_disabled))
+ continue;
+
+ mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftraced_lock);
+ if (!ftraced_suspend && !ftraced_stop &&
+ ftrace_update_code()) {
+ usecs = nsecs_to_usecs(ftrace_update_time);
+ if (ftrace_update_tot_cnt > 100000) {
+ ftrace_update_tot_cnt = 0;
+ pr_info("hm, dftrace overflow: %lu change%s"
+ " (%lu total) in %lu usec%s\n",
+ ftrace_update_cnt,
+ ftrace_update_cnt != 1 ? "s" : "",
+ ftrace_update_tot_cnt,
+ usecs, usecs != 1 ? "s" : "");
+ ftrace_disabled = 1;
+ WARN_ON_ONCE(1);
+ }
+ }
+ mutex_unlock(&ftraced_lock);
+ mutex_unlock(&ftrace_sysctl_lock);
+
+ ftrace_shutdown_replenish();
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static int __init ftrace_dynamic_init(void)
{
struct task_struct *p;
unsigned long addr;
int ret;
addr = (unsigned long)ftrace_record_ip;
- stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
+
+ stop_machine(ftrace_dyn_arch_init, &addr, NULL);
/* ftrace_dyn_arch_init places the return code in addr */
- if (addr)
- return addr;
+ if (addr) {
+ ret = (int)addr;
+ goto failed;
+ }
- ret = ftrace_dyn_table_alloc();
+ ret = ftrace_dyn_table_alloc(NR_TO_INIT);
if (ret)
- return ret;
+ goto failed;
p = kthread_run(ftraced, NULL, "ftraced");
- if (IS_ERR(p))
- return -1;
+ if (IS_ERR(p)) {
+ ret = -1;
+ goto failed;
+ }
last_ftrace_enabled = ftrace_enabled = 1;
ftraced_task = p;
return 0;
+
+ failed:
+ ftrace_disabled = 1;
+ return ret;
}
core_initcall(ftrace_dynamic_init);
+#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
+
#else
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
+# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
+ * ftrace_kill_atomic - kill ftrace from critical sections
+ *
+ * This function should be used by panic code. It stops ftrace
+ * bluntly, without taking any locks. If you need to kill ftrace
+ * from a non-atomic section, use ftrace_kill.
+ */
+void ftrace_kill_atomic(void)
+{
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ ftraced_suspend = -1;
+#endif
+ clear_ftrace_function();
+}
+
+/**
+ * ftrace_kill - totally shutdown ftrace
+ *
+ * This is a safety measure: if an anomaly is detected, calling this
+ * function keeps ftrace from making any further code modifications
+ * or updates. Use it when something has gone wrong.
+ */
+void ftrace_kill(void)
+{
+ mutex_lock(&ftrace_sysctl_lock);
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
+
+ clear_ftrace_function();
+ mutex_unlock(&ftrace_sysctl_lock);
+
+ /* Try to totally disable ftrace */
+ ftrace_force_shutdown();
+}
+
+/**
* register_ftrace_function - register a function for profiling
* @ops - ops structure that holds the function for profiling.
*
{
int ret;
+ if (unlikely(ftrace_disabled))
+ return -1;
+
mutex_lock(&ftrace_sysctl_lock);
ret = __register_ftrace_function(ops);
ftrace_startup();
return ret;
}
-notrace int
+int
ftrace_enable_sysctl(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
mutex_lock(&ftrace_sysctl_lock);
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);