2 * Infrastructure for statistic tracing (histogram output).
4 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
6 * Based on the code from trace_branch.c which is
7 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
12 #include <linux/list.h>
13 #include <linux/seq_file.h>
14 #include <linux/debugfs.h>
18 /* List of stat entries from a tracer */
/*
 * One node per tracer stat entry, linked into the global stat_list head
 * below.  NOTE(review): this excerpt elides lines here — the struct's
 * closing brace and the payload member dereferenced elsewhere as
 * node->stat are not visible; confirm against the full file.
 */
19 struct trace_stat_list {
20 struct list_head list;
/* Head of the sorted snapshot rebuilt at each trace_stat file open */
24 static struct trace_stat_list stat_list;
27 * This is a copy of the current tracer to avoid racy
28 * and dangerous output while the current tracer is
/* Snapshot (by value) of the tracer whose stats we display */
31 static struct tracer current_tracer;
34 * Protect both the current tracer and the global
/* Serializes init_tracer_stat(), stat_seq_init() and the seq walk */
37 static DEFINE_MUTEX(stat_list_mutex);
/*
 * Free every node on stat_list and reset the head to empty.
 * Caller must hold stat_list_mutex.
 * NOTE(review): interior lines are elided in this excerpt — the kfree()
 * calls that release each node inside/after the loop are not visible;
 * verify against the full file before changing this walk.
 */
40 static void reset_stat_list(void)
42 struct trace_stat_list *node;
43 struct list_head *next;
/* Nothing allocated yet (or already reset): bail out */
45 if (list_empty(&stat_list.list))
/* Prefetch the successor so the current node can be freed safely */
48 node = list_entry(stat_list.list.next, struct trace_stat_list, list);
49 next = node->list.next;
51 while (&node->list != next) {
53 node = list_entry(next, struct trace_stat_list, list);
/* Leave the head as a valid empty list for the next open */
57 INIT_LIST_HEAD(&stat_list.list);
/*
 * Install @trace as the tracer whose stats trace_stat will show.
 * Copies the whole struct by value under stat_list_mutex so later
 * readers never race with a tracer switch.
 */
60 void init_tracer_stat(struct tracer *trace)
62 mutex_lock(&stat_list_mutex);
63 current_tracer = *trace;
64 mutex_unlock(&stat_list_mutex);
68 * For tracers that don't provide a stat_cmp callback.
69 * This one will force an immediate insertion on tail of
/*
 * NOTE(review): the body is elided in this excerpt; presumably it
 * returns a constant so stat_seq_init()'s ">" comparison always takes
 * the tail-insert branch — confirm against the full file.
 */
72 static int dummy_cmp(void *p1, void *p2)
78 * Initialize the stat list at each trace_stat file opening.
79 * All of these copies and sorting are required on all opening
80 * since the stats could have changed between two file sessions.
/*
 * Builds a freshly-sorted snapshot of the current tracer's stat entries
 * on stat_list, under stat_list_mutex.  Returns 0 on success.
 * NOTE(review): several lines are elided in this excerpt — the local
 * declarations (prev_stat, i, ret), the kmalloc NULL checks, the
 * "exit"/"exit_free_list" error paths and loop scaffolding are not
 * visible; verify against the full file before modifying.
 */
82 static int stat_seq_init(void)
84 struct trace_stat_list *iter_entry, *new_entry;
89 mutex_lock(&stat_list_mutex);
/* A tracer without the full stat callback set has nothing to show */
92 if (!current_tracer.stat_start || !current_tracer.stat_next ||
93 !current_tracer.stat_show)
/* Fall back to tail-insertion order when no comparator is given */
96 if (!current_tracer.stat_cmp)
97 current_tracer.stat_cmp = dummy_cmp;
100 * The first entry. Actually this is the second, but the first
101 * one (the stat_list head) is pointless.
103 new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
109 INIT_LIST_HEAD(&new_entry->list);
110 list_add(&new_entry->list, &stat_list.list);
/* stat_start() hands back the tracer's first opaque stat cursor */
111 new_entry->stat = current_tracer.stat_start();
113 prev_stat = new_entry->stat;
116 * Iterate over the tracer stat entries and store them in a sorted
/* One node per entry; insertion-sorted below via stat_cmp */
120 new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
126 INIT_LIST_HEAD(&new_entry->list);
127 new_entry->stat = current_tracer.stat_next(prev_stat, i);
129 /* End of insertion */
/* stat_next() returning NULL terminates the iteration */
130 if (!new_entry->stat)
133 list_for_each_entry(iter_entry, &stat_list.list, list) {
134 /* Insertion with a descendent sorting */
135 if (current_tracer.stat_cmp(new_entry->stat,
136 iter_entry->stat) > 0) {
138 list_add_tail(&new_entry->list,
142 /* The current smaller value */
143 } else if (list_is_last(&iter_entry->list,
145 list_add(&new_entry->list, &iter_entry->list);
150 prev_stat = new_entry->stat;
/* Success path: snapshot built, release the lock */
153 mutex_unlock(&stat_list_mutex);
/* Error path unlock (the goto labels between these are elided) */
158 mutex_unlock(&stat_list_mutex);
/*
 * seq_file .start: take stat_list_mutex (released in stat_seq_stop),
 * emit the tracer's headers at the top of the file, and position the
 * iterator at *pos within the snapshot list.
 */
163 static void *stat_seq_start(struct seq_file *s, loff_t *pos)
165 struct trace_stat_list *l = (struct trace_stat_list *)s->private;
167 /* Prevent from tracer switch or stat_list modification */
168 mutex_lock(&stat_list_mutex);
170 /* If we are in the beginning of the file, print the headers */
171 if (!*pos && current_tracer.stat_headers)
172 current_tracer.stat_headers(s);
174 return seq_list_start(&l->list, *pos);
/* seq_file .next: advance to the following snapshot node, bumping *pos */
177 static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
179 struct trace_stat_list *l = (struct trace_stat_list *)s->private;
181 return seq_list_next(p, &l->list, pos);
/* seq_file .stop: drop the mutex taken in stat_seq_start() */
184 static void stat_seq_stop(struct seq_file *m, void *p)
186 mutex_unlock(&stat_list_mutex);
/*
 * seq_file .show: delegate formatting of one snapshot node's opaque
 * stat payload to the tracer's stat_show callback.
 */
189 static int stat_seq_show(struct seq_file *s, void *v)
191 struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
192 return current_tracer.stat_show(s, l->stat);
/* seq_file iterator ops wiring for the trace_stat debugfs file */
195 static const struct seq_operations trace_stat_seq_ops = {
196 .start = stat_seq_start,
197 .next = stat_seq_next,
198 .stop = stat_seq_stop,
199 .show = stat_seq_show
/*
 * debugfs open: set up the seq_file iterator, hand it the global
 * stat_list as private data, then (re)build the sorted snapshot.
 * NOTE(review): the declaration of ret, the success check around
 * seq_open(), and the final return are elided in this excerpt.
 */
202 static int tracing_stat_open(struct inode *inode, struct file *file)
206 ret = seq_open(file, &trace_stat_seq_ops);
208 struct seq_file *m = file->private_data;
209 m->private = &stat_list;
210 ret = stat_seq_init();
218 * Avoid consuming memory with our now useless list.
/*
 * debugfs release: tear down the snapshot under the mutex.
 * NOTE(review): the lines between lock and unlock (presumably
 * reset_stat_list()) and the seq_release()/return are elided here.
 */
220 static int tracing_stat_release(struct inode *i, struct file *f)
222 mutex_lock(&stat_list_mutex);
224 mutex_unlock(&stat_list_mutex);
/* file_operations for the trace_stat debugfs entry (read/llseek elided) */
228 static const struct file_operations tracing_stat_fops = {
229 .open = tracing_stat_open,
232 .release = tracing_stat_release
/*
 * Boot-time init: prime the empty snapshot list and create the
 * read-only "trace_stat" file in the tracing debugfs directory.
 * NOTE(review): the data/fops arguments to debugfs_create_file and the
 * return statements are elided in this excerpt.
 */
235 static int __init tracing_stat_init(void)
237 struct dentry *d_tracing;
238 struct dentry *entry;
240 INIT_LIST_HEAD(&stat_list.list);
241 d_tracing = tracing_init_dentry();
/* 0444: world-readable, never writable */
243 entry = debugfs_create_file("trace_stat", 0444, d_tracing,
/* Creation failure is non-fatal: warn and keep booting */
247 pr_warning("Could not create debugfs "
248 "'trace_stat' entry\n");
251 fs_initcall(tracing_stat_init);