tracing: add __print_symbolic to trace events
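The new ftrace_print_flags_seq() and ftrace_print_symbols_seq() helpers added below are the backends for the __print_flags()/__print_symbolic() macros that trace events use in TP_printk(); the per-cpu ftrace_event_seq buffer gives them a temporary trace_seq to format into. As a rough usage sketch (the event, fields and symbol names here are illustrative only, not part of this patch), a trace event would consume the helpers like this:

TRACE_EVENT(sample_request,

	TP_PROTO(int id, unsigned long state, unsigned long req_flags),

	TP_ARGS(id, state, req_flags),

	TP_STRUCT__entry(
		__field(int,		id)
		__field(unsigned long,	state)
		__field(unsigned long,	req_flags)
	),

	TP_fast_assign(
		__entry->id		= id;
		__entry->state		= state;
		__entry->req_flags	= req_flags;
	),

	/*
	 * Illustrative values: __print_symbolic() prints the name of the
	 * matching value (hex if none matches); __print_flags() prints the
	 * names of all set bits joined by the delimiter.
	 */
	TP_printk("id=%d state=%s flags=%s", __entry->id,
		  __print_symbolic(__entry->state,
				   { 0, "IDLE" },
				   { 1, "RUNNING" },
				   { 2, "BLOCKED" }),
		  __print_flags(__entry->req_flags, "|",
				{ 0x01, "WAIT" },
				{ 0x02, "IO" },
				{ 0x04, "FS" }))
);

The macros themselves live in the tracing headers; this file only provides the sequence-formatting helpers they expand to.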
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index b451417..c12d95d 100644
 /* must be a power of 2 */
 #define EVENT_HASHSIZE 128
 
-static DEFINE_MUTEX(trace_event_mutex);
+static DECLARE_RWSEM(trace_event_mutex);
+
+DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
+
 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
+void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+       int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+
+       s->buffer[len] = 0;
+       seq_puts(m, s->buffer);
+
+       trace_seq_init(s);
+}
+
 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
@@ -84,6 +97,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 
        return len;
 }
+EXPORT_SYMBOL_GPL(trace_seq_printf);
 
 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
 {
@@ -137,7 +151,7 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c)
        return 1;
 }
 
-int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
+int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
 {
        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;
@@ -148,10 +162,10 @@ int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
        return len;
 }
 
-int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
+int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
 {
        unsigned char hex[HEX_CHARS];
-       unsigned char *data = mem;
+       const unsigned char *data = mem;
        int i, j;
 
 #ifdef __BIG_ENDIAN
@@ -167,6 +181,19 @@ int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
        return trace_seq_putmem(s, hex, j);
 }
 
+void *trace_seq_reserve(struct trace_seq *s, size_t len)
+{
+       void *ret;
+
+       if (len > ((PAGE_SIZE - 1) - s->len))
+               return NULL;
+
+       ret = s->buffer + s->len;
+       s->len += len;
+
+       return ret;
+}
+
 int trace_seq_path(struct trace_seq *s, struct path *path)
 {
        unsigned char *p;
@@ -188,6 +215,67 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
        return 0;
 }
 
+const char *
+ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
+                      unsigned long flags,
+                      const struct trace_print_flags *flag_array)
+{
+       unsigned long mask;
+       const char *str;
+       int i;
+
+       trace_seq_init(p);
+
+       for (i = 0;  flag_array[i].name && flags; i++) {
+
+               mask = flag_array[i].mask;
+               if ((flags & mask) != mask)
+                       continue;
+
+               str = flag_array[i].name;
+               flags &= ~mask;
+               if (p->len && delim)
+                       trace_seq_puts(p, delim);
+               trace_seq_puts(p, str);
+       }
+
+       /* check for left over flags */
+       if (flags) {
+               if (p->len && delim)
+                       trace_seq_puts(p, delim);
+               trace_seq_printf(p, "0x%lx", flags);
+       }
+
+       trace_seq_putc(p, 0);
+
+       return p->buffer;
+}
+
+const char *
+ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+                        const struct trace_print_flags *symbol_array)
+{
+       int i;
+
+       trace_seq_init(p);
+
+       for (i = 0;  symbol_array[i].name; i++) {
+
+               if (val != symbol_array[i].mask)
+                       continue;
+
+               trace_seq_puts(p, symbol_array[i].name);
+               break;
+       }
+
+       if (!p->len)
+               trace_seq_printf(p, "0x%lx", val);
+
+       trace_seq_putc(p, 0);
+
+       return p->buffer;
+}
+
 #ifdef CONFIG_KRETPROBES
 static inline const char *kretprobed(const char *name)
 {
@@ -410,7 +498,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
 
                trace_find_cmdline(entry->pid, comm);
 
-               ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]"
+               ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
                                       " %ld.%03ldms (+%ld.%03ldms): ", comm,
                                       entry->pid, iter->cpu, entry->flags,
                                       entry->preempt_count, iter->idx,
@@ -442,6 +530,7 @@ static int task_state_char(unsigned long state)
  * @type: the type of event to look for
  *
  * Returns an event of type @type otherwise NULL
+ * Called with trace_event_read_lock() held.
  */
 struct trace_event *ftrace_find_event(int type)
 {
@@ -451,7 +540,7 @@ struct trace_event *ftrace_find_event(int type)
 
        key = type & (EVENT_HASHSIZE - 1);
 
-       hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
+       hlist_for_each_entry(event, n, &event_hash[key], node) {
                if (event->type == type)
                        return event;
        }
@@ -459,6 +548,46 @@ struct trace_event *ftrace_find_event(int type)
        return NULL;
 }
 
+static LIST_HEAD(ftrace_event_list);
+
+static int trace_search_list(struct list_head **list)
+{
+       struct trace_event *e;
+       int last = __TRACE_LAST_TYPE;
+
+       if (list_empty(&ftrace_event_list)) {
+               *list = &ftrace_event_list;
+               return last + 1;
+       }
+
+       /*
+        * We used up all possible max events;
+        * let's see if somebody freed one.
+        */
+       list_for_each_entry(e, &ftrace_event_list, list) {
+               if (e->type != last + 1)
+                       break;
+               last++;
+       }
+
+       /* Did we use up all 65 thousand events??? */
+       if ((last + 1) > FTRACE_MAX_EVENT)
+               return 0;
+
+       *list = &e->list;
+       return last + 1;
+}
+
+void trace_event_read_lock(void)
+{
+       down_read(&trace_event_mutex);
+}
+
+void trace_event_read_unlock(void)
+{
+       up_read(&trace_event_mutex);
+}
+
 /**
  * register_ftrace_event - register output for an event type
  * @event: the event type to register
@@ -479,17 +608,42 @@ int register_ftrace_event(struct trace_event *event)
        unsigned key;
        int ret = 0;
 
-       mutex_lock(&trace_event_mutex);
+       down_write(&trace_event_mutex);
+
+       if (WARN_ON(!event))
+               goto out;
+
+       INIT_LIST_HEAD(&event->list);
+
+       if (!event->type) {
+               struct list_head *list = NULL;
+
+               if (next_event_type > FTRACE_MAX_EVENT) {
+
+                       event->type = trace_search_list(&list);
+                       if (!event->type)
+                               goto out;
 
-       if (!event->type)
-               event->type = next_event_type++;
-       else if (event->type > __TRACE_LAST_TYPE) {
+               } else {
+
+                       event->type = next_event_type++;
+                       list = &ftrace_event_list;
+               }
+
+               if (WARN_ON(ftrace_find_event(event->type)))
+                       goto out;
+
+               list_add_tail(&event->list, list);
+
+       } else if (event->type > __TRACE_LAST_TYPE) {
                printk(KERN_WARNING "Need to add type to trace.h\n");
                WARN_ON(1);
-       }
-
-       if (ftrace_find_event(event->type))
                goto out;
+       } else {
+               /* Is this event already used */
+               if (ftrace_find_event(event->type))
+                       goto out;
+       }
 
        if (event->trace == NULL)
                event->trace = trace_nop_print;
@@ -502,14 +656,15 @@ int register_ftrace_event(struct trace_event *event)
 
        key = event->type & (EVENT_HASHSIZE - 1);
 
-       hlist_add_head_rcu(&event->node, &event_hash[key]);
+       hlist_add_head(&event->node, &event_hash[key]);
 
        ret = event->type;
  out:
-       mutex_unlock(&trace_event_mutex);
+       up_write(&trace_event_mutex);
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(register_ftrace_event);
 
 /**
  * unregister_ftrace_event - remove a no longer used event
@@ -517,12 +672,14 @@ int register_ftrace_event(struct trace_event *event)
  */
 int unregister_ftrace_event(struct trace_event *event)
 {
-       mutex_lock(&trace_event_mutex);
+       down_write(&trace_event_mutex);
        hlist_del(&event->node);
-       mutex_unlock(&trace_event_mutex);
+       list_del(&event->list);
+       up_write(&trace_event_mutex);
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(unregister_ftrace_event);
 
 /*
  * Standard events
@@ -816,6 +973,8 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
        trace_assign_type(field, iter->ent);
 
        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+               if (!field->caller[i])
+                       break;
                if (i) {
                        if (!trace_seq_puts(s, " <= "))
                                goto partial;