tracing/fastboot: Use the ring-buffer timestamp for initcall entries
kernel/trace/trace_boot.c
/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>

#include "trace.h"

static struct trace_array *boot_trace;
static bool pre_initcalls_finished;

/*
 * Tell the boot tracer that the pre_smp_initcalls have finished, so we
 * are ready to record. This does not enable sched event tracing, however;
 * enable_boot_trace() has to be called for that.
 */
void start_boot_trace(void)
{
        pre_initcalls_finished = true;
}

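/* Start recording sched_switch events, but only once the pre-smp initcalls have finished. */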
void enable_boot_trace(void)
{
        if (pre_initcalls_finished)
                tracing_start_sched_switch_record();
}

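/* Stop recording sched_switch events; a no-op while the pre-smp initcalls are still running. */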
void disable_boot_trace(void)
{
        if (pre_initcalls_finished)
                tracing_stop_sched_switch_record();
}

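/* Take a fresh start time and clear the ring buffer of every online cpu. */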
static void reset_boot_trace(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}

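/*
 * Set up the boot tracer: remember the trace array, clear every possible
 * cpu buffer and point the sched_switch recorder at this trace array.
 */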
static void boot_trace_init(struct trace_array *tr)
{
        int cpu;

        boot_trace = tr;

        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);

        tracing_sched_switch_assign_trace(tr);
}

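/*
 * Pretty-print a TRACE_BOOT_CALL entry, stamping it with the ring-buffer
 * timestamp of the event (iter->ts) split into seconds.nanoseconds.
 */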
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_call *field;
        struct boot_trace_call *call;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        call = &field->boot_call;
        ts = iter->ts;
        nsec_rem = do_div(ts, 1000000000);

        ret = trace_seq_printf(s, "[%5ld.%09ld] calling  %s @ %i\n",
                        (unsigned long)ts, nsec_rem, call->func, call->caller);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}

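/*
 * Pretty-print a TRACE_BOOT_RET entry: the initcall's return value and how
 * long it took, again using the ring-buffer timestamp of the event.
 */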
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_ret *field;
        struct boot_trace_ret *init_ret;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        init_ret = &field->boot_ret;
        ts = iter->ts;
        nsec_rem = do_div(ts, 1000000000);

        ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
                        "returned %d after %llu msecs\n",
                        (unsigned long) ts,
                        nsec_rem,
                        init_ret->func, init_ret->result, init_ret->duration);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}

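/* Route boot entries to their printer; other events are left to the default trace output. */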
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_BOOT_CALL:
                return initcall_call_print_line(iter);
        case TRACE_BOOT_RET:
                return initcall_ret_print_line(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

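/* The boot tracer, named "initcall", using the printers above for its output. */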
struct tracer boot_tracer __read_mostly =
{
        .name           = "initcall",
        .init           = boot_trace_init,
        .reset          = reset_boot_trace,
        .print_line     = initcall_print_line,
};

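/*
 * Record a TRACE_BOOT_CALL event for initcall @fn in the ring buffer.
 * Does nothing while the pre-smp initcalls are still running.
 */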
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot_call *entry;
        unsigned long irq_flags;
        struct trace_array *tr = boot_trace;

        if (!pre_initcalls_finished)
                return;

        /*
         * Resolve the symbol name now: the function lives in the .init
         * section and could disappear later.
         */
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_BOOT_CALL;
        entry->boot_call = *bt;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();

 out:
        preempt_enable();
}

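/*
 * Record a TRACE_BOOT_RET event with the initcall's return value and
 * duration. Does nothing while the pre-smp initcalls are still running.
 */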
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot_ret *entry;
        unsigned long irq_flags;
        struct trace_array *tr = boot_trace;

        if (!pre_initcalls_finished)
                return;

        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_BOOT_RET;
        entry->boot_ret = *bt;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();

 out:
        preempt_enable();
}