2 * Memory allocator tracing
4 * Copyright (C) 2008 Eduard - Gabriel Munteanu
5 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
6 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
9 #include <linux/dcache.h>
10 #include <linux/debugfs.h>
12 #include <linux/seq_file.h>
13 #include <linux/tracepoint.h>
14 #include <trace/kmemtrace.h>
17 #include "trace_output.h"
19 /* Tracer option bit: select a minimalistic output format rather than the original verbose one */
20 #define TRACE_KMEM_OPT_MINIMAL 0x1
/*
 * Options exposed through the ftrace "trace_options" interface.
 * NOTE(review): the initializers below appear truncated in this view
 * (no sentinel entry / closing braces visible) -- verify against the
 * full file.
 */
22 static struct tracer_opt kmem_opts[] = {
23 /* The minimalistic output is disabled by default */
24 { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
/* Current flag state; .val is tested by the header/print callbacks below */
28 static struct tracer_flags kmem_tracer_flags = {
/* Trace array this tracer logs into; assigned in kmem_trace_init() */
33 static struct trace_array *kmemtrace_array;
35 /* Trace allocations: reserve a ring-buffer event, fill a kmemtrace_alloc_entry, commit it */
36 static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
37 unsigned long call_site,
/*
 * NOTE(review): the remaining parameters (ptr, bytes_req, bytes_alloc,
 * gfp_flags, node -- inferred from the callers below) are not visible
 * in this view; the signature is truncated here.
 */
44 struct ring_buffer_event *event;
45 struct kmemtrace_alloc_entry *entry;
46 struct trace_array *tr = kmemtrace_array;
/* Reserve room for exactly one entry in the tracer's ring buffer */
48 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
/* NOTE(review): the usual "if (!event) return;" failure check is not visible here -- confirm in the full file */
51 entry = ring_buffer_event_data(event);
/* Fill the generic entry header; the 0, 0 args are presumably irq flags and preempt count -- confirm */
52 tracing_generic_entry_update(&entry->ent, 0, 0);
54 entry->ent.type = TRACE_KMEM_ALLOC;
55 entry->call_site = call_site;
/* NOTE(review): assignments of type_id, ptr and node are not visible in this view */
57 entry->bytes_req = bytes_req;
58 entry->bytes_alloc = bytes_alloc;
59 entry->gfp_flags = gfp_flags;
/* Publish the event so readers can see it */
62 ring_buffer_unlock_commit(tr->buffer, event);
/* Trace frees: reserve a ring-buffer event, fill a kmemtrace_free_entry, commit it */
67 static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
68 unsigned long call_site,
/* NOTE(review): the trailing `ptr` parameter (see callers) is not visible in this view */
71 struct ring_buffer_event *event;
72 struct kmemtrace_free_entry *entry;
73 struct trace_array *tr = kmemtrace_array;
75 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
/* NOTE(review): the usual "if (!event) return;" failure check is not visible here -- confirm in the full file */
78 entry = ring_buffer_event_data(event);
/* Fill the generic entry header; the 0, 0 args are presumably irq flags and preempt count -- confirm */
79 tracing_generic_entry_update(&entry->ent, 0, 0);
81 entry->ent.type = TRACE_KMEM_FREE;
82 entry->type_id = type_id;
83 entry->call_site = call_site;
/* NOTE(review): the assignment of entry->ptr is not visible in this view */
86 ring_buffer_unlock_commit(tr->buffer, event);
/* kmalloc tracepoint probe: log as a KMALLOC-type allocation; node -1 = no NUMA node info */
91 static void kmemtrace_kmalloc(unsigned long call_site,
97 kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
98 bytes_req, bytes_alloc, gfp_flags, -1);
/* kmem_cache_alloc tracepoint probe: log as a CACHE-type allocation; node -1 = no NUMA node info */
101 static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
107 kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
108 bytes_req, bytes_alloc, gfp_flags, -1);
/* kmalloc_node tracepoint probe: same as kmemtrace_kmalloc() but records the requested NUMA node */
111 static void kmemtrace_kmalloc_node(unsigned long call_site,
118 kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
119 bytes_req, bytes_alloc, gfp_flags, node);
/* kmem_cache_alloc_node tracepoint probe: CACHE-type allocation with the requested NUMA node */
122 static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
129 kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
130 bytes_req, bytes_alloc, gfp_flags, node);
/* kfree tracepoint probe: log a KMALLOC-type free event */
133 static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
135 kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
/* kmem_cache_free tracepoint probe: log a CACHE-type free event */
138 static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
140 kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
/*
 * Attach probe callbacks to all six kmem tracepoints.
 * NOTE(review): the error-handling lines between the register calls
 * (checking `err` and bailing out on failure) and the final return are
 * not visible in this view -- confirm in the full file.
 */
143 static int kmemtrace_start_probes(void)
147 err = register_trace_kmalloc(kmemtrace_kmalloc);
150 err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
153 err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
156 err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
159 err = register_trace_kfree(kmemtrace_kfree);
162 err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
/* Detach all probes registered by kmemtrace_start_probes(), in the same order */
167 static void kmemtrace_stop_probes(void)
169 unregister_trace_kmalloc(kmemtrace_kmalloc);
170 unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
171 unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
172 unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
173 unregister_trace_kfree(kmemtrace_kfree);
174 unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
/*
 * Tracer init callback: remember the trace array, reset every possible
 * CPU's buffer, then attach the tracepoint probes.
 * NOTE(review): the `int cpu;` declaration and the return statement are
 * not visible in this view.
 */
177 static int kmem_trace_init(struct trace_array *tr)
180 kmemtrace_array = tr;
/* Clear stale data from each possible CPU's per-cpu buffer */
182 for_each_cpu_mask(cpu, cpu_possible_map)
183 tracing_reset(tr, cpu);
185 kmemtrace_start_probes();
/* Tracer reset callback: simply detach all tracepoint probes */
190 static void kmem_trace_reset(struct trace_array *tr)
192 kmemtrace_stop_probes();
/* print_header callback: emit column headers, but only for the minimalistic output format */
195 static void kmemtrace_headers(struct seq_file *s)
197 /* The original kmemtrace output is self-describing and needs no headers */
198 if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
/* NOTE(review): the early `return;` under this condition is not visible in this view */
201 seq_printf(s, "#\n");
202 seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS "
203 " POINTER NODE CALLER\n");
204 seq_printf(s, "# FREE | | | | "
/* NOTE(review): the continuation string of the FREE header line (205) is not visible in this view */
206 seq_printf(s, "# |\n\n");
210 * The two functions below reproduce the original kmemtrace output
211 * format (or something close to it); some fields may still be missing.
/* Print one allocation entry in the original "type_id ... node ..." text format */
213 static enum print_line_t
214 kmemtrace_print_alloc_original(struct trace_iterator *iter,
215 struct kmemtrace_alloc_entry *entry)
217 struct trace_seq *s = &iter->seq;
/* NOTE(review): the `int ret;` declaration is not visible in this view */
220 /* Format taken from the old linux/kmemtrace.h */
221 ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
222 "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
223 entry->type_id, entry->call_site, (unsigned long) entry->ptr,
224 (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
225 (unsigned long) entry->gfp_flags, entry->node);
/* NOTE(review): presumably guarded by `if (!ret)` (seq buffer full) -- the check line is not visible */
228 return TRACE_TYPE_PARTIAL_LINE;
230 return TRACE_TYPE_HANDLED;
/* Print one free entry in the original "type_id ... ptr ..." text format */
233 static enum print_line_t
234 kmemtrace_print_free_original(struct trace_iterator *iter,
235 struct kmemtrace_free_entry *entry)
237 struct trace_seq *s = &iter->seq;
/* NOTE(review): the `int ret;` declaration is not visible in this view */
240 /* Format taken from the old linux/kmemtrace.h */
241 ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
242 entry->type_id, entry->call_site, (unsigned long) entry->ptr);
/* NOTE(review): presumably guarded by `if (!ret)` (seq buffer full) -- the check line is not visible */
245 return TRACE_TYPE_PARTIAL_LINE;
247 return TRACE_TYPE_HANDLED;
251 /* The next two functions provide the minimalistic, columnar output format */
/*
 * Emit one allocation entry as a compact row:
 *   "+" marker, type letter (K/C/P/?), bytes requested, bytes granted,
 *   gfp flags in hex, pointer, node, then the symbolized call site.
 * Returns TRACE_TYPE_PARTIAL_LINE whenever the trace_seq buffer fills.
 * NOTE(review): the `if (!ret)` checks before each PARTIAL_LINE return
 * and the `break;` statements in the switch are not visible in this view.
 */
252 static enum print_line_t
253 kmemtrace_print_alloc_compress(struct trace_iterator *iter,
254 struct kmemtrace_alloc_entry *entry)
256 struct trace_seq *s = &iter->seq;
/* "+" marks an allocation event (frees use "-") */
260 ret = trace_seq_printf(s, " + ");
262 return TRACE_TYPE_PARTIAL_LINE;
/* One-letter allocation type column */
265 switch (entry->type_id) {
266 case KMEMTRACE_TYPE_KMALLOC:
267 ret = trace_seq_printf(s, "K ");
269 case KMEMTRACE_TYPE_CACHE:
270 ret = trace_seq_printf(s, "C ");
272 case KMEMTRACE_TYPE_PAGES:
273 ret = trace_seq_printf(s, "P ");
276 ret = trace_seq_printf(s, "? ");
280 return TRACE_TYPE_PARTIAL_LINE;
/* Requested size */
283 ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
285 return TRACE_TYPE_PARTIAL_LINE;
/* Actually allocated size */
288 ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
290 return TRACE_TYPE_PARTIAL_LINE;
293 * TODO: print symbolic GFP flag names instead of the raw hex value
295 ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
297 return TRACE_TYPE_PARTIAL_LINE;
299 /* Pointer to the allocated object */
300 ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
302 return TRACE_TYPE_PARTIAL_LINE;
/* NUMA node column */
305 ret = trace_seq_printf(s, "%4d ", entry->node);
307 return TRACE_TYPE_PARTIAL_LINE;
/* Symbolized caller address */
310 ret = seq_print_ip_sym(s, entry->call_site, 0);
312 return TRACE_TYPE_PARTIAL_LINE;
314 if (!trace_seq_printf(s, "\n"))
315 return TRACE_TYPE_PARTIAL_LINE;
317 return TRACE_TYPE_HANDLED;
/*
 * Emit one free entry as a compact row:
 *   "-" marker, type letter (K/C/P/?), blank size/flags columns (kept
 *   for alignment with the alloc rows), pointer, then the symbolized
 *   call site.
 * Returns TRACE_TYPE_PARTIAL_LINE whenever the trace_seq buffer fills.
 * NOTE(review): the `if (!ret)` checks before each PARTIAL_LINE return
 * and the `break;` statements in the switch are not visible in this view.
 */
320 static enum print_line_t
321 kmemtrace_print_free_compress(struct trace_iterator *iter,
322 struct kmemtrace_free_entry *entry)
324 struct trace_seq *s = &iter->seq;
/* "-" marks a free event (allocations use "+") */
328 ret = trace_seq_printf(s, " - ");
330 return TRACE_TYPE_PARTIAL_LINE;
/* One-letter allocation type column */
333 switch (entry->type_id) {
334 case KMEMTRACE_TYPE_KMALLOC:
335 ret = trace_seq_printf(s, "K ");
337 case KMEMTRACE_TYPE_CACHE:
338 ret = trace_seq_printf(s, "C ");
340 case KMEMTRACE_TYPE_PAGES:
341 ret = trace_seq_printf(s, "P ");
344 ret = trace_seq_printf(s, "? ");
348 return TRACE_TYPE_PARTIAL_LINE;
350 /* Skip the requested/allocated/flags columns (free entries have none) */
351 ret = trace_seq_printf(s, " ");
353 return TRACE_TYPE_PARTIAL_LINE;
355 /* Pointer to the object being freed */
356 ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
358 return TRACE_TYPE_PARTIAL_LINE;
/* Skip the node column (free entries have no node) */
361 ret = trace_seq_printf(s, " ");
363 return TRACE_TYPE_PARTIAL_LINE;
/* Symbolized caller address */
366 ret = seq_print_ip_sym(s, entry->call_site, 0);
368 return TRACE_TYPE_PARTIAL_LINE;
370 if (!trace_seq_printf(s, "\n"))
371 return TRACE_TYPE_PARTIAL_LINE;
373 return TRACE_TYPE_HANDLED;
/*
 * print_line callback: dispatch on the entry type and on the
 * kmem_minimalistic option to one of the four printers above.
 * NOTE(review): the closing braces of the case blocks and the
 * `default:` label before the UNHANDLED return are not visible in this
 * view.
 */
376 static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
378 struct trace_entry *entry = iter->ent;
380 switch (entry->type) {
381 case TRACE_KMEM_ALLOC: {
382 struct kmemtrace_alloc_entry *field;
383 trace_assign_type(field, entry);
384 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
385 return kmemtrace_print_alloc_compress(iter, field);
387 return kmemtrace_print_alloc_original(iter, field);
390 case TRACE_KMEM_FREE: {
391 struct kmemtrace_free_entry *field;
392 trace_assign_type(field, entry);
393 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
394 return kmemtrace_print_free_compress(iter, field);
396 return kmemtrace_print_free_original(iter, field);
/* Let the core fall back to default formatting for unknown entry types */
400 return TRACE_TYPE_UNHANDLED;
/*
 * Tracer registration record wiring the callbacks above into the
 * ftrace core. NOTE(review): the .name member (line 405) and the
 * closing brace are not visible in this view.
 */
404 static struct tracer kmem_tracer __read_mostly = {
406 .init = kmem_trace_init,
407 .reset = kmem_trace_reset,
408 .print_line = kmemtrace_print_line,
409 .print_header = kmemtrace_headers,
410 .flags = &kmem_tracer_flags
/*
 * Hook called early during boot; body not visible in this view.
 * NOTE(review): presumably a stub or early-enable hook -- confirm
 * against the full file.
 */
413 void kmemtrace_init(void)
415 /* earliest opportunity to start kmem tracing */
/* Register the kmem tracer with the ftrace core at device-initcall time */
418 static int __init init_kmem_tracer(void)
420 return register_tracer(&kmem_tracer);
423 device_initcall(init_kmem_tracer);