#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/data_map.h"

#include <linux/rbtree.h>
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static char const *input_name = "perf.data";

static struct perf_header *header;
static u64 sample_type;

static int alloc_flag;
static int caller_flag;

static int alloc_lines = -1;
static int caller_lines = -1;

static int raw_ip;

static char default_sort_order[] = "frag,hit,bytes";

static int *cpunode_map;
static int max_cpu_num;

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;
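/*
 * Allocation statistics live in two unsorted rbtrees: root_alloc_stat is
 * keyed by the returned pointer, root_caller_stat by the call site.  Each
 * node is a struct alloc_stat accumulating a hit count plus requested and
 * allocated byte totals; the *_sorted trees are rebuilt from them at report
 * time according to the chosen sort keys.
 */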
/* variable-size raw tracepoint record: length followed by the payload */
struct raw_event_sample {
	u32 size;
	char data[0];
};

#define PATH_SYS_NODE	"/sys/devices/system/node"
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp)
		die("Failed to open 'kernel_max' in sysfs");
	if (fscanf(fp, "%d", &max_cpu_num) < 1)
		die("Failed to read 'kernel_max' from sysfs");

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
}
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;
	while ((dent1 = readdir(dir1)) != NULL) {
		if (sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;
		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		while ((dent2 = readdir(dir2)) != NULL) {
			if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
	}
}
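/*
 * cpunode_map[] translates a CPU number into its NUMA node by scanning
 * /sys/devices/system/node/nodeN/cpuM; finding node0/cpu3, for example,
 * records cpunode_map[3] = 0.  It is consulted later to spot allocations
 * that were satisfied on a different node than the requesting CPU.
 */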
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->comm.pid);

	dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
		    (void *)(offset + head),
		    (void *)(long)(event->header.size),
		    event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}
static void insert_alloc_stat(unsigned long ptr,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}
	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		data->ptr = ptr;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;
	}

	rb_link_node(&data->node, parent, node);
	rb_insert_color(&data->node, &root_alloc_stat);
}
static void insert_caller_stat(unsigned long call_site,
			       int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}
	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		data->call_site = call_site;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;
	}

	rb_link_node(&data->node, parent, node);
	rb_insert_color(&data->node, &root_caller_stat);
}
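/*
 * insert_alloc_stat() and insert_caller_stat() are the same rbtree walk with
 * different keys (ptr vs. call_site): an existing node gets its hit count and
 * byte totals bumped, otherwise a fresh alloc_stat is linked into the tree.
 */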
static void process_alloc_event(struct raw_event_sample *raw,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req, bytes_alloc;
	int node1, node2;

	ptr = raw_field_value(event, "ptr", raw->data);
	call_site = raw_field_value(event, "call_site", raw->data);
	bytes_req = raw_field_value(event, "bytes_req", raw->data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);

	insert_alloc_stat(ptr, bytes_req, bytes_alloc);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;
	nr_allocs++;

	if (node) {
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", raw->data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
}
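/*
 * The 'node' argument is non-zero only for the *_node tracepoints, which
 * carry the NUMA node that was explicitly requested; comparing it against
 * cpunode_map[cpu] lets the tool count allocations that crossed nodes
 * (reported as "Cross CPU allocations" in the summary).
 */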
static void process_free_event(struct raw_event_sample *raw __used,
			       struct event *event __used,
			       int cpu __used,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
}
static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(raw, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(raw, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(raw, event, cpu, timestamp, thread);
		return;
	}
}
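/*
 * Dispatch by tracepoint name: kmalloc/kmem_cache_alloc are plain
 * allocations, kmalloc_node/kmem_cache_alloc_node additionally carry the
 * requested node, and kfree/kmem_cache_free are routed to the free handler.
 */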
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	struct thread *thread = threads__findnew(event->ip.pid);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		    (void *)(offset + head),
		    (void *)(long)(event->header.size),
		    event->header.misc,
		    event->ip.pid, event->ip.tid,
		    (void *)(long)ip,
		    (long long)period);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}
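/*
 * The sample payload after the fixed ip/pid/tid header is laid out in the
 * order of the PERF_SAMPLE_* bits enabled at record time, so more_data is
 * advanced past TIME, CPU (plus a reserved u32) and PERIOD before it finally
 * points at the raw tracepoint data handed to process_raw_event().
 */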
static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr, "No trace sample to read. Did you call perf record "
				"without -R?");
		return -1;
	}

	return 0;
}

static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= process_comm_event,
	.sample_type_check	= sample_type_check,
};
static int read_events(void)
{
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, NULL, false, 0, 0,
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	return 100.0 - (100.0 * n_req / n_alloc);
}
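/*
 * Internal fragmentation is the share of allocated bytes the caller never
 * asked for: 100 - 100 * requested / allocated.  For example, a 100-byte
 * request served from a 128-byte slab object is 100 - 100*100/128 = 21.875%
 * fragmented.
 */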
static void __print_result(struct rb_root *root, int n_lines, int is_caller)
{
	struct rb_node *next;

	printf("%.78s\n", graph_dotted_line);
	printf("%-28s|", is_caller ? "Callsite": "Alloc Ptr");
	printf("Total_alloc/Per | Total_req/Per | Hit | Frag\n");
	printf("%.78s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		char bf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = kernel_maps__find_symbol(addr,
							       NULL, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(bf, sizeof(bf), "%s+%Lx", sym->name,
				 addr - sym->start);
		else
			snprintf(bf, sizeof(bf), "%#Lx", addr);

		printf("%-28s|%8llu/%-6lu |%8llu/%-6lu|%6lu|%8.3f%%\n",
		       bf, (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... \n");

	printf("%.78s\n", graph_dotted_line);
}
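/*
 * Each row prints the accumulated totals and the per-hit average:
 * "Total_alloc/Per" is bytes_alloc and bytes_alloc/hit, "Total_req/Per" is
 * bytes_req and bytes_req/hit, followed by the hit count and the
 * fragmentation percentage.  Call sites are resolved to "symbol+offset"
 * unless --raw-ip was given.
 */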
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
static void print_result(void)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, alloc_lines, 0);
	print_summary();
}
struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
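/*
 * Multi-key sorting: the dimensions in sort_list are tried in order and the
 * first comparator that returns non-zero decides the position.  A positive
 * result sends the new node to the left, so (with comparators that return 1
 * when the left entry is larger) the sorted trees iterate in descending
 * order of the primary key.
 */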
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}
static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
static int __cmd_kmem(void)
{
	read_events();
	sort_result();
	print_result();

	return 0;
}

static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record}",
	NULL
};
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
	else if (l->ptr > r->ptr)

static struct sort_dimension ptr_sort_dimension = {

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
	if (l->call_site < r->call_site)
	else if (l->call_site > r->call_site)

static struct sort_dimension callsite_sort_dimension = {

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
	else if (l->hit > r->hit)

static struct sort_dimension hit_sort_dimension = {

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
	if (l->bytes_alloc < r->bytes_alloc)
	else if (l->bytes_alloc > r->bytes_alloc)

static struct sort_dimension bytes_sort_dimension = {

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

static struct sort_dimension frag_sort_dimension = {
static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
};

#define NUM_AVAIL_SORTS	\
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
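/*
 * Every sort key follows the same pattern: a three-way comparator over two
 * alloc_stat entries plus a sort_dimension tying a name to it.  The bodies
 * are abridged above; a minimal sketch of the shape looks like this:
 *
 *	static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
 *	{
 *		if (l->ptr < r->ptr)
 *			return -1;
 *		else if (l->ptr > r->ptr)
 *			return 1;
 *		return 0;
 *	}
 *
 *	static struct sort_dimension ptr_sort_dimension = {
 *		.name	= "ptr",
 *		.cmp	= ptr_cmp,
 *	};
 *
 * sort_dimension__add() below matches a --sort token against avail_sorts[]
 * by name and appends a copy of the dimension to the requested list.
 */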
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);

	while ((tok = strsep(&str, ",")) != NULL) {
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			return -1;
		}
	}

	return 0;
}
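/*
 * setup_sorting() splits the --sort argument on commas and appends one
 * dimension per key, so the default "frag,hit,bytes" sorts by fragmentation
 * first, then hit count, then allocated bytes.
 */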
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}
static int parse_stat_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (strcmp(arg, "alloc") == 0)
		alloc_flag = (caller_flag + 1);
	else if (strcmp(arg, "caller") == 0)
		caller_flag = (alloc_flag + 1);
	else
		return -1;
	return 0;
}
static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);
	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;
	return 0;
}
static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>",
		     "stat selector, Pass 'alloc' or 'caller'.",
		     parse_stat_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by key(s): ptr, call_site, bytes, hit, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};
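/*
 * Illustrative invocation (hypothetical, only to tie the options together):
 * "perf kmem --stat caller -s hit,bytes -l 20" reads perf.data and prints
 * the top twenty call sites, ordered by hit count and then allocated bytes.
 */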
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
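/*
 * "perf kmem record <command>" simply re-executes the record subcommand with
 * the kmem tracepoints from record_args[] prepended, so that a later plain
 * "perf kmem" run can analyze the resulting perf.data.
 */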
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(kmem_usage, kmem_options);

	if (list_empty(&caller_sort))
		setup_sorting(&caller_sort, default_sort_order);
	if (list_empty(&alloc_sort))
		setup_sorting(&alloc_sort, default_sort_order);

	setup_cpunode_map();

	return __cmd_kmem();
}