#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <sys/resource.h>	/* getrusage(); may also arrive via the util headers */

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

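/*
 * 'perf sched' glues together three tools around the scheduler tracepoints:
 * 'record' captures sched events into perf.data, 'latency' reports per-task
 * wakeup-to-run delays, and 'replay' re-creates the recorded task set and
 * replays its scheduling pattern.
 */
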
static char const *input_name = "perf.data";

static unsigned long page_size;
static unsigned long mmap_window = 32;

static unsigned long total_comm;

static struct rb_root threads;
static struct thread *last_match;

static struct perf_header *header;
static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;

#define PR_SET_NAME	15	/* Set process name */

/* bounds assumed from the uses of pid_to_task[] and cpu_last_switched[] below: */
#define MAX_PID		65536
#define MAX_CPUS	4096

#define BUG_ON(x)	assert(!(x))

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

static unsigned long nr_tasks;

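/*
 * Replay data model: every PID seen in the trace becomes a task_desc
 * carrying an ordered array of sched_events (run for N nsecs, sleep on a
 * semaphore, wake another task). Replaying the arrays in parallel threads
 * reproduces the recorded scheduling pattern.
 */
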
struct sched_event;

struct task_desc {
	unsigned long nr;
	unsigned long pid;
	char comm[16];		/* buffer size assumed; filled via strcpy() below */

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_event **events;

	pthread_t thread;
	sem_t sleep_sem;
	sem_t ready_for_work;
	sem_t work_done_sem;

	u64 cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
};

struct sched_event {
	enum sched_event_type type;
	u64 timestamp;
	u64 duration;
	int specific_wait;
	sem_t *wait_sem;
	struct task_desc *wakee;
};

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;
static u64 run_avg;	/* updated in run_one_test() */

static unsigned long replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long unordered_timestamps;

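/*
 * Latency data model: for every thread we keep a task_atoms node in an
 * rb-tree; each scheduling episode is a work_atom that walks the state
 * machine THREAD_SLEEPING -> THREAD_WAIT_CPU -> THREAD_SCHED_IN, and the
 * wakeup-to-schedule-in delta is what the latency report aggregates.
 */
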
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE,
};

struct work_atom {
	struct list_head list;
	enum thread_state state;
	u64 sched_out_time;
	u64 wake_up_time;
	u64 sched_in_time;
	u64 runtime;
};

struct task_atoms {
	struct rb_node node;
	struct list_head atom_list;
	struct thread *thread;
	u64 max_lat;
	u64 total_lat;
	u64 nb_atoms;
	u64 total_runtime;
};

typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;

static int read_events(void);

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

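/*
 * burn_nsecs()/sleep_nsecs() are the two primitives replay threads use to
 * reproduce recorded runtimes; both are calibrated at startup so that the
 * measurement overhead itself can be subtracted out.
 */
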
static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	/* split into whole seconds and the sub-second remainder: */
	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

static struct sched_event *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_event *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;

	task->nr_events++;
	size = sizeof(struct sched_event *) * task->nr_events;
	task->events = realloc(task->events, size);
	BUG_ON(!task->events);

	task->events[idx] = event;

	return event;
}

static struct sched_event *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->events[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_event *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * with it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_event *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

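/*
 * The pairing above is what makes replay faithful: the wakee's pending
 * SLEEP event and the waker's WAKEUP event share one semaphore, so during
 * replay a sem_post() by the waker releases exactly the sem_wait() of the
 * thread it woke in the original trace.
 */
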
static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_event *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = calloc(1, sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	/* each task wakes its successor, wrapping around at the end: */
	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

static void
process_sched_event(struct task_desc *this_task __used, struct sched_event *event)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + event->timestamp - now;

	switch (event->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(event->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (event->wait_sem)
				ret = sem_wait(event->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (event->wait_sem)
				ret = sem_post(event->wait_sem);
			BUG_ON(ret);
			break;
		default:
			BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static u64 get_cpu_usage_nsec_self(void)
{
	char filename [] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	u64 total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			     &msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}

static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->events[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

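/*
 * Handshake per replay iteration: each thread above posts ready_for_work,
 * then blocks on start_work_mutex (held by the parent) as the start barrier;
 * after replaying its events it posts work_done_sem and parks on
 * work_done_wait_mutex until the parent releases it for the next run. The
 * parent side lives in create_tasks()/wait_for_tasks() below.
 */
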
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

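/*
 * run_one_test() times one full replay pass and keeps an exponential moving
 * average, new_avg = (old_avg * 9 + sample) / 10, for both wall-clock
 * runtime and measured cpu usage, so the printed 'ravg'/'cpu' columns
 * settle as repeats accumulate.
 */
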
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run events optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;

	thread = threads__findnew(event->comm.pid, &threads, &last_match);

	dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

struct raw_event_sample {
	u32 size;
	char data[0];
};

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)

struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);
};

static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}

static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

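/*
 * Both subcommands share one event-parsing path: trace_handler (set in
 * cmd_sched()) selects replay_ops or lat_ops, so the raw tracepoint
 * decoders further below stay subcommand-agnostic.
 */
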
struct sort_dimension {
	const char *name;
	sort_fn_t cmp;
	struct list_head list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct task_atoms *l, struct task_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct task_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct task_atoms key = { .thread = thread };

	while (node) {
		struct task_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct task_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct task_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct task_atoms *this;
		int cmp;

		this = container_of(*new, struct task_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct task_atoms *atoms;

	atoms = calloc(sizeof(*atoms), 1);
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->atom_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}

static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

static void
lat_sched_out(struct task_atoms *atoms,
	      struct trace_switch_event *switch_event,
	      u64 delta,
	      u64 timestamp)
{
	struct work_atom *atom;

	atom = calloc(sizeof(*atom), 1);
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (sched_out_state(switch_event) == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	atom->runtime = delta;
	list_add_tail(&atom->list, &atoms->atom_list);
}

static void
lat_sched_in(struct task_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->atom_list))
		return;

	atom = list_entry(atoms->atom_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
	atoms->nb_atoms++;
	atoms->total_runtime += atom->runtime;
}

static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct task_atoms *out_atoms, *in_atoms;
	struct thread *sched_out, *sched_in;
	u64 timestamp0;
	s64 delta;

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_atoms) {
		thread_atoms_insert(sched_in);
		in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_atoms)
			die("in-atom: Internal tree error");
	}

	out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_atoms) {
		thread_atoms_insert(sched_out);
		out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_atoms)
			die("out-atom: Internal tree error");
	}

	lat_sched_in(in_atoms, timestamp);
	lat_sched_out(out_atoms, switch_event, delta, timestamp);
}

static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct task_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		return;
	}

	if (list_empty(&atoms->atom_list))
		return;

	atom = list_entry(atoms->atom_list.prev, struct work_atom, list);

	if (atom->state != THREAD_SLEEPING)
		return;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}

static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.fork_event		= latency_fork_event,
};

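/*
 * A preempted task (sched-out in state 'R') is immediately runnable, so
 * lat_sched_out() counts its own sched-out time as the wakeup; for sleeping
 * tasks the wakeup comes later via latency_wakeup_event(), and
 * lat_sched_in() then turns wake_up_time -> sched_in_time into one latency
 * sample.
 */
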
static void output_lat_thread(struct task_atoms *atom_list)
{
	int i;
	int ret;
	u64 avg;

	if (!atom_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!atom_list->thread->pid)
		return;

	all_runtime += atom_list->total_runtime;
	all_count += atom_list->nb_atoms;

	ret = printf(" %s ", atom_list->thread->comm);

	for (i = 0; i < 19 - ret; i++)
		printf(" ");

	avg = atom_list->total_lat / atom_list->nb_atoms;

	printf("|%9.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
	       (double)atom_list->total_runtime / 1e6,
	       atom_list->nb_atoms, (double)avg / 1e6,
	       (double)atom_list->max_lat / 1e6);
}

static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name		= "pid",
	.cmp		= pid_cmp,
};

static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;
	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name		= "avg",
	.cmp		= avg_cmp,
};

static int max_cmp(struct task_atoms *l, struct task_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name		= "max",
	.cmp		= max_cmp,
};

static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name		= "switch",
	.cmp		= switch_cmp,
};

static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name		= "runtime",
	.cmp		= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

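/*
 * Sort keys accumulate in the order given, so e.g. 'perf sched latency
 * --sort max,avg' ranks primarily by worst-case delay and breaks ties by
 * average; the default is "avg, max, switch, runtime" from sort_order above.
 */
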
static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct task_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct task_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("-----------------------------------------------------------------------------------\n");
	printf(" Task              |  Runtime ms | Switches | Average delay ms | Maximum delay ms |\n");
	printf("-----------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct task_atoms *atom_list;

		atom_list = rb_entry(next, struct task_atoms, node);
		output_lat_thread(atom_list);
		next = rb_next(next);
	}

	printf("-----------------------------------------------------------------------------------\n");
	printf(" TOTAL:            |%9.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	if (unordered_timestamps && nr_timestamps) {
		printf(" INFO: %.2f%% unordered events.\n",
			(double)unordered_timestamps/(double)nr_timestamps*100.0);
	}

	printf("---------------------------------------------\n");
}

static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}

static void
process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	trace_handler->switch_event(&switch_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
}

static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct dso *dso = NULL;
	struct thread *thread;
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	int cpumode;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	/* check the thread before dereferencing it below: */
	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;

	if (cpumode == PERF_EVENT_MISC_KERNEL) {
		dso = kernel_dso;

		dump_printf(" ...... dso: %s\n", dso->name);

	} else if (cpumode == PERF_EVENT_MISC_USER) {
		/* user-space samples get resolved via the thread's maps */

	} else {
		dso = hypervisor_dso;

		dump_printf(" ...... dso: [hypervisor]\n");
	}

	if (sample_type & PERF_SAMPLE_RAW)
		process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	switch (event->header.type) {
	case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
		return 0;

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_EXIT ... PERF_EVENT_READ:
		return 0;

	case PERF_EVENT_SAMPLE:
		return process_sample_event(event, offset, head);

	case PERF_EVENT_MAX:
	default:
		return -1;
	}

	return 0;
}

static int read_events(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat perf_stat;
	event_t *event;
	uint32_t size;
	char *buf;
	int input;

	register_idle_thread(&threads, &last_match);

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &perf_stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!perf_stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}
	header = perf_header__read(input);
	head = header->data_offset;
	sample_type = perf_header__sample_type(header);

	if (!(sample_type & PERF_SAMPLE_RAW))
		die("No trace sample to read. Did you call perf record "
		    "without -R?");

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	/* slide the mmap window forward when the event would cross its end: */
	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int res;

		res = munmap(buf, page_size * mmap_window);
		assert(res == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	if (!size || process_event(event, offset, head) < 0) {

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < (unsigned long)perf_stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	return rc;
}

static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	/* always sort the rb-tree of task_atoms by pid: */
	sort_dimension__add((char *)"pid", &cmp_pid);
}

static const char *record_args[] = {
	/* base flags assumed; the sched tracepoint list below is verbatim: */
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();
	page_size = getpagesize();

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else if (!strcmp(argv[0], "trace")) {
		/*
		 * Aliased to 'perf trace' for now:
		 */
		return cmd_trace(argc, argv, prefix);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}