#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"

#include <sys/prctl.h>
#include <semaphore.h>
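/*
 * 'perf sched' is built around a set of trace_sched_handler callbacks
 * defined below: replay_ops, lat_ops and map_ops implement the
 * 'replay', 'latency' and 'map' sub-commands; 'record' and the 'trace'
 * alias are wired up in cmd_sched() at the end of this file.
 */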
static char const *input_name = "perf.data";

static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;

static int profile_cpu = -1;

#define PR_SET_NAME	15	/* Set process name */

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

static unsigned long nr_tasks;

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

enum sched_event_type {
	SCHED_EVENT_MIGRATION,

	enum sched_event_type type;

	struct task_desc *wakee;

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;

static unsigned long replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

	struct list_head list;
	enum thread_state state;

	struct list_head work_list;
	struct thread *thread;

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;
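/*
 * Nanosecond-resolution timing helpers used by 'perf sched replay':
 * get_nsecs() reads CLOCK_MONOTONIC, burn_nsecs() spins for a given
 * duration and sleep_nsecs() blocks in nanosleep(); the calibrate_*()
 * functions below take the minimum of ten measurements so replay can
 * compensate for measurement overhead.
 */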
static u64 get_nsecs(void)

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;

static void burn_nsecs(u64 nsecs)

	u64 T0 = get_nsecs(), T1;

	} while (T1 + run_measurement_overhead < T0 + nsecs);

static void sleep_nsecs(u64 nsecs)
	ts.tv_nsec = nsecs % 1000000000;
	ts.tv_sec = nsecs / 1000000000;
	nanosleep(&ts, NULL);

static void calibrate_run_measurement_overhead(void)

	u64 T0, T1, delta, min_delta = 1000000000ULL;

	for (i = 0; i < 10; i++) {
		min_delta = min(min_delta, delta);
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);

static void calibrate_sleep_measurement_overhead(void)

	u64 T0, T1, delta, min_delta = 1000000000ULL;

	for (i = 0; i < 10; i++) {
		min_delta = min(min_delta, delta);
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
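/*
 * Each recorded task gets a dynamically grown array of sched_atoms
 * (run/sleep/wakeup events): get_new_event() zallocs one atom and
 * reallocs task->atoms to make room; last_event() peeks at the most
 * recently added atom so consecutive events can be merged.
 */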
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)

	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;

	event->timestamp = timestamp;

	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

static struct sched_atom *last_event(struct task_desc *task)

	if (!task->nr_events)

	return task->atoms[task->nr_events - 1];

add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)

	struct sched_atom *event, *curr_event = last_event(task);

	 * optimize an existing RUN event by merging this one
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)

	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;

	if (wakee_event->wait_sem) {
		multitarget_wakeups++;

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)

	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;
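/*
 * register_pid() maps a recorded PID to its task_desc, creating the
 * descriptor on first sight and seeding it with an initial sleep atom.
 */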
static struct task_desc *register_pid(unsigned long pid, const char *comm)

	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	task = zalloc(sizeof(*task));

	strcpy(task->comm, comm);
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	tasks[task->nr] = task;

	printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

static void print_task_traces(void)

	struct task_desc *task;

	for (i = 0; i < nr_tasks; i++) {
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);

static void add_cross_task_wakeups(void)

	struct task_desc *task1, *task2;

	for (i = 0; i < nr_tasks; i++) {
		add_sched_event_wakeup(task1, 0, task2);
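/*
 * Replay one atom at its recorded offset from start_time: RUN burns
 * CPU for the recorded duration, SLEEP blocks on the atom's wait_sem
 * and WAKEUP posts the wakee's semaphore; MIGRATION appears to be a
 * no-op during replay.
 */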
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)

	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
	case SCHED_EVENT_SLEEP:
		ret = sem_wait(atom->wait_sem);
	case SCHED_EVENT_WAKEUP:
		ret = sem_post(atom->wait_sem);
	case SCHED_EVENT_MIGRATION:

static u64 get_cpu_usage_nsec_parent(void)

	err = getrusage(RUSAGE_SELF, &ru);

	sum = ru.ru_utime.tv_sec * 1e9 + ru.ru_utime.tv_usec * 1e3;
	sum += ru.ru_stime.tv_sec * 1e9 + ru.ru_stime.tv_usec * 1e3;

static int self_open_counters(void)

	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
		die("Error: sys_perf_event_open() syscall returned "
		    "with %d (%s)\n", fd, strerror(errno));
static u64 get_cpu_usage_nsec_self(int fd)

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

static void *thread_func(void *ctx)

	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();

	ret = sem_post(&this_task->ready_for_work);

	ret = pthread_mutex_lock(&start_work_mutex);

	ret = pthread_mutex_unlock(&start_work_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);

	ret = pthread_mutex_lock(&work_done_wait_mutex);

	ret = pthread_mutex_unlock(&work_done_wait_mutex);
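/*
 * Thread/parent handshake: each worker posts ready_for_work, then
 * blocks on start_work_mutex (held by the parent until all workers
 * are ready). After replaying its atoms it posts work_done_sem and
 * parks on work_done_wait_mutex until the parent releases it for the
 * next iteration.
 */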
static void create_tasks(void)

	struct task_desc *task;

	err = pthread_attr_init(&attr);
	err = pthread_attr_setstacksize(&attr, (size_t)(16 * 1024));
	err = pthread_mutex_lock(&start_work_mutex);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	for (i = 0; i < nr_tasks; i++) {
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);

static void wait_for_tasks(void)

	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();

	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		ret = sem_wait(&task->ready_for_work);
		sem_init(&task->ready_for_work, 0, 0);

	ret = pthread_mutex_lock(&work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		ret = sem_wait(&task->work_done_sem);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage * 9 + cpu_usage) / 10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage * 9 +
				   parent_cpu_usage) / 10;

	ret = pthread_mutex_lock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
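/*
 * One replay iteration: time the whole wait_for_tasks() pass, then
 * fold the result into sum_runtime/sum_fluct and the decaying
 * run_avg ((old * 9 + new) / 10) printed on the progress line below.
 */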
static void run_one_test(void)

	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	sum_runtime += delta;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);

	run_avg = (run_avg * 9 + delta) / 10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta / 1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg / 1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage / 1e6, (double)runavg_cpu_usage / 1e6);
	 * rusage statistics gathered by the parent; these are less
	 * accurate than the sum_exec_runtime based statistics:
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage / 1e6,
		(double)runavg_parent_cpu_usage / 1e6);
	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;

static void test_calibrations(void)

	printf("the run test took %Ld nsecs\n", T1 - T0);

	printf("the sleep test took %Ld nsecs\n", T1 - T0);

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)	\
	void *__array = raw_field_ptr(event, #array, data); \
	memcpy(ptr.array, __array, sizeof(ptr.array)); \

#define FILL_COMMON_FIELDS(ptr, event, data)	\
	FILL_FIELD(ptr, common_type, event, data);	\
	FILL_FIELD(ptr, common_flags, event, data);	\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);	\
	FILL_FIELD(ptr, common_tgid, event, data);	\
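/*
 * C mirrors of the raw sched tracepoint records; the FILL_FIELD /
 * FILL_ARRAY macros above populate them by field name from the raw
 * sample data, so the layouts only need the fields actually used.
 */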
struct trace_switch_event {
	u8 common_preempt_count;

struct trace_runtime_event {
	u8 common_preempt_count;

struct trace_wakeup_event {
	u8 common_preempt_count;

struct trace_fork_event {
	u8 common_preempt_count;
	char parent_comm[16];

struct trace_migrate_task_event {
	u8 common_preempt_count;

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct perf_session *,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct perf_session *,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct perf_session *,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
				   struct perf_session *session,
				   struct thread *thread);

replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct perf_session *session __used,
		    u64 timestamp __used,
		    struct thread *thread __used)

	struct task_desc *waker, *wakee;

	printf("sched_wakeup event %p\n", event);

	printf(" ... pid %d woke up %s/%d\n",
		wakeup_event->common_pid,

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);

static u64 cpu_last_switched[MAX_CPUS];

replay_switch_event(struct trace_switch_event *switch_event,
		    struct perf_session *session __used,
		    struct thread *thread __used)

	struct task_desc *prev, *next;

	printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)

	timestamp0 = cpu_last_switched[cpu];

	delta = timestamp - timestamp0;

	die("hm, delta: %Ld < 0 ?\n", delta);

	printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
		switch_event->prev_comm, switch_event->prev_pid,
		switch_event->next_comm, switch_event->next_pid,

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);

replay_fork_event(struct trace_fork_event *fork_event,
		  u64 timestamp __used,
		  struct thread *thread __used)

	printf("sched_fork event %p\n", event);
	printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
	printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);

	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);

static struct trace_sched_handler replay_ops = {
	.wakeup_event = replay_wakeup_event,
	.switch_event = replay_switch_event,
	.fork_event = replay_fork_event,

struct sort_dimension {
	struct list_head list;

static LIST_HEAD(cmp_pid);
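/*
 * Latency accounting: per-thread work_atoms live in an rb-tree keyed
 * by the active sort keys; thread_lat_cmp() walks the chained sort
 * dimensions until one of them produces a non-zero comparison.
 */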
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)

	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)

	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

		struct work_atoms *atoms;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);

			node = node->rb_left;

			node = node->rb_right;

		BUG_ON(thread != atoms->thread);

__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)

	struct rb_node **new = &(root->rb_node), *parent = NULL;

		struct work_atoms *this;

		this = container_of(*new, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, data, this);

			new = &((*new)->rb_left);

			new = &((*new)->rb_right);

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

static void thread_atoms_insert(struct thread *thread)

	struct work_atoms *atoms = zalloc(sizeof(*atoms));

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);

latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   u64 timestamp __used,
		   struct thread *thread __used)

	/* should insert the newcomer */

static char sched_out_state(struct trace_switch_event *switch_event)

	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];

add_sched_out_event(struct work_atoms *atoms,

	struct work_atom *atom = zalloc(sizeof(*atom));

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;

	list_add_tail(&atom->list, &atoms->work_list);

add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)

	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;

add_sched_in_event(struct work_atoms *atoms, u64 timestamp)

	struct work_atom *atom;

	if (list_empty(&atoms->work_list))

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
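/*
 * The latency handlers below translate sched tracepoints into
 * work_atom state transitions: sched_switch closes the outgoing
 * thread's atom and opens one for the incoming thread, sched_wakeup
 * stamps wake_up_time, and the wakeup-to-sched-in delta becomes the
 * reported latency.
 */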
latency_switch_event(struct trace_switch_event *switch_event,
		     struct perf_session *session,
		     struct event *event __used,
		     struct thread *thread __used)

	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;

	delta = timestamp - timestamp0;

	die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = perf_session__findnew(session, switch_event->prev_pid);
	sched_in = perf_session__findnew(session, switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
			die("out-event: Internal tree error");

	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
			die("in-event: Internal tree error");
		 * A task came in that we have not heard about yet;
		 * add an initial atom in runnable state:
		add_sched_out_event(in_events, 'R', timestamp);

	add_sched_in_event(in_events, timestamp);

latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct perf_session *session,
		      struct event *event __used,
		      struct thread *this_thread __used)

	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);

	add_runtime_event(atoms, runtime_event->runtime, timestamp);

latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct perf_session *session,
		     struct event *__event __used,
		     struct thread *thread __used)

	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)

	wakee = perf_session__findnew(session, wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * make useless noise.
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;

latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct perf_session *session,
			   struct event *__event __used,
			   struct thread *thread __used)

	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	 * Only need to worry about migration when profiling one CPU.
	if (profile_cpu == -1)

	migrant = perf_session__findnew(session, migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		thread_atoms_insert(migrant);
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
			die("migration-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;

static struct trace_sched_handler lat_ops = {
	.wakeup_event = latency_wakeup_event,
	.switch_event = latency_switch_event,
	.runtime_event = latency_runtime_event,
	.fork_event = latency_fork_event,
	.migrate_task_event = latency_migrate_task_event,

static void output_lat_thread(struct work_atoms *work_list)

	if (!work_list->nb_atoms)

	 * Ignore idle threads:
	if (!strcmp(work_list->thread->comm, "swapper"))

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6,
		(double)work_list->max_lat_at / 1e9);

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->thread->pid < r->thread->pid)

	if (l->thread->pid > r->thread->pid)

static struct sort_dimension pid_sort_dimension = {

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)

	avgl = l->total_lat / l->nb_atoms;

	avgr = r->total_lat / r->nb_atoms;

static struct sort_dimension avg_sort_dimension = {

static int max_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->max_lat < r->max_lat)

	if (l->max_lat > r->max_lat)

static struct sort_dimension max_sort_dimension = {

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->nb_atoms < r->nb_atoms)

	if (l->nb_atoms > r->nb_atoms)

static struct sort_dimension switch_sort_dimension = {

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->total_runtime < r->total_runtime)

	if (l->total_runtime > r->total_runtime)

static struct sort_dimension runtime_sort_dimension = {

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);
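/*
 * --sort parsing: each recognized key appends its sort_dimension to
 * sort_list; thread_lat_cmp() then applies them in the order given.
 */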
static int sort_dimension__add(const char *tok, struct list_head *list)

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

static void setup_sorting(void);

static void sort_lat(void)

	struct rb_node *node;

		struct work_atoms *data;

		node = rb_first(&atom_root);

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);

static struct trace_sched_handler *trace_handler;

process_sched_wakeup_event(void *data, struct perf_session *session,
			   struct event *event,
			   u64 timestamp __used,
			   struct thread *thread __used)

	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, data);

	FILL_ARRAY(wakeup_event, comm, event, data);
	FILL_FIELD(wakeup_event, pid, event, data);
	FILL_FIELD(wakeup_event, prio, event, data);
	FILL_FIELD(wakeup_event, success, event, data);
	FILL_FIELD(wakeup_event, cpu, event, data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, session, event,
					    cpu, timestamp, thread);
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';
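/*
 * 'perf sched map' view: every thread gets a two-character shortname
 * (letter A-Z plus digit 0-9) the first time it is scheduled in, and
 * each sched_switch prints one column per CPU so migrations and
 * preemptions line up visually across the machine.
 */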
map_switch_event(struct trace_switch_event *switch_event,
		 struct perf_session *session,
		 struct event *event __used,
		 struct thread *thread __used)

	struct thread *sched_out, *sched_in;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;

	delta = timestamp - timestamp0;

	die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = perf_session__findnew(session, switch_event->prev_pid);
	sched_in = perf_session__findnew(session, switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {

			next_shortname1 = 'A';
			if (next_shortname2 < '9') {

				next_shortname2 = '0';

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);

	printf(" %12.6f secs ", (double)timestamp / 1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);

process_sched_switch_event(void *data, struct perf_session *session,
			   struct event *event,
			   u64 timestamp __used,
			   struct thread *thread __used)

	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, data);

	FILL_ARRAY(switch_event, prev_comm, event, data);
	FILL_FIELD(switch_event, prev_pid, event, data);
	FILL_FIELD(switch_event, prev_prio, event, data);
	FILL_FIELD(switch_event, prev_state, event, data);
	FILL_ARRAY(switch_event, next_comm, event, data);
	FILL_FIELD(switch_event, next_pid, event, data);
	FILL_FIELD(switch_event, next_prio, event, data);

	if (curr_pid[this_cpu] != (u32)-1) {
		 * Are we trying to switch away a PID that is
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;

	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, session, event,
					    this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;

process_sched_runtime_event(void *data, struct perf_session *session,
			    struct event *event,
			    u64 timestamp __used,
			    struct thread *thread __used)

	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, data);
	FILL_FIELD(runtime_event, pid, event, data);
	FILL_FIELD(runtime_event, runtime, event, data);
	FILL_FIELD(runtime_event, vruntime, event, data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);

process_sched_fork_event(void *data,
			 struct event *event,
			 u64 timestamp __used,
			 struct thread *thread __used)

	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, data);

	FILL_ARRAY(fork_event, parent_comm, event, data);
	FILL_FIELD(fork_event, parent_pid, event, data);
	FILL_ARRAY(fork_event, child_comm, event, data);
	FILL_FIELD(fork_event, child_pid, event, data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event,
					  cpu, timestamp, thread);

process_sched_exit_event(struct event *event,
			 u64 timestamp __used,
			 struct thread *thread __used)

	printf("sched_exit event %p\n", event);

process_sched_migrate_task_event(void *data, struct perf_session *session,
				 struct event *event,
				 u64 timestamp __used,
				 struct thread *thread __used)

	struct trace_migrate_task_event migrate_task_event;

	FILL_COMMON_FIELDS(migrate_task_event, event, data);

	FILL_ARRAY(migrate_task_event, comm, event, data);
	FILL_FIELD(migrate_task_event, pid, event, data);
	FILL_FIELD(migrate_task_event, prio, event, data);
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, session,
						  event, cpu, timestamp, thread);

process_raw_event(event_t *raw_event __used, struct perf_session *session,
		  void *data, int cpu, u64 timestamp, struct thread *thread)

	struct event *event;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
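/*
 * Per-sample entry point: resolve the sample's thread, apply the
 * optional --CPU filter, then hand the raw tracepoint data to
 * process_raw_event() for dispatch by event name.
 */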
static int process_sample_event(event_t *event, struct perf_session *session)

	struct sample_data data;
	struct thread *thread;

	if (!(sample_type & PERF_SAMPLE_RAW))

	memset(&data, 0, sizeof(data));

	event__parse_sample(event, sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(long)data.ip,
		(long long)data.period);

	thread = perf_session__findnew(session, data.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)

	process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);

static int process_lost_event(event_t *event __used,
			      struct perf_session *session __used)

	nr_lost_events += event->lost.lost;

static int sample_type_check(u64 type)

	if (!(sample_type & PERF_SAMPLE_RAW)) {
			"No trace sample to read. Did you call perf record "

static struct perf_event_ops event_ops = {
	.process_sample_event = process_sample_event,
	.process_comm_event = event__process_comm,
	.process_lost_event = process_lost_event,
	.sample_type_check = sample_type_check,

static int read_events(void)

	struct perf_session *session = perf_session__new(input_name, O_RDONLY,

	if (session == NULL)

	err = perf_session__process_events(session, &event_ops);
	perf_session__delete(session);
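/*
 * Sanity statistics collected while processing: unordered timestamps,
 * lost event chunks, and state machine/context switch anomalies are
 * reported as percentages so the user can judge how trustworthy the
 * latency numbers are.
 */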
static void print_bad_events(void)

	if (nr_unordered_timestamps && nr_timestamps) {
		printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps / (double)nr_timestamps * 100.0,
			nr_unordered_timestamps, nr_timestamps);

	if (nr_lost_events && nr_events) {
		printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events / (double)nr_events * 100.0,
			nr_lost_events, nr_events, nr_lost_chunks);

	if (nr_state_machine_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs / (double)nr_timestamps * 100.0,
			nr_state_machine_bugs, nr_timestamps);
			printf(" (due to lost events?)");

	if (nr_context_switch_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs / (double)nr_timestamps * 100.0,
			nr_context_switch_bugs, nr_timestamps);
			printf(" (due to lost events?)");

static void __cmd_lat(void)

	struct rb_node *next;

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);

	printf(" -----------------------------------------------------------------------------------------\n");
	printf(" TOTAL: |%11.3f ms |%9Ld |\n",
		(double)all_runtime / 1e6, all_count);

	printf(" ---------------------------------------------------\n");

static struct trace_sched_handler map_ops = {
	.wakeup_event = NULL,
	.switch_event = map_switch_event,
	.runtime_event = NULL,

static void __cmd_map(void)

	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

static void __cmd_replay(void)

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	printf("nr_run_events: %ld\n", nr_run_events);
	printf("nr_sleep_events: %ld\n", nr_sleep_events);
	printf("nr_wakeup_events: %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups: %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)

static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static void setup_sorting(void)

	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);

	sort_dimension__add("pid", &cmp_pid);
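/*
 * 'perf sched record' is a thin wrapper around 'perf record' that
 * pre-selects the sched_* tracepoints listed below and appends any
 * extra arguments the user passed.
 */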
static const char *record_args[] = {

	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",

static int __cmd_record(int argc, const char **argv)

	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);

int cmd_sched(int argc, const char **argv, const char *prefix __used)

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

		usage_with_options(sched_usage, sched_options);

	 * Aliased to 'perf trace' for now:
	if (!strcmp(argv[0], "trace"))
		return cmd_trace(argc, argv, prefix);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
				usage_with_options(latency_usage, latency_options);
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
				usage_with_options(replay_usage, replay_options);

		usage_with_options(sched_usage, sched_options);