#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

static char const		*input_name = "perf.data";

static unsigned long		page_size;
static unsigned long		mmap_window = 32;

static unsigned long		total_comm;

static struct rb_root		threads;
static struct thread		*last_match;

static struct perf_header	*header;
static u64			sample_type;

static char			default_sort_order[] = "avg, max, switch, runtime";
static char			*sort_order = default_sort_order;

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096

#define BUG_ON(x)		assert(!(x))

static u64			run_measurement_overhead;
static u64			sleep_measurement_overhead;

#define COMM_LEN		20
#define SYM_LEN			129

#define MAX_PID			65536

static unsigned long		nr_tasks;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
};

struct sched_atom {
	enum sched_event_type	type;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	int			specific_wait;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc		*pid_to_task[MAX_PID];

static struct task_desc		**tasks;

static pthread_mutex_t		start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64			start_time;

static pthread_mutex_t		work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long		nr_run_events;
static unsigned long		nr_sleep_events;
static unsigned long		nr_wakeup_events;

static unsigned long		nr_sleep_corrections;
static unsigned long		nr_run_events_optimized;

static unsigned long		targetless_wakeups;
static unsigned long		multitarget_wakeups;

static u64			cpu_usage;
static u64			runavg_cpu_usage;
static u64			parent_cpu_usage;
static u64			runavg_parent_cpu_usage;

static unsigned long		nr_runs;
static u64			sum_runtime;
static u64			sum_fluct;
static u64			run_avg;

static unsigned long		replay_repeat = 10;
static unsigned long		nr_timestamps;
static unsigned long		nr_unordered_timestamps;
static unsigned long		nr_state_machine_bugs;
static unsigned long		nr_events;
static unsigned long		nr_lost_chunks;
static unsigned long		nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE,
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root		atom_root, sorted_atom_root;

static u64			all_runtime;
static u64			all_count;

static int read_events(void);

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

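/*
 * Illustrative note (added, not original source): both calibration loops
 * above keep the *minimum* of ten trials because timing noise is
 * one-sided - preemption, IRQs and cache misses only ever make a trial
 * slower, never faster, so the minimum is the best estimate of the true
 * overhead. A generic sketch of the same pattern:
 *
 *	static u64 min_overhead_of(void (*op)(void), int trials)
 *	{
 *		u64 t0, min_delta = ~0ULL;
 *		int i;
 *
 *		for (i = 0; i < trials; i++) {
 *			t0 = get_nsecs();
 *			op();
 *			min_delta = min(min_delta, get_nsecs() - t0);
 *		}
 *		return min_delta;
 *	}
 */
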
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * with it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

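/*
 * Example (added): two back-to-back switch-outs of the same task produce
 * one merged atom instead of two, e.g.
 *
 *	add_sched_event_run(task, t0, 10000);
 *	add_sched_event_run(task, t1, 5000);
 *
 * leaves a single SCHED_EVENT_RUN atom with duration == 15000 and bumps
 * nr_run_events_optimized, which keeps replay's per-event overhead down.
 */
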
static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = calloc(1, sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static u64 get_cpu_usage_nsec_self(void)
{
	char filename[] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	u64 total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			&msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}

static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

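/*
 * How the synchronization above works (summary, added): the two mutexes
 * are used purely as gates, never to protect data. The parent holds
 * start_work_mutex while each worker posts ready_for_work and then
 * blocks trying to lock it; releasing the mutex acts as a broadcast
 * "start" signal. work_done_sem plus work_done_wait_mutex form the
 * mirror-image "stop" barrier that parks workers until the next round:
 *
 *	parent				worker
 *	lock(start_work_mutex)		sem_post(ready_for_work)
 *	wait all ready_for_work		lock(start_work_mutex)   <- blocks
 *	unlock(start_work_mutex) --->	runs its atoms
 *	wait all work_done_sem		sem_post(work_done_sem)
 *					lock(work_done_wait_mutex) <- parks
 */
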
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

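/*
 * Note (added): the run-averages above are exponential moving averages
 * with a weight of 1/10 on the newest sample, in integer arithmetic.
 * Standalone form:
 *
 *	static u64 ema_update(u64 avg, u64 sample)
 *	{
 *		return (avg * 9 + sample) / 10;
 *	}
 *
 * Starting from avg == 100 and feeding samples of 200, successive
 * updates yield 110, 119, 127, ... converging on 200.
 */
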
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

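/*
 * Note (added): sum_fluct/nr_runs is the mean absolute deviation of the
 * run times, so the std_dev computed above is really that deviation
 * divided by sqrt(nr_runs) - an estimate of the error of the *average*
 * replay time, which shrinks as more iterations accumulate:
 *
 *	std_dev ~= (sum_fluct / nr_runs) / sqrt(nr_runs)
 */
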
static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;

	thread = threads__findnew(event->comm.pid, &threads, &last_match);

	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing perf_event_comm, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

struct raw_event_sample {
	u32 size;
	char data[0];
};

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)

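/*
 * Example (added): for a concrete event the macros above expand to plain
 * field loads. With "struct trace_wakeup_event w;" and a raw sample
 * "raw",
 *
 *	FILL_FIELD(w, pid, event, raw->data);
 *
 * becomes
 *
 *	w.pid = (typeof(w.pid)) raw_field_value(event, "pid", raw->data);
 *
 * i.e. the field is located by name in the tracepoint's format
 * description, so the structs below do not have to match the kernel's
 * binary record layout field-for-field.
 */
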
struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);
};

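/*
 * Note (added): this struct is a small vtable. 'perf sched latency' and
 * 'perf sched replay' walk the same trace; only the handler table
 * differs:
 *
 *	trace_handler = &lat_ops;	// latency accounting
 *	trace_handler = &replay_ops;	// workload re-execution
 *
 * process_raw_event() below dispatches each tracepoint through whichever
 * table is installed.
 */
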
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}

static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

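/*
 * Example (added): with "-s avg,max" the sort list holds avg_cmp
 * followed by max_cmp, and thread_lat_cmp() returns the first non-zero
 * comparison - entries are ordered by average latency, with maximum
 * latency as the tie-breaker. It behaves like:
 *
 *	cmp = avg_cmp(l, r);
 *	if (cmp)
 *		return cmp;
 *	return max_cmp(l, r);
 */
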
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms;

	atoms = calloc(sizeof(*atoms), 1);
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}

static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

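/*
 * Note (added): prev_state indexes into "RSDTtZX", mirroring the task
 * state values for the low states: 0 -> 'R' (runnable), 1 -> 'S'
 * (interruptible sleep), 2 -> 'D' (uninterruptible sleep). Higher task
 * states are bitmask values in the kernel, so this simple indexing only
 * approximates them. Only 'R' matters below: a task switched out while
 * still runnable is already waiting for the CPU again.
 */
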
static void
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom;

	atom = calloc(sizeof(*atom), 1);
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
	atoms->nb_atoms++;
}

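/*
 * Worked example (added): the latency tracked here is wakeup-to-run.
 * On a timeline,
 *
 *	t0: sched_out (state 'S')	-> atom created, THREAD_SLEEPING
 *	t1: sched_wakeup		-> THREAD_WAIT_CPU, wake_up_time = t1
 *	t2: sched_in			-> THREAD_SCHED_IN
 *
 * contributes t2 - t1 to total_lat and possibly max_lat. For a task
 * switched out in state 'R', wake_up_time is its sched-out time, so its
 * whole runqueue wait t2 - t0 counts as latency.
 */
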
static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * Task came in we have not heard about yet,
		 * add an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);
}

static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct event *event __used,
		      int cpu,
		      u64 timestamp,
		      struct thread *this_thread __used)
{
	struct work_atoms *atoms;
	struct thread *thread;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	thread = threads__findnew(runtime_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}

static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}

static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
};

static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!work_list->thread->pid)
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s-%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name			= "pid",
	.cmp			= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name			= "avg",
	.cmp			= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name			= "max",
	.cmp			= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name			= "switch",
	.cmp			= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name			= "runtime",
	.cmp			= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

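/*
 * Note (added): the latency data lives in two red-black trees. Events
 * are collected into atom_root keyed by pid (cmp_pid); sort_lat() drains
 * that tree node by node and re-inserts each entry into sorted_atom_root
 * keyed by the user's --sort list, so the output pass below is a simple
 * in-order traversal.
 */
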
static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();

	read_events();
	sort_lat();

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf("  Task                  |  Runtime ms   | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	printf("\n");
}

static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}

static void
process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	trace_handler->switch_event(&switch_event, event, cpu, timestamp, thread);
}

static void
process_sched_runtime_event(struct raw_event_sample *raw,
			    struct event *event,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, raw->data);
	FILL_FIELD(runtime_event, pid, event, raw->data);
	FILL_FIELD(runtime_event, runtime, event, raw->data);
	FILL_FIELD(runtime_event, vruntime, event, raw->data);

	trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
}

static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	int cpumode;
	struct dso *dso = NULL;
	struct thread *thread;
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;

	if (cpumode == PERF_EVENT_MISC_KERNEL) {
		dso = kernel_dso;

		dump_printf(" ...... dso: %s\n", dso->name);

	} else if (cpumode == PERF_EVENT_MISC_USER) {
		/* user-space sample, no dso lookup needed here */
	} else {
		dso = hypervisor_dso;

		dump_printf(" ...... dso: [hypervisor]\n");
	}

	if (sample_type & PERF_SAMPLE_RAW)
		process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	nr_events++;

	switch (event->header.type) {
	case PERF_EVENT_MMAP:
		return 0;
	case PERF_EVENT_LOST:
		nr_lost_chunks++;
		nr_lost_events += event->lost.lost;
		return 0;

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_EXIT ... PERF_EVENT_READ:
		return 0;

	case PERF_EVENT_SAMPLE:
		return process_sample_event(event, offset, head);

	case PERF_EVENT_MAX:
	default:
		return -1;
	}

	return 0;
}

static int read_events(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat perf_stat;
	event_t *event;
	uint32_t size;
	int input;
	char *buf;

	register_idle_thread(&threads, &last_match);

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &perf_stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!perf_stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}
	header = perf_header__read(input);
	head = header->data_offset;
	sample_type = perf_header__sample_type(header);

	if (!(sample_type & PERF_SAMPLE_RAW))
		die("No trace sample to read. Did you call perf record "
		    "without -R?");

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int res;

		res = munmap(buf, page_size * mmap_window);
		assert(res == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	if (!size || process_event(event, offset, head) < 0) {
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < (unsigned long)perf_stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	return rc;
}

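/*
 * Worked example (added): with 4 KiB pages the window mapped by
 * read_events() above is 32 * 4096 = 128 KiB. If an event would cross
 * the window's end at, say, head == 130000, the window slides forward by
 * the page-aligned amount
 *
 *	shift = 4096 * (130000 / 4096) = 126976
 *
 * so offset grows by 126976, head drops to 3024, and the event is
 * re-read from the new mapping; only whole pages are ever remapped.
 */
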
static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add((char *)"pid", &cmp_pid);
}

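/*
 * Usage example (added): the default order is "avg, max, switch,
 * runtime"; something like
 *
 *	perf sched latency --sort max,runtime
 *
 * ranks threads by worst-case delay first. Note that cmp_pid always gets
 * a lone "pid" key: the collection tree must be keyed by thread identity
 * no matter how the report is sorted.
 */
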
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();
	page_size = getpagesize();

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else if (!strcmp(argv[0], "trace")) {
		/*
		 * Aliased to 'perf trace' for now:
		 */
		return cmd_trace(argc, argv, prefix);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}