#include "builtin.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"

#include "perf.h"
#include "util/debug.h"

#include "util/trace-event.h"
#include <sys/types.h>

static char const	*input_name = "perf.data";
static int		input;
static unsigned long	page_size;
static unsigned long	mmap_window = 32;

static unsigned long	total_comm = 0;

static struct rb_root	threads;
static struct thread	*last_match;

static struct perf_header *header;
static u64		sample_type;
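/*
 * Overview: this command replays a recorded scheduler workload. It reads
 * raw scheduler tracepoint samples from perf.data, reconstructs a
 * per-task list of run/sleep/wakeup events, and then re-enacts those
 * events with one pthread per task. The data must be recorded with raw
 * samples enabled (perf record -R), as checked in __cmd_sched() below.
 */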
/*
 * Scheduler benchmarks
 */
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <linux/unistd.h>

#include <semaphore.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <math.h>

#define PR_SET_NAME	15	/* Set process name */

#define BUG_ON(x)	assert(!(x))

#define DEBUG		0

typedef unsigned long long nsec_t;

#define printk(x...)	do { printf(x); fflush(stdout); } while (0)
static nsec_t prev_printk;

#define __dprintk(x,y...) do {						\
	nsec_t __now = get_nsecs(), __delta = __now - prev_printk;	\
									\
	prev_printk = __now;						\
									\
	printf("%.3f [%Ld] [%.3f]: " x, (double)__now/1e6, __now, (double)__delta/1e6, y);\
} while (0)

#if !DEBUG
# define dprintk(x...)	do { } while (0)
#else
# define dprintk(x...)	__dprintk(x)
#endif

#define __DP()		__dprintk("parent: line %d\n", __LINE__)
#define DP()		dprintk("parent: line %d\n", __LINE__)
#define D()		dprintk("task %ld: line %d\n", this_task->nr, __LINE__)
static nsec_t run_measurement_overhead;
static nsec_t sleep_measurement_overhead;

static nsec_t get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
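/*
 * burn_nsecs() spins for the requested duration, compensating for the
 * calibrated cost of the timestamp reads themselves
 * (run_measurement_overhead, measured below):
 */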
static void burn_nsecs(nsec_t nsecs)
{
	nsec_t T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(nsec_t nsecs)
{
	struct timespec ts;

	/* split into whole seconds and the nanosecond remainder: */
	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}
static void calibrate_run_measurement_overhead(void)
{
	nsec_t T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printk("run measurement overhead: %Ld nsecs\n", min_delta);
}
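/*
 * The minimum of ten back-to-back probes is used instead of an average,
 * so that preemption or cache-miss outliers do not inflate the
 * calibrated overhead.
 */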
static void calibrate_sleep_measurement_overhead(void)
{
	nsec_t T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printk("sleep measurement overhead: %Ld nsecs\n", min_delta);
}
#define COMM_LEN	20
#define SYM_LEN		129

#define MAX_PID		65536

static unsigned long nr_tasks;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_event	**events;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	nsec_t			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
};

struct sched_event {
	enum sched_event_type	type;
	nsec_t			timestamp;
	u64			duration;
	unsigned long		nr;
	int			specific_wait;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;
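/*
 * Each replayed task owns a dynamically grown array of sched_event
 * pointers; pid_to_task[] maps kernel PIDs to their task slot, and
 * tasks[] indexes tasks in registration order.
 */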
static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static nsec_t start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;
static struct sched_event *
get_new_event(struct task_desc *task, nsec_t timestamp)
{
	struct sched_event *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_event *) * task->nr_events;
	task->events = realloc(task->events, size);
	BUG_ON(!task->events);

	task->events[idx] = event;

	return event;
}
static struct sched_event *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->events[task->nr_events - 1];
}
static void
add_sched_event_run(struct task_desc *task, nsec_t timestamp, u64 duration)
{
	struct sched_event *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * with it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}
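/*
 * For example, two consecutive run samples of 1000 and 2000 nsecs for
 * the same task collapse into a single 3000 nsec RUN event, keeping the
 * per-task event arrays compact.
 */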
static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static void
add_sched_event_wakeup(struct task_desc *task, nsec_t timestamp,
		       struct task_desc *wakee)
{
	struct sched_event *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}
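/*
 * This wakeup/sleep pairing is what serializes the replay: the waker's
 * WAKEUP event and the wakee's preceding SLEEP event share one
 * semaphore, so the waker's sem_post() releases exactly the sem_wait()
 * that the wakee blocks on in process_sched_event().
 */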
static void
add_sched_event_sleep(struct task_desc *task, nsec_t timestamp,
		      unsigned long uninterruptible __used)
{
	struct sched_event *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];
	if (task)
		return task;

	task = calloc(1, sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	printk("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}
static int first_trace_line = 1;

static nsec_t first_timestamp;
static nsec_t prev_timestamp;
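/*
 * parse_line() consumes one line of ftrace text output, e.g.:
 *
 *   " <idle> 0 0D.s3 0us+: try_to_wake_up <events/0 9> (1 0)"
 *
 * and feeds it to the add_sched_event_*() builders above. Three trace
 * functions are recognized: update_curr (run time), try_to_wake_up
 * (wakeups) and deactivate_task (sleeps).
 */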
void parse_line(char *line);

void parse_line(char *line)
{
	unsigned long param1 = 0, param2 = 0;
	char comm[COMM_LEN], comm2[COMM_LEN];
	unsigned long pid, pid2, timestamp0;
	struct task_desc *task, *task2;
	char func_str[SYM_LEN];
	nsec_t timestamp;
	int ret;

	//" <idle> 0 0D.s3 0us+: try_to_wake_up <events/0 9> (1 0)"
	ret = sscanf(line, "%20s %5ld %*s %ldus%*c:"
			   " %128s <%20s %ld> (%ld %ld)\n",
		comm, &pid, &timestamp0,
		func_str, comm2, &pid2, &param1, &param2);
	dprintk("ret: %d\n", ret);
	if (ret != 8)
		return;

	timestamp = timestamp0 * 1000LL;

	if (first_trace_line) {
		first_trace_line = 0;
		first_timestamp = timestamp;
	}

	timestamp -= first_timestamp;
	BUG_ON(timestamp < prev_timestamp);
	prev_timestamp = timestamp;

	dprintk("parsed: %s - %ld %Ld: %s - <%s %ld> (%ld %ld)\n",
		comm, pid, timestamp,
		func_str, comm2, pid2,
		param1, param2);

	task = register_pid(pid, comm);
	task2 = register_pid(pid2, comm2);

	if (!strcmp(func_str, "update_curr")) {
		dprintk("%Ld: task %ld runs for %ld nsecs\n",
			timestamp, task->nr, param1);
		add_sched_event_run(task, timestamp, param1);
	} else if (!strcmp(func_str, "try_to_wake_up")) {
		dprintk("%Ld: task %ld wakes up task %ld\n",
			timestamp, task->nr, task2->nr);
		add_sched_event_wakeup(task, timestamp, task2);
	} else if (!strcmp(func_str, "deactivate_task")) {
		dprintk("%Ld: task %ld goes to sleep (uninterruptible: %ld)\n",
			timestamp, task->nr, param1);
		add_sched_event_sleep(task, timestamp, param1);
	}
}
static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printk("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}
static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}
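/*
 * Chaining every task to its neighbor in a ring of artificial wakeups at
 * timestamp 0 (the exact pairing here is a reconstruction) ensures that
 * no task's initial sleep event can wait forever on a semaphore that
 * nothing posts.
 */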
static void
process_sched_event(struct task_desc *this_task __used, struct sched_event *event)
{
	int ret = 0;
	nsec_t now;
	long long delta;

	now = get_nsecs();
	delta = start_time + event->timestamp - now;

	dprintk("task %ld, event #%ld, %Ld, delta: %.3f (%Ld)\n",
		this_task->nr, event->nr, event->timestamp,
		(double)delta/1e6, delta);

	/*
	 * Sleep-correction is deliberately compiled out via the "0 &&":
	 */
	if (0 && delta > 0) {
		dprintk("%.3f: task %ld FIX %.3f\n",
			(double)event->timestamp/1e6,
			this_task->nr,
			(double)delta/1e6);
		sleep_nsecs(start_time + event->timestamp - now);
		nr_sleep_corrections++;
	}

	switch (event->type) {
		case SCHED_EVENT_RUN:
			dprintk("%.3f: task %ld RUN for %.3f\n",
				(double)event->timestamp/1e6,
				this_task->nr,
				(double)event->duration/1e6);
			burn_nsecs(event->duration);
			break;
		case SCHED_EVENT_SLEEP:
			dprintk("%.3f: task %ld %s SLEEP\n",
				(double)event->timestamp/1e6,
				this_task->nr, event->wait_sem ? "" : "SKIP");
			if (event->wait_sem)
				ret = sem_wait(event->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			dprintk("%.3f: task %ld WAKEUP => task %ld\n",
				(double)event->timestamp/1e6,
				this_task->nr,
				event->wakee->nr);
			if (event->wait_sem)
				ret = sem_post(event->wait_sem);
			BUG_ON(ret);
			break;
		default:
			BUG_ON(1);
	}
}
static nsec_t get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	nsec_t sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}
static nsec_t get_cpu_usage_nsec_self(void)
{
	char filename [] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	nsec_t total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		dprintk("got line with length %zd:\n", chars);
		dprintk("%s", line);
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			     &msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			dprintk("total: (%ld.%06ld) %Ld\n",
				msecs, nsecs, total);
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}
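/*
 * Example /proc/PID/sched line matched by the sscanf() above (format
 * assumed from the scan pattern; the field layout is kernel-dependent):
 *
 *   se.sum_exec_runtime : 123.456789
 *
 * i.e. 123 msecs plus 456789 nsecs, combined into 123456789 nsecs.
 */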
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	nsec_t cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[COMM_LEN + 2];

	dprintk("task %ld started up.\n", this_task->nr);
	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->events[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	dprintk("task %ld cpu usage: %0.3f msecs\n",
		this_task->nr, (double)this_task->cpu_usage / 1e6);

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}
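/*
 * create_tasks() grabs both mutexes before spawning any worker, so each
 * thread_func() instance posts ready_for_work and then parks on
 * start_work_mutex; wait_for_tasks() later drops that mutex to release
 * all workers at once - a simple two-mutex start/stop barrier.
 */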
static nsec_t cpu_usage;
static nsec_t runavg_cpu_usage;
static nsec_t parent_cpu_usage;
static nsec_t runavg_parent_cpu_usage;
static void wait_for_tasks(void)
{
	nsec_t cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		unsigned long missed;

		task = tasks[i];
		while (task->curr_event + 1 < task->nr_events) {
			dprintk("parent waiting for %ld (%ld != %ld)\n",
				i, task->curr_event, task->nr_events);
			sleep_nsecs(100000000);
		}
		missed = task->nr_events - 1 - task->curr_event;
		if (missed)
			printk("task %ld missed events: %ld\n", i, missed);
		ret = sem_post(&task->sleep_sem);
		BUG_ON(ret);
	}

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
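/*
 * The parent polls task->curr_event in 100 msec steps; any events still
 * unprocessed when a task is released are counted and reported as
 * "missed events" rather than silently dropped.
 */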
static int __cmd_sched(void);

static void parse_trace(void)
{
	__cmd_sched();

	printk("nr_run_events:        %ld\n", nr_run_events);
	printk("nr_sleep_events:      %ld\n", nr_sleep_events);
	printk("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printk("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printk("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printk("run events optimized: %ld\n",
			nr_run_events_optimized);
}
static unsigned long nr_runs;
static nsec_t sum_runtime;
static nsec_t sum_fluct;
static nsec_t run_avg;
static void run_one_test(void)
{
	nsec_t T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printk("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printk("%0.2f +- %0.2f, ",
		(double)avg_delta/1e6, (double)std_dev/1e6);

	printk("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printk("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printk(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);

	printk("\n");

	if (nr_sleep_corrections)
		printk(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}
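/*
 * run_avg is an exponential moving average: run_avg' = (9*run_avg +
 * delta)/10, i.e. a smoothing factor of 1/10 per iteration; the same
 * 9/10 blend is used for the cpu-usage running averages in
 * wait_for_tasks().
 */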
static void test_calibrations(void)
{
	nsec_t T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printk("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printk("the sleep test took %Ld nsecs\n", T1-T0);
}
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;

	thread = threads__findnew(event->comm.pid, &threads, &last_match);

	dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}
struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u32 prio;
	u32 success;
};

static void
process_sched_wakeup_event(struct trace_wakeup_event *wakeup_event, struct event *event,
	    int cpu __used, u64 timestamp __used, struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	printf("sched_wakeup event %p\n", event);

	printf(" ... pid %d woke up %s/%d\n",
		wakeup_event->common_pid,
		wakeup_event->comm,
		wakeup_event->pid);

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}
struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

#define MAX_CPUS 4096

static u64 cpu_last_switched[MAX_CPUS];

static void
process_sched_switch_event(struct trace_switch_event *switch_event, struct event *event,
	    int cpu __used, u64 timestamp __used, struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	printf("sched_switch event %p\n", event);
	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
		switch_event->prev_comm, switch_event->prev_pid,
		switch_event->next_comm, switch_event->next_pid,
		delta);

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, 0);
}
struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

static void
process_sched_fork_event(struct trace_fork_event *fork_event, struct event *event,
	    int cpu __used, u64 timestamp __used, struct thread *thread __used)
{
	printf("sched_fork event %p\n", event);
	printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
	printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}
static void process_sched_exit_event(struct event *event,
	    int cpu __used, u64 timestamp __used, struct thread *thread __used)
{
	printf("sched_exit event %p\n", event);
}
static void
process_raw_event(event_t *raw_event, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	/* PERF_SAMPLE_RAW payload: a u32 size followed by the record data */
	struct {
		u32 size;
		char data[0];
	} *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	/*
	 * FIXME: better to resolve the pid from the struct trace_entry
	 * field, although it should be the same as this perf event's pid:
	 */
	printf("id %d, type: %d, event: %s\n",
		raw_event->header.type, type, event->name);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(more_data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(more_data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(more_data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(more_data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
}
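/*
 * Raw tracepoint payloads are dispatched by event name, so plugging in a
 * new sub-profiler only takes a struct describing its record layout and
 * one more strcmp() branch above.
 */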
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show = 0;
	struct dso *dso = NULL;
	struct thread *thread;
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	int cpumode;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		period);

	/* check for a NULL thread before dereferencing it: */
	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;

	if (cpumode == PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';
		dso = kernel_dso;
		dump_printf(" ...... dso: %s\n", dso->name);
	} else if (cpumode == PERF_EVENT_MISC_USER) {
		show = SHOW_USER;
		level = '.';
	} else {
		show = SHOW_HV;
		level = 'H';
		dso = hypervisor_dso;
		dump_printf(" ...... dso: [hypervisor]\n");
	}

	if (sample_type & PERF_SAMPLE_RAW)
		process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}
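/*
 * The more_data cursor walks the optional sample fields in the order the
 * kernel emits them (TIME, then CPU plus a reserved u32, then PERIOD),
 * as selected by the sample_type bits recorded in the perf.data header.
 */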
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	switch (event->header.type) {
	case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
		return 0;

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_EXIT ... PERF_EVENT_READ:
		return 0;

	case PERF_EVENT_SAMPLE:
		return process_sample_event(event, offset, head);

	case PERF_EVENT_MAX:
	default:
		return -1;
	}

	return 0;
}
static int __cmd_sched(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat perf_stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread(&threads, &last_match);

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &perf_stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!perf_stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}
	header = perf_header__read(input);
	head = header->data_offset;
	sample_type = perf_header__sample_type(header);

	if (!(sample_type & PERF_SAMPLE_RAW))
		die("No trace sample to read. Did you call perf record "
		    "without -R?");

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int res;

		res = munmap(buf, page_size * mmap_window);
		assert(res == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	if (!size || process_event(event, offset, head) < 0) {
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon':
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < (unsigned long)perf_stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	return rc;
}
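/*
 * The data file is consumed through a sliding window of mmap_window (32)
 * pages: whenever an event record would cross the window's end, the
 * window is unmapped and remapped at the current page boundary instead
 * of mapping the whole file at once.
 */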
static const char * const sched_usage[] = {
	"perf sched [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
};
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	long nr_iterations = 10, i;

	symbol__init();
	page_size = getpagesize();

	argc = parse_options(argc, argv, options, sched_usage, 0);
	if (argc) {
		/*
		 * Special case: if there's an argument left then assume that
		 * it's a symbol filter:
		 */
		if (argc > 1)
			usage_with_options(sched_usage, options);
	}

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	parse_trace();
	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printk("------------------------------------------------------------\n");
	for (i = 0; i < nr_iterations; i++)
		run_one_test();

	return 0;
}