#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"

#include "util/debug.h"

#include "util/trace-event.h"
#include <sys/types.h>

static char const *input_name = "perf.data";

static unsigned long page_size;
static unsigned long mmap_window = 32;

static unsigned long total_comm = 0;

static struct rb_root threads;
static struct thread *last_match;

static struct perf_header *header;
static u64 sample_type;

static int replay_mode;

/*
 * Scheduler benchmarks
 */
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/prctl.h>
#include <sys/mman.h>

#include <linux/unistd.h>

#include <semaphore.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <fcntl.h>
#include <time.h>
#include <math.h>

#include <stdio.h>

#define PR_SET_NAME	15	/* Set process name */

#define BUG_ON(x)	assert(!(x))

typedef unsigned long long nsec_t;

static nsec_t run_measurement_overhead;
static nsec_t sleep_measurement_overhead;

static nsec_t get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

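/*
 * Busy-loop for 'nsecs', using the calibrated run_measurement_overhead
 * to compensate for the cost of the get_nsecs() calls themselves:
 */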
static void burn_nsecs(nsec_t nsecs)
{
	nsec_t T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(nsec_t nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(void)
{
	nsec_t T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	nsec_t T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

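/*
 * The traced workload is modeled as an array of tasks, each with an
 * ordered array of scheduling events (run, sleep, wakeup) reconstructed
 * from the trace and replayed by one thread per task:
 */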
#define MAX_PID			65536

static unsigned long nr_tasks;

struct sched_event;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[16];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_event	**events;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	nsec_t			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
};

struct sched_event {
	enum sched_event_type	type;
	nsec_t			timestamp;
	u64			duration;
	unsigned long		nr;
	int			specific_wait;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static nsec_t start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static struct sched_event *
get_new_event(struct task_desc *task, nsec_t timestamp)
{
	struct sched_event *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_event *) * task->nr_events;
	task->events = realloc(task->events, size);
	BUG_ON(!task->events);

	task->events[idx] = event;

	return event;
}

static struct sched_event *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->events[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, nsec_t timestamp, u64 duration)
{
	struct sched_event *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * into it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

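/*
 * A wakeup is paired with the wakee's most recent SLEEP event via a
 * semaphore: the sleeper blocks in sem_wait(), the waker sem_post()s it.
 * Wakeups that find no preceding sleep, or a sleep that already has a
 * waker, cannot be paired and are only counted:
 */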
static void
add_sched_event_wakeup(struct task_desc *task, nsec_t timestamp,
		       struct task_desc *wakee)
{
	struct sched_event *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, nsec_t timestamp,
		      u64 task_state __used)
{
	struct sched_event *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = calloc(1, sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

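/*
 * Replay one event of a task: RUN events burn the recorded amount of
 * CPU time, SLEEP events block on the event's wait semaphore (if a
 * waker got paired with it), WAKEUP events post the wakee's semaphore:
 */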
static void
process_sched_event(struct task_desc *this_task __used, struct sched_event *event)
{
	int ret = 0;
	nsec_t now;
	long long delta;

	now = get_nsecs();
	delta = start_time + event->timestamp - now;

	switch (event->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(event->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (event->wait_sem)
			ret = sem_wait(event->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (event->wait_sem)
			ret = sem_post(event->wait_sem);
		BUG_ON(ret);
		break;
	default:
		BUG_ON(1);
	}
}

static nsec_t get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	nsec_t sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

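/*
 * Per-thread CPU usage, read as se.sum_exec_runtime from
 * /proc/<pid>/sched. The template filename below is sized for the
 * largest possible PID and overwritten by sprintf():
 */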
static nsec_t get_cpu_usage_nsec_self(void)
{
	char filename[] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	nsec_t total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			     &msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}

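/*
 * Worker thread: signal readiness, wait for the parent to release the
 * start_work_mutex barrier, replay all recorded events while measuring
 * own CPU usage, signal completion, then park on work_done_wait_mutex
 * until the next iteration:
 */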
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	nsec_t cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->events[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

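/*
 * Per-iteration CPU usage totals; the runavg_ variants keep an
 * exponentially decaying running average across iterations:
 */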
static nsec_t cpu_usage;
static nsec_t runavg_cpu_usage;
static nsec_t parent_cpu_usage;
static nsec_t runavg_parent_cpu_usage;

static void wait_for_tasks(void)
{
	nsec_t cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

static int __cmd_sched(void);

static void parse_trace(void)
{
	__cmd_sched();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run events optimized: %ld\n",
			nr_run_events_optimized);
}

static unsigned long nr_runs;
static nsec_t sum_runtime;
static nsec_t sum_fluct;
static nsec_t run_avg;

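/*
 * Run one replay iteration and update the runtime statistics:
 * 'run_avg' is a decaying average that weights history 9:1 against
 * the latest sample ((avg*9 + delta)/10):
 */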
static void run_one_test(void)
{
	nsec_t T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

#if 0
	printf("%0.2f +- %0.2f, ",
		(double)avg_delta/1e6, (double)std_dev/1e6);
#endif
	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	nsec_t T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

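/*
 * From here on: parsing of the perf.data event stream, to reconstruct
 * the per-task event arrays that the benchmark above replays:
 */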
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;

	thread = threads__findnew(event->comm.pid, &threads, &last_match);

	dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

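/*
 * Raw tracepoint samples are decoded field-by-field, by name, from the
 * parsed event format. FILL_FIELD()/FILL_ARRAY() copy one named field
 * (scalar or array) from the raw payload into the matching struct member:
 */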
struct raw_event_sample {
	u32 size;
	char data[0];
};

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)

struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);
};

static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}

static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event	= replay_wakeup_event,
	.switch_event	= replay_switch_event,
	.fork_event	= replay_fork_event,
};

static struct trace_sched_handler *trace_handler;

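/*
 * Decode one raw sample into the corresponding trace_*_event struct
 * and dispatch it through the currently selected trace_handler:
 */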
static void
process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}

static void
process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	trace_handler->switch_event(&switch_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
}

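/*
 * Sample records have a variable layout: which fields follow the fixed
 * header depends on the sample_type bits that were active during
 * recording, so 'more_data' is advanced past each optional field:
 */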
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show = 0;
	struct dso *dso = NULL;
	struct thread *thread;
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	int cpumode;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;

	if (cpumode == PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';
		dso = kernel_dso;

		dump_printf(" ...... dso: %s\n", dso->name);

	} else if (cpumode == PERF_EVENT_MISC_USER) {
		show = SHOW_USER;
		level = '.';

	} else {
		show = SHOW_HV;
		level = 'H';
		dso = hypervisor_dso;

		dump_printf(" ...... dso: [hypervisor]\n");
	}

	if (sample_type & PERF_SAMPLE_RAW)
		process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	switch (event->header.type) {
	case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
		return 0;

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_EXIT ... PERF_EVENT_READ:
		return 0;

	case PERF_EVENT_SAMPLE:
		return process_sample_event(event, offset, head);

	case PERF_EVENT_MAX:
	default:
		return -1;
	}

	return 0;
}

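/*
 * Stream through perf.data with a sliding mmap window, handing each
 * record to process_event(). When a record would cross the end of the
 * window, the window is re-mapped at a page-aligned offset:
 */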
static int __cmd_sched(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat perf_stat;
	event_t *event;
	uint32_t size;
	char *buf;
	int input;

	register_idle_thread(&threads, &last_match);

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &perf_stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!perf_stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}
	header = perf_header__read(input);
	head = header->data_offset;
	sample_type = perf_header__sample_type(header);

	if (!(sample_type & PERF_SAMPLE_RAW))
		die("No trace sample to read. Did you call perf record "
		    "without -R?");

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int res;

		res = munmap(buf, page_size * mmap_window);
		assert(res == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	if (!size || process_event(event, offset, head) < 0) {
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < (unsigned long)perf_stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	return rc;
}

static const char * const sched_usage[] = {
	"perf sched [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('r', "replay", &replay_mode,
		    "replay sched behaviour from traces"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
};

int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	long nr_iterations = 10, i;

	symbol__init();
	page_size = getpagesize();

	argc = parse_options(argc, argv, options, sched_usage, 0);
	if (argc) {
		/*
		 * Special case: if there's an argument left then assume that
		 * it's a symbol filter:
		 */
		if (argc > 1)
			usage_with_options(sched_usage, options);
	}

	if (replay_mode)
		trace_handler = &replay_ops;
	else /* We may need a default subcommand */
		die("Please select a sub command (-r)\n");

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	parse_trace();
	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();

	printf("------------------------------------------------------------\n");
	for (i = 0; i < nr_iterations; i++)
		run_one_test();

	return 0;
}