perf_counter: move PERF_RECORD_TIME
author		Peter Zijlstra <a.p.zijlstra@chello.nl>
Wed, 8 Apr 2009 13:01:32 +0000 (15:01 +0200)
committer	Ingo Molnar <mingo@elte.hu>
Wed, 8 Apr 2009 17:05:55 +0000 (19:05 +0200)
Move PERF_RECORD_TIME so that all the fixed-length items come before
the variable-length ones.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090408130409.307926436@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
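
A sketch of why the move matters for readers of the event stream: with all
the fixed-length items first, the offset of the time stamp depends only on
the record-format bits in header.type, not on the variable-length GROUP and
CALLCHAIN payloads that used to precede it. A minimal userspace-side sketch
in C, assuming the perf_event_header layout from the ABI; sample_time() is
a hypothetical helper, not part of this patch:

	#include <stdint.h>
	#include <string.h>

	/* record-format bits, post-move values from the hunk below */
	enum {
		PERF_RECORD_IP   = 1U << 0,
		PERF_RECORD_TID  = 1U << 1,
		PERF_RECORD_TIME = 1U << 2,
	};

	struct perf_event_header {	/* as in the perf ABI */
		uint32_t	type;
		uint16_t	misc;
		uint16_t	size;
	};

	/* Hypothetical helper: locate the time stamp in a sample record. */
	static uint64_t sample_time(const struct perf_event_header *hdr)
	{
		const char *p = (const char *)(hdr + 1);
		uint64_t time;

		if (hdr->type & PERF_RECORD_IP)
			p += sizeof(uint64_t);		/* u64 ip       */
		if (hdr->type & PERF_RECORD_TID)
			p += 2 * sizeof(uint32_t);	/* u32 pid, tid */

		/* time now sits here, ahead of GROUP/CALLCHAIN */
		memcpy(&time, p, sizeof(time));
		return time;
	}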
include/linux/perf_counter.h
kernel/perf_counter.c

diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index a70a55f..8bd1be5 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -100,9 +100,9 @@ enum sw_event_ids {
 enum perf_counter_record_format {
        PERF_RECORD_IP          = 1U << 0,
        PERF_RECORD_TID         = 1U << 1,
-       PERF_RECORD_GROUP       = 1U << 2,
-       PERF_RECORD_CALLCHAIN   = 1U << 3,
-       PERF_RECORD_TIME        = 1U << 4,
+       PERF_RECORD_TIME        = 1U << 2,
+       PERF_RECORD_GROUP       = 1U << 3,
+       PERF_RECORD_CALLCHAIN   = 1U << 4,
 };
 
 /*
@@ -250,6 +250,7 @@ enum perf_event_type {
         *
         *      { u64                   ip;       } && PERF_RECORD_IP
         *      { u32                   pid, tid; } && PERF_RECORD_TID
+        *      { u64                   time;     } && PERF_RECORD_TIME
         *
         *      { u64                   nr;
         *        { u64 event, val; }   cnt[nr];  } && PERF_RECORD_GROUP
@@ -259,8 +260,6 @@ enum perf_event_type {
         *                              kernel,
         *                              user;
         *        u64                   ips[nr];  } && PERF_RECORD_CALLCHAIN
-        *
-        *      { u64                   time;     } && PERF_RECORD_TIME
         * };
         */
 };
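
By contrast, the variable-length payloads that now form the tail are
nr-prefixed, so a reader can only skip them by decoding them in order,
which is what made placing the fixed-length time stamp behind them
awkward. Continuing the sketch above (parse_tail() is likewise a
hypothetical consumer, with PERF_RECORD_GROUP = 1U << 3 and
PERF_RECORD_CALLCHAIN = 1U << 4 as in the hunk above):

	/* Skip the variable-length payloads documented above. */
	static const char *parse_tail(const char *p, uint32_t type)
	{
		uint64_t nr;

		if (type & PERF_RECORD_GROUP) {
			memcpy(&nr, p, sizeof(nr));	/* u64 nr */
			p += sizeof(uint64_t);
			p += nr * 2 * sizeof(uint64_t);	/* { u64 event, val } cnt[nr] */
		}

		if (type & PERF_RECORD_CALLCHAIN) {
			memcpy(&nr, p, sizeof(nr));	/* u64 nr */
			p += 4 * sizeof(uint64_t);	/* nr, hv, kernel, user */
			p += nr * sizeof(uint64_t);	/* u64 ips[nr] */
		}

		return p;
	}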
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 2d4aebb..4dc8600 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1850,6 +1850,16 @@ static void perf_counter_output(struct perf_counter *counter,
                header.size += sizeof(tid_entry);
        }
 
+       if (record_type & PERF_RECORD_TIME) {
+               /*
+                * Maybe do better on x86 and provide cpu_clock_nmi()
+                */
+               time = sched_clock();
+
+               header.type |= PERF_RECORD_TIME;
+               header.size += sizeof(u64);
+       }
+
        if (record_type & PERF_RECORD_GROUP) {
                header.type |= PERF_RECORD_GROUP;
                header.size += sizeof(u64) +
@@ -1867,16 +1877,6 @@ static void perf_counter_output(struct perf_counter *counter,
                }
        }
 
-       if (record_type & PERF_RECORD_TIME) {
-               /*
-                * Maybe do better on x86 and provide cpu_clock_nmi()
-                */
-               time = sched_clock();
-
-               header.type |= PERF_RECORD_TIME;
-               header.size += sizeof(u64);
-       }
-
        ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
        if (ret)
                return;
@@ -1889,6 +1889,9 @@ static void perf_counter_output(struct perf_counter *counter,
        if (record_type & PERF_RECORD_TID)
                perf_output_put(&handle, tid_entry);
 
+       if (record_type & PERF_RECORD_TIME)
+               perf_output_put(&handle, time);
+
        if (record_type & PERF_RECORD_GROUP) {
                struct perf_counter *leader, *sub;
                u64 nr = counter->nr_siblings;
@@ -1910,9 +1913,6 @@ static void perf_counter_output(struct perf_counter *counter,
        if (callchain)
                perf_output_copy(&handle, callchain, callchain_size);
 
-       if (record_type & PERF_RECORD_TIME)
-               perf_output_put(&handle, time);
-
        perf_output_end(&handle);
 }
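
Note that perf_counter_output() sizes the record in a first pass (the
header.size accounting) and emits it in a second (the perf_output_put()
and perf_output_copy() calls), so both passes must walk the fields in the
same order; the patch therefore moves the size accounting and the emission
of time together, keeping the written layout in step with the header
documentation above.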