perf: Provide better condition for event rotation
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
           Mon, 8 Mar 2010 12:51:20 +0000 (13:51 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Wed, 10 Mar 2010 12:22:36 +0000 (13:22 +0100)
Try to avoid useless rotation and PMU disables.

[ Could be improved by keeping a nr_runnable count to better account
  for the < PERF_STAT_INACTIVE counters ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
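
[Editorial note] The heart of the change is the condition under which a tick actually rotates events: a context only needs rotating when it has events and not all of them are currently scheduled on the PMU (nr_events != nr_active). A minimal sketch of that test, using a hypothetical helper and a stripped-down stand-in for struct perf_event_context; the patch itself open-codes the comparison in perf_event_task_tick(), see the hunks below:

/*
 * Illustrative sketch only: rotation is worthwhile when the context
 * has events and not all of them fit on the PMU at once.
 */
struct ctx_counts {
	int nr_events;	/* events attached to the context */
	int nr_active;	/* events currently programmed on the PMU */
};

static int ctx_needs_rotation(const struct ctx_counts *ctx)
{
	return ctx->nr_events && ctx->nr_events != ctx->nr_active;
}

As the bracketed note above says, this is presumably still an over-approximation: nr_events also counts events in states below inactive, so a nr_runnable count would avoid rotating contexts whose unscheduled events cannot run anyway.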
kernel/perf_event.c

index d810846..52c69a3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1524,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                 */
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
+                       perf_disable();
                        event->pmu->unthrottle(event);
+                       perf_enable();
                }
 
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
 
+               perf_disable();
                event->pmu->read(event);
                now = atomic64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
@@ -1537,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
                if (delta > 0)
                        perf_adjust_period(event, TICK_NSEC, delta);
+               perf_enable();
        }
        raw_spin_unlock(&ctx->lock);
 }
@@ -1546,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-       if (!ctx->nr_events)
-               return;
-
        raw_spin_lock(&ctx->lock);
 
        /* Rotate the first entry last of non-pinned groups */
@@ -1561,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr)
 {
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
+       int rotate = 0;
 
        if (!atomic_read(&nr_events))
                return;
 
        cpuctx = &__get_cpu_var(perf_cpu_context);
-       ctx = curr->perf_event_ctxp;
+       if (cpuctx->ctx.nr_events &&
+           cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+               rotate = 1;
 
-       perf_disable();
+       ctx = curr->perf_event_ctxp;
+       if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+               rotate = 1;
 
        perf_ctx_adjust_freq(&cpuctx->ctx);
        if (ctx)
                perf_ctx_adjust_freq(ctx);
 
+       if (!rotate)
+               return;
+
+       perf_disable();
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1585,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr)
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
        perf_enable();
 }
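
[Editorial note] For reference, the resulting shape of perf_event_task_tick() after this patch, condensed from the hunks above. The rotate_ctx() calls between the sched-out and sched-in steps are not visible in the hunks and are assumed from the surrounding code, so read this as a sketch of the control flow rather than the exact function body:

void perf_event_task_tick(struct task_struct *curr)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	int rotate = 0;

	if (!atomic_read(&nr_events))
		return;

	/* Rotation is only worthwhile when a context has more events
	 * than it could schedule on the PMU. */
	cpuctx = &__get_cpu_var(perf_cpu_context);
	if (cpuctx->ctx.nr_events &&
	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
		rotate = 1;

	ctx = curr->perf_event_ctxp;
	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
		rotate = 1;

	/* Frequency adjustment still runs every tick; its PMU disables
	 * are now taken per event inside perf_ctx_adjust_freq(). */
	perf_ctx_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_ctx_adjust_freq(ctx);

	/* Skip the sched-out/rotate/sched-in cycle, and the global PMU
	 * disable around it, when nothing would move. */
	if (!rotate)
		return;

	perf_disable();
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);		/* assumed from context */
	if (ctx)
		rotate_ctx(ctx);

	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
	perf_enable();
}

The net effect is that rotate_ctx() no longer needs its early bail-out on empty contexts (hence its removal above), and the common case of a tick with nothing to rotate no longer pays for a full PMU disable/enable pair.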