perf: Round robin flexible groups of events using list_rotate_left()
author		Frederic Weisbecker <fweisbec@gmail.com>
		Sat, 9 Jan 2010 20:05:28 +0000 (21:05 +0100)
committer	Frederic Weisbecker <fweisbec@gmail.com>
		Sat, 16 Jan 2010 11:30:28 +0000 (12:30 +0100)
This is more proper than doing it through a list_for_each_entry()
loop that breaks after the first entry.

v2: Don't rotate pinned groups as it's not needed to time-share
them.
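
For reference, a minimal sketch of the list_rotate_left() semantics this
relies on (not necessarily the exact include/linux/list.h source): when the
list is non-empty, move the first entry to the tail, which is exactly what
the removed list_for_each_entry()/break idiom did for the flexible groups.

	/*
	 * Sketch: rotate the list one step by moving the first
	 * entry to the tail (no-op on an empty list).
	 */
	static inline void list_rotate_left(struct list_head *head)
	{
		struct list_head *first;

		if (!list_empty(head)) {
			first = head->next;
			list_move_tail(first, head);
		}
	}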

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
kernel/perf_event.c

index c9f8a75..bbebe28 100644
@@ -1454,25 +1454,16 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-       struct perf_event *event;
-
        if (!ctx->nr_events)
                return;
 
        raw_spin_lock(&ctx->lock);
-       /*
-        * Rotate the first entry last (works just fine for group events too):
-        */
+
+       /* Rotate the first entry last of non-pinned groups */
        perf_disable();
-       list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
-               list_move_tail(&event->group_entry, &ctx->pinned_groups);
-               break;
-       }
 
-       list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
-               list_move_tail(&event->group_entry, &ctx->flexible_groups);
-               break;
-       }
+       list_rotate_left(&ctx->flexible_groups);
+
        perf_enable();
 
        raw_spin_unlock(&ctx->lock);