perf_counter: Increase mmap limit
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 679c3b5..6cdf824 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -134,8 +134,6 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 
        list_add_rcu(&counter->event_entry, &ctx->event_list);
        ctx->nr_counters++;
-       if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
-               ctx->nr_enabled++;
 }
 
 /*
@@ -150,8 +148,6 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
        if (list_empty(&counter->list_entry))
                return;
        ctx->nr_counters--;
-       if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
-               ctx->nr_enabled--;
 
        list_del_init(&counter->list_entry);
        list_del_rcu(&counter->event_entry);
@@ -406,7 +402,6 @@ static void __perf_counter_disable(void *info)
                else
                        counter_sched_out(counter, cpuctx, ctx);
                counter->state = PERF_COUNTER_STATE_OFF;
-               ctx->nr_enabled--;
        }
 
        spin_unlock_irqrestore(&ctx->lock, flags);
@@ -448,7 +443,6 @@ static void perf_counter_disable(struct perf_counter *counter)
        if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
                update_counter_times(counter);
                counter->state = PERF_COUNTER_STATE_OFF;
-               ctx->nr_enabled--;
        }
 
        spin_unlock_irq(&ctx->lock);
@@ -597,6 +591,8 @@ static void add_counter_to_ctx(struct perf_counter *counter,
 
 /*
  * Cross CPU call to install and enable a performance counter
+ *
+ * Must be called with ctx->mutex held
  */
 static void __perf_install_in_context(void *info)
 {
@@ -757,7 +753,6 @@ static void __perf_counter_enable(void *info)
                goto unlock;
        counter->state = PERF_COUNTER_STATE_INACTIVE;
        counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
-       ctx->nr_enabled++;
 
        /*
         * If the counter is in a group and isn't the group leader,
@@ -848,7 +843,6 @@ static void perf_counter_enable(struct perf_counter *counter)
                counter->state = PERF_COUNTER_STATE_INACTIVE;
                counter->tstamp_enabled =
                        ctx->time - counter->total_time_enabled;
-               ctx->nr_enabled++;
        }
  out:
        spin_unlock_irq(&ctx->lock);
@@ -908,8 +902,7 @@ static int context_equiv(struct perf_counter_context *ctx1,
                         struct perf_counter_context *ctx2)
 {
        return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
-               && ctx1->parent_gen == ctx2->parent_gen
-               && ctx1->nr_enabled == ctx2->nr_enabled;
+               && ctx1->parent_gen == ctx2->parent_gen;
 }
 
 /*
@@ -1074,79 +1067,26 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
        __perf_counter_sched_in(ctx, cpuctx, cpu);
 }
 
-int perf_counter_task_disable(void)
+int perf_counter_task_enable(void)
 {
-       struct task_struct *curr = current;
-       struct perf_counter_context *ctx = curr->perf_counter_ctxp;
        struct perf_counter *counter;
-       unsigned long flags;
-
-       if (!ctx || !ctx->nr_counters)
-               return 0;
 
-       local_irq_save(flags);
-
-       __perf_counter_task_sched_out(ctx);
-
-       spin_lock(&ctx->lock);
-
-       /*
-        * Disable all the counters:
-        */
-       perf_disable();
-
-       list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-               if (counter->state != PERF_COUNTER_STATE_ERROR) {
-                       update_group_times(counter);
-                       counter->state = PERF_COUNTER_STATE_OFF;
-               }
-       }
-
-       perf_enable();
-
-       spin_unlock_irqrestore(&ctx->lock, flags);
+       mutex_lock(&current->perf_counter_mutex);
+       list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+               perf_counter_enable(counter);
+       mutex_unlock(&current->perf_counter_mutex);
 
        return 0;
 }
 
-int perf_counter_task_enable(void)
+int perf_counter_task_disable(void)
 {
-       struct task_struct *curr = current;
-       struct perf_counter_context *ctx = curr->perf_counter_ctxp;
        struct perf_counter *counter;
-       unsigned long flags;
-       int cpu;
 
-       if (!ctx || !ctx->nr_counters)
-               return 0;
-
-       local_irq_save(flags);
-       cpu = smp_processor_id();
-
-       __perf_counter_task_sched_out(ctx);
-
-       spin_lock(&ctx->lock);
-
-       /*
-        * Disable all the counters:
-        */
-       perf_disable();
-
-       list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-               if (counter->state > PERF_COUNTER_STATE_OFF)
-                       continue;
-               counter->state = PERF_COUNTER_STATE_INACTIVE;
-               counter->tstamp_enabled =
-                       ctx->time - counter->total_time_enabled;
-               counter->hw_event.disabled = 0;
-       }
-       perf_enable();
-
-       spin_unlock(&ctx->lock);
-
-       perf_counter_task_sched_in(curr, cpu);
-
-       local_irq_restore(flags);
+       mutex_lock(&current->perf_counter_mutex);
+       list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+               perf_counter_disable(counter);
+       mutex_unlock(&current->perf_counter_mutex);
 
        return 0;
 }
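
The rewritten perf_counter_task_enable()/perf_counter_task_disable() above back the per-task prctl() controls: instead of scheduling the whole context out and walking every counter under ctx->lock with interrupts off, they take the new per-task perf_counter_mutex and flip only the counters this task itself created, reusing the regular perf_counter_enable()/perf_counter_disable() paths. A minimal userspace sketch of driving them follows; the PR_TASK_PERF_COUNTERS_* option names are the perf_counter-era prctl interface, and the fallback values in the #ifndef blocks are assumptions to be checked against the kernel headers in use.

/*
 * Sketch: toggle all counters created by the calling task via prctl().
 * The fallback values below are assumptions; verify them against
 * <linux/prctl.h> for the kernel being targeted.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_TASK_PERF_COUNTERS_DISABLE
#define PR_TASK_PERF_COUNTERS_DISABLE	31
#endif
#ifndef PR_TASK_PERF_COUNTERS_ENABLE
#define PR_TASK_PERF_COUNTERS_ENABLE	32
#endif

int main(void)
{
	/* stop every counter this task opened ... */
	if (prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0))
		perror("prctl(PR_TASK_PERF_COUNTERS_DISABLE)");

	/* ... code that should not be measured runs here ... */

	/* ... and start the counters again */
	if (prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0))
		perror("prctl(PR_TASK_PERF_COUNTERS_ENABLE)");

	return 0;
}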
@@ -1414,6 +1354,11 @@ static int perf_release(struct inode *inode, struct file *file)
        perf_counter_remove_from_context(counter);
        mutex_unlock(&ctx->mutex);
 
+       mutex_lock(&counter->owner->perf_counter_mutex);
+       list_del_init(&counter->owner_entry);
+       mutex_unlock(&counter->owner->perf_counter_mutex);
+       put_task_struct(counter->owner);
+
        free_counter(counter);
        put_context(ctx);
 
@@ -1496,13 +1441,13 @@ static void perf_counter_for_each_sibling(struct perf_counter *counter,
        struct perf_counter_context *ctx = counter->ctx;
        struct perf_counter *sibling;
 
-       spin_lock_irq(&ctx->lock);
+       mutex_lock(&ctx->mutex);
        counter = counter->group_leader;
 
        func(counter);
        list_for_each_entry(sibling, &counter->sibling_list, list_entry)
                func(sibling);
-       spin_unlock_irq(&ctx->lock);
+       mutex_unlock(&ctx->mutex);
 }
 
 static void perf_counter_for_each_child(struct perf_counter *counter,
@@ -1759,6 +1704,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
        user_extra = nr_pages + 1;
        user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+
+       /*
+        * Increase the limit linearly with more CPUs:
+        */
+       user_lock_limit *= num_online_cpus();
+
        user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
        extra = 0;
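
For scale: the per-user budget is derived from sysctl_perf_counter_mlock (in kilobytes, hence the >> (PAGE_SHIFT - 10) conversion to pages) and is now multiplied by the number of online CPUs, since per-CPU counters each need their own mmap()ed buffer. As a worked example with hypothetical numbers, a 512 KB sysctl value, 4 KB pages and 8 CPUs online give user_lock_limit = 512 >> 2 = 128 pages, scaled to 1024 pages, i.e. roughly 4 MB of buffer space per user before the excess has to fit within RLIMIT_MEMLOCK.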
@@ -3270,6 +3221,12 @@ SYSCALL_DEFINE5(perf_counter_open,
        perf_install_in_context(ctx, counter, cpu);
        mutex_unlock(&ctx->mutex);
 
+       counter->owner = current;
+       get_task_struct(current);
+       mutex_lock(&current->perf_counter_mutex);
+       list_add_tail(&counter->owner_entry, &current->perf_counter_list);
+       mutex_unlock(&current->perf_counter_mutex);
+
        fput_light(counter_file, fput_needed2);
 
 out_fput:
@@ -3414,7 +3371,7 @@ __perf_counter_exit_task(struct task_struct *child,
        struct perf_counter *parent_counter;
 
        update_counter_times(child_counter);
-       list_del_counter(child_counter, child_ctx);
+       perf_counter_remove_from_context(child_counter);
 
        parent_counter = child_counter->parent;
        /*
@@ -3486,6 +3443,9 @@ void perf_counter_init_task(struct task_struct *child)
 
        child->perf_counter_ctxp = NULL;
 
+       mutex_init(&child->perf_counter_mutex);
+       INIT_LIST_HEAD(&child->perf_counter_list);
+
        /*
         * This is executed from the parent task context, so inherit
         * counters that have been marked for cloning.
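
The per-task and per-counter bookkeeping used throughout this diff (current->perf_counter_mutex, current->perf_counter_list, counter->owner, counter->owner_entry) is declared outside this file. A rough sketch of the companion declarations, reconstructed from how the fields are used here rather than copied from the patch:

/* struct task_struct additions (include/linux/sched.h) -- reconstruction */
struct mutex		perf_counter_mutex;	/* protects the list below       */
struct list_head	perf_counter_list;	/* counters created by this task */

/* struct perf_counter additions (include/linux/perf_counter.h) -- reconstruction */
struct list_head	owner_entry;		/* entry on the owner's list     */
struct task_struct	*owner;			/* task that opened the counter;
						   pinned via get_task_struct() */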