sched: Update normalized values on user updates via proc
[safe/jmp/linux-2.6] / kernel / perf_event.c
index abe1ef4..6b7ddba 100644
@@ -246,6 +246,49 @@ static void perf_unpin_context(struct perf_event_context *ctx)
        put_ctx(ctx);
 }
 
+static inline u64 perf_clock(void)
+{
+       return cpu_clock(smp_processor_id());
+}
+
+/*
+ * Update the record of the current time in a context.
+ */
+static void update_context_time(struct perf_event_context *ctx)
+{
+       u64 now = perf_clock();
+
+       ctx->time += now - ctx->timestamp;
+       ctx->timestamp = now;
+}
+
+/*
+ * Update the total_time_enabled and total_time_running fields for an event.
+ */
+static void update_event_times(struct perf_event *event)
+{
+       struct perf_event_context *ctx = event->ctx;
+       u64 run_end;
+
+       if (event->state < PERF_EVENT_STATE_INACTIVE ||
+           event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
+               return;
+
+       if (ctx->is_active)
+               run_end = ctx->time;
+       else
+               run_end = event->tstamp_stopped;
+
+       event->total_time_enabled = run_end - event->tstamp_enabled;
+
+       if (event->state == PERF_EVENT_STATE_INACTIVE)
+               run_end = event->tstamp_stopped;
+       else
+               run_end = ctx->time;
+
+       event->total_time_running = run_end - event->tstamp_running;
+}
+
 /*
  * Add an event to the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
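
The time-accounting helpers perf_clock(), update_context_time() and update_event_times() move up from below (see the removal hunk at @@ -447,50) so that list_del_event() in the next hunk can call them. The move also carries a fix: total_time_enabled is now derived from ctx->time only while the context is active, and is clamped at the event's tstamp_stopped otherwise, matching how total_time_running is computed. For example, an event with tstamp_enabled = 100, tstamp_running = 120 and tstamp_stopped = 150 in a context that is no longer active now reports total_time_enabled = 50 and total_time_running = 30.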
@@ -294,6 +337,18 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
        if (event->group_leader != event)
                event->group_leader->nr_siblings--;
 
+       update_event_times(event);
+
+       /*
+        * If the event was in an error state, keep it
+        * that way; otherwise bogus counts will be
+        * returned on read(). The only way out of the
+        * error state is an explicit re-enable of
+        * the event.
+        */
+       if (event->state > PERF_EVENT_STATE_OFF)
+               event->state = PERF_EVENT_STATE_OFF;
+
        /*
         * If this was a group event with sibling events then
         * upgrade the siblings to singleton events by adding them
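
list_del_event() now refreshes the event's times before the event leaves the context, then drops any state above PERF_EVENT_STATE_OFF down to OFF. An error state, which sits below OFF, is deliberately preserved: read() keeps reporting the error instead of bogus counts, and only an explicit re-enable can clear it.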
@@ -447,50 +502,11 @@ retry:
         * can remove the event safely, if the call above did not
         * succeed.
         */
-       if (!list_empty(&event->group_entry)) {
+       if (!list_empty(&event->group_entry))
                list_del_event(event, ctx);
-       }
        spin_unlock_irq(&ctx->lock);
 }
 
-static inline u64 perf_clock(void)
-{
-       return cpu_clock(smp_processor_id());
-}
-
-/*
- * Update the record of the current time in a context.
- */
-static void update_context_time(struct perf_event_context *ctx)
-{
-       u64 now = perf_clock();
-
-       ctx->time += now - ctx->timestamp;
-       ctx->timestamp = now;
-}
-
-/*
- * Update the total_time_enabled and total_time_running fields for an event.
- */
-static void update_event_times(struct perf_event *event)
-{
-       struct perf_event_context *ctx = event->ctx;
-       u64 run_end;
-
-       if (event->state < PERF_EVENT_STATE_INACTIVE ||
-           event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
-               return;
-
-       event->total_time_enabled = ctx->time - event->tstamp_enabled;
-
-       if (event->state == PERF_EVENT_STATE_INACTIVE)
-               run_end = event->tstamp_stopped;
-       else
-               run_end = ctx->time;
-
-       event->total_time_running = run_end - event->tstamp_running;
-}
-
 /*
  * Update total_time_enabled and total_time_running for all events in a group.
  */
@@ -1033,10 +1049,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
        update_context_time(ctx);
 
        perf_disable();
-       if (ctx->nr_active)
+       if (ctx->nr_active) {
                list_for_each_entry(event, &ctx->group_list, group_entry)
                        group_sched_out(event, cpuctx, ctx);
-
+       }
        perf_enable();
  out:
        spin_unlock(&ctx->lock);
@@ -1704,16 +1720,10 @@ static void free_event(struct perf_event *event)
        call_rcu(&event->rcu_head, free_event_rcu);
 }
 
-/*
- * Called when the last reference to the file is gone.
- */
-static int perf_release(struct inode *inode, struct file *file)
+int perf_event_release_kernel(struct perf_event *event)
 {
-       struct perf_event *event = file->private_data;
        struct perf_event_context *ctx = event->ctx;
 
-       file->private_data = NULL;
-
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
        perf_event_remove_from_context(event);
@@ -1728,26 +1738,19 @@ static int perf_release(struct inode *inode, struct file *file)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 
-int perf_event_release_kernel(struct perf_event *event)
+/*
+ * Called when the last reference to the file is gone.
+ */
+static int perf_release(struct inode *inode, struct file *file)
 {
-       struct perf_event_context *ctx = event->ctx;
-
-       WARN_ON_ONCE(ctx->parent_ctx);
-       mutex_lock(&ctx->mutex);
-       perf_event_remove_from_context(event);
-       mutex_unlock(&ctx->mutex);
-
-       mutex_lock(&event->owner->perf_event_mutex);
-       list_del_init(&event->owner_entry);
-       mutex_unlock(&event->owner->perf_event_mutex);
-       put_task_struct(event->owner);
+       struct perf_event *event = file->private_data;
 
-       free_event(event);
+       file->private_data = NULL;
 
-       return 0;
+       return perf_event_release_kernel(event);
 }
-EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 
 static int perf_event_read_size(struct perf_event *event)
 {
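
Reordering perf_release() after perf_event_release_kernel() lets the file-release path become a thin wrapper, and the exported function gives in-kernel users that never had a struct file a symmetric teardown. A minimal sketch of such a pairing, assuming the four-argument perf_event_create_kernel_counter(attr, cpu, pid, callback) signature of this era with pid == -1 meaning a per-CPU counter; start_cycle_counter() and cycles_event are hypothetical names:

        #include <linux/perf_event.h>
        #include <linux/err.h>

        static struct perf_event *cycles_event;

        static int start_cycle_counter(int cpu)
        {
                struct perf_event_attr attr = {
                        .type   = PERF_TYPE_HARDWARE,
                        .config = PERF_COUNT_HW_CPU_CYCLES,
                        .size   = sizeof(attr),
                };

                /* pid == -1: count on @cpu regardless of which task runs */
                cycles_event = perf_event_create_kernel_counter(&attr, cpu, -1, NULL);
                if (IS_ERR(cycles_event))
                        return PTR_ERR(cycles_event);
                return 0;
        }

        static void stop_cycle_counter(void)
        {
                /* No file ever referenced this event; release it directly. */
                perf_event_release_kernel(cycles_event);
        }

The IS_ERR() check anticipates the error-handling hunks near the end of this patch, which make the create path return ERR_PTR() values instead of NULL.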
@@ -1837,7 +1840,7 @@ static int perf_event_read_group(struct perf_event *event,
 
                size = n * sizeof(u64);
 
-               if (copy_to_user(buf + size, values, size)) {
+               if (copy_to_user(buf + ret, values, size)) {
                        ret = -EFAULT;
                        goto unlock;
                }
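
In perf_event_read_group(), ret holds the running byte offset already copied for the group leader and earlier siblings, while size is only the length of the current sibling's chunk; copying to buf + size therefore wrote every sibling to the same, wrong offset. buf + ret places each chunk at the correct running offset.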
@@ -2207,6 +2210,7 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
        perf_mmap_free_page((unsigned long)data->user_page);
        for (i = 0; i < data->nr_pages; i++)
                perf_mmap_free_page((unsigned long)data->data_pages[i]);
+       kfree(data);
 }
 
 #else
@@ -2247,6 +2251,7 @@ static void perf_mmap_data_free_work(struct work_struct *work)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));
 
        vfree(base);
+       kfree(data);
 }
 
 static void perf_mmap_data_free(struct perf_mmap_data *data)
@@ -2352,7 +2357,6 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
 
        data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
        perf_mmap_data_free(data);
-       kfree(data);
 }
 
 static void perf_mmap_data_release(struct perf_event *event)
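
This and the two hunks above move the kfree() of struct perf_mmap_data out of the RCU callback and into the two perf_mmap_data_free() implementations, so each allocation flavour (one page per buffer page, or the vmalloc-based variant that defers freeing to a workqueue) also frees the structure itself.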
@@ -3837,6 +3841,20 @@ static int perf_swevent_is_counting(struct perf_event *event)
 static int perf_tp_event_match(struct perf_event *event,
                                struct perf_sample_data *data);
 
+static int perf_exclude_event(struct perf_event *event,
+                             struct pt_regs *regs)
+{
+       if (regs) {
+               if (event->attr.exclude_user && user_mode(regs))
+                       return 1;
+
+               if (event->attr.exclude_kernel && !user_mode(regs))
+                       return 1;
+       }
+
+       return 0;
+}
+
 static int perf_swevent_match(struct perf_event *event,
                                enum perf_type_id type,
                                u32 event_id,
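
perf_exclude_event() factors the exclude_user/exclude_kernel test out of perf_swevent_match() so the breakpoint code later in this patch can share it. Note the inverted sense: the helper returns 1 when the event should be filtered out, where the open-coded checks returned 0 for "no match".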
@@ -3848,16 +3866,12 @@ static int perf_swevent_match(struct perf_event *event,
 
        if (event->attr.type != type)
                return 0;
+
        if (event->attr.config != event_id)
                return 0;
 
-       if (regs) {
-               if (event->attr.exclude_user && user_mode(regs))
-                       return 0;
-
-               if (event->attr.exclude_kernel && !user_mode(regs))
-                       return 0;
-       }
+       if (perf_exclude_event(event, regs))
+               return 0;
 
        if (event->attr.type == PERF_TYPE_TRACEPOINT &&
            !perf_tp_event_match(event, data))
@@ -3880,45 +3894,50 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
        }
 }
 
-/*
- * Must be called with preemption disabled
- */
-int perf_swevent_get_recursion_context(int **recursion)
+int perf_swevent_get_recursion_context(void)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+       struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+       int rctx;
 
        if (in_nmi())
-               *recursion = &cpuctx->recursion[3];
+               rctx = 3;
        else if (in_irq())
-               *recursion = &cpuctx->recursion[2];
+               rctx = 2;
        else if (in_softirq())
-               *recursion = &cpuctx->recursion[1];
+               rctx = 1;
        else
-               *recursion = &cpuctx->recursion[0];
+               rctx = 0;
 
-       if (**recursion)
+       if (cpuctx->recursion[rctx]) {
+               put_cpu_var(perf_cpu_context);
                return -1;
+       }
 
-       (**recursion)++;
+       cpuctx->recursion[rctx]++;
+       barrier();
 
-       return 0;
+       return rctx;
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void perf_swevent_put_recursion_context(int *recursion)
+void perf_swevent_put_recursion_context(int rctx)
 {
-       (*recursion)--;
+       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+       barrier();
+       cpuctx->recursion[rctx]--;
+       put_cpu_var(perf_cpu_context);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
-static void __do_perf_sw_event(enum perf_type_id type, u32 event_id,
-                              u64 nr, int nmi,
-                              struct perf_sample_data *data,
-                              struct pt_regs *regs)
+static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+                                   u64 nr, int nmi,
+                                   struct perf_sample_data *data,
+                                   struct pt_regs *regs)
 {
+       struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
+       cpuctx = &__get_cpu_var(perf_cpu_context);
        rcu_read_lock();
        perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
                                 nr, nmi, data, regs);
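
The recursion API stops handing out a pointer into the per-CPU recursion array. perf_swevent_get_recursion_context() now disables preemption itself via get_cpu_var() and returns a context index (or -1 if that level is already busy), and perf_swevent_put_recursion_context() re-enables preemption with put_cpu_var(). The barrier() calls are compiler barriers that keep the recursion-flag updates from being reordered around the event processing. The reworked __perf_sw_event() in the next hunk shows the call pattern.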
@@ -3932,34 +3951,22 @@ static void __do_perf_sw_event(enum perf_type_id type, u32 event_id,
        rcu_read_unlock();
 }
 
-static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
-                                   u64 nr, int nmi,
-                                   struct perf_sample_data *data,
-                                   struct pt_regs *regs)
+void __perf_sw_event(u32 event_id, u64 nr, int nmi,
+                           struct pt_regs *regs, u64 addr)
 {
-       int *recursion;
+       struct perf_sample_data data;
+       int rctx;
 
-       preempt_disable();
+       rctx = perf_swevent_get_recursion_context();
+       if (rctx < 0)
+               return;
 
-       if (perf_swevent_get_recursion_context(&recursion))
-               goto out;
+       data.addr = addr;
+       data.raw  = NULL;
 
-       __do_perf_sw_event(type, event_id, nr, nmi, data, regs);
+       do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
 
-       perf_swevent_put_recursion_context(recursion);
-out:
-       preempt_enable();
-}
-
-void __perf_sw_event(u32 event_id, u64 nr, int nmi,
-                           struct pt_regs *regs, u64 addr)
-{
-       struct perf_sample_data data = {
-               .addr = addr,
-       };
-
-       do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
-                               &data, regs);
+       perf_swevent_put_recursion_context(rctx);
 }
 
 static void perf_swevent_read(struct perf_event *event)
@@ -4004,6 +4011,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
        event->pmu->read(event);
 
        data.addr = 0;
+       data.period = event->hw.last_period;
        regs = get_irq_regs();
        /*
         * In case we exclude kernel IPs or are somehow not in interrupt
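
perf_swevent_hrtimer() now fills in data.period from hw.last_period, so samples produced by hrtimer-driven software events carry the actual sampling period rather than an uninitialized field.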
@@ -4183,7 +4191,7 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
                regs = task_pt_regs(current);
 
        /* Trace events already protected against recursion */
-       __do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+       do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
                                &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
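
Tracepoints are already protected against recursion on the tracing side, so perf_tp_event() calls the unguarded helper directly; with the old guarded wrapper folded into __perf_sw_event(), the underscore-prefixed __do_perf_sw_event() name is retired and the function simply becomes do_perf_sw_event().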
@@ -4295,15 +4303,17 @@ static const struct pmu *bp_perf_event_init(struct perf_event *bp)
        return &perf_ops_bp;
 }
 
-void perf_bp_event(struct perf_event *bp, void *regs)
+void perf_bp_event(struct perf_event *bp, void *data)
 {
-       /* TODO */
+       struct perf_sample_data sample;
+       struct pt_regs *regs = data;
+
+       sample.addr = bp->attr.bp_addr;
+
+       if (!perf_exclude_event(bp, regs))
+               perf_swevent_add(bp, 1, 1, &sample, regs);
 }
 #else
-static void bp_perf_event_destroy(struct perf_event *event)
-{
-}
-
 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
 {
        return NULL;
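
perf_bp_event() loses its TODO stub: a breakpoint hit is now accounted like any other software event, honouring exclude_user/exclude_kernel through the perf_exclude_event() helper introduced above. The unused bp_perf_event_destroy() stub in the !CONFIG_HAVE_HW_BREAKPOINT branch is dropped.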
@@ -4777,14 +4787,17 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
         */
 
        ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx))
-               return NULL;
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_exit;
+       }
 
        event = perf_event_alloc(attr, cpu, ctx, NULL,
                                     NULL, callback, GFP_KERNEL);
-       err = PTR_ERR(event);
-       if (IS_ERR(event))
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
                goto err_put_context;
+       }
 
        event->filp = NULL;
        WARN_ON_ONCE(ctx->parent_ctx);
@@ -4801,11 +4814,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
        return event;
 
-err_put_context:
-       if (err < 0)
-               put_ctx(ctx);
-
-       return NULL;
+ err_put_context:
+       put_ctx(ctx);
+ err_exit:
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
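perf_event_create_kernel_counter() now propagates the precise failure via ERR_PTR() instead of collapsing every error to NULL, so callers switch from NULL checks to IS_ERR()/PTR_ERR(). A hedged caller fragment, where triggered is a hypothetical overflow callback and attr/cpu come from the surrounding code:

        struct perf_event *bp;

        bp = perf_event_create_kernel_counter(&attr, cpu, -1, triggered);
        if (IS_ERR(bp))
                return PTR_ERR(bp);     /* e.g. -EINVAL or -ENOSPC, not a bare NULL */
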
@@ -4943,7 +4955,6 @@ __perf_event_exit_task(struct perf_event *child_event,
 {
        struct perf_event *parent_event;
 
-       update_event_times(child_event);
        perf_event_remove_from_context(child_event);
 
        parent_event = child_event->parent;
@@ -4995,6 +5006,7 @@ void perf_event_exit_task(struct task_struct *child)
         * the events from it.
         */
        unclone_ctx(child_ctx);
+       update_context_time(child_ctx);
        spin_unlock_irqrestore(&child_ctx->lock, flags);
 
        /*
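
Finally, on task exit the per-event update_event_times() call disappears from __perf_event_exit_task(); instead perf_event_exit_task() advances the context clock once, under ctx->lock, and each child event's times are refreshed by list_del_event() when the event is removed from the context.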