Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 35df94e..40a996e 100644
@@ -338,7 +338,16 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
                event->group_leader->nr_siblings--;
 
        update_event_times(event);
-       event->state = PERF_EVENT_STATE_OFF;
+
+       /*
+        * If event was in error state, then keep it
+        * that way, otherwise bogus counts will be
+        * returned on read(). The only way to get out
+        * of error state is by explicit re-enabling
+        * of the event
+        */
+       if (event->state > PERF_EVENT_STATE_OFF)
+               event->state = PERF_EVENT_STATE_OFF;
 
        /*
         * If this was a group event with sibling events then
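
Note on the hunk above: PERF_EVENT_STATE_ERROR sorts below PERF_EVENT_STATE_OFF, so the new check keeps an errored event in error state instead of flipping it to OFF and returning bogus counts on read(). Getting out of error state takes an explicit re-enable by the event's owner. A minimal userspace sketch of that re-enable, assuming fd came from perf_event_open() (the reset is optional and only clears the stale count):

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* fd: perf event file descriptor obtained from perf_event_open() */
    ioctl(fd, PERF_EVENT_IOC_RESET, 0);   /* discard any stale count */
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);  /* the explicit re-enable the comment refers to */
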
@@ -467,7 +476,7 @@ static void perf_event_remove_from_context(struct perf_event *event)
        if (!task) {
                /*
                 * Per cpu events are removed via an smp call and
-                * the removal is always sucessful.
+                * the removal is always successful.
                 */
                smp_call_function_single(event->cpu,
                                         __perf_event_remove_from_context,
@@ -836,7 +845,7 @@ perf_install_in_context(struct perf_event_context *ctx,
        if (!task) {
                /*
                 * Per cpu events are installed via an smp call and
-                * the install is always sucessful.
+                * the install is always successful.
                 */
                smp_call_function_single(cpu, __perf_install_in_context,
                                         event, 1);
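
Both per-cpu paths above use the same mechanism: the context update is pushed to the owning CPU with smp_call_function_single() and waited on, which is why the comments can say removal/installation always succeeds. A generic sketch of that pattern, with illustrative names rather than this file's helpers:

    /* runs on the target CPU, in IPI context */
    static void __do_ctx_update(void *info)
    {
            struct perf_event *event = info;

            /* manipulate the per-cpu context of 'event' here */
    }

    /* wait == 1: do not return until the remote CPU has run the function */
    smp_call_function_single(event->cpu, __do_ctx_update, event, 1);
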
@@ -2201,6 +2210,7 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
        perf_mmap_free_page((unsigned long)data->user_page);
        for (i = 0; i < data->nr_pages; i++)
                perf_mmap_free_page((unsigned long)data->data_pages[i]);
+       kfree(data);
 }
 
 #else
@@ -2241,6 +2251,7 @@ static void perf_mmap_data_free_work(struct work_struct *work)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));
 
        vfree(base);
+       kfree(data);
 }
 
 static void perf_mmap_data_free(struct perf_mmap_data *data)
@@ -2346,7 +2357,6 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
 
        data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
        perf_mmap_data_free(data);
-       kfree(data);
 }
 
 static void perf_mmap_data_release(struct perf_event *event)
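
The three mmap-data hunks above move the kfree() of the struct perf_mmap_data descriptor into perf_mmap_data_free() itself, in both the page-based and the vmalloc-based variant, so the RCU callback only has to call the free routine. A generic sketch of that RCU-deferred teardown, using illustrative names rather than this file's:

    struct my_buf {
            struct rcu_head rcu_head;
            /* ... backing pages, counters ... */
    };

    static void my_buf_free(struct my_buf *buf)
    {
            /* release the backing pages ... */
            kfree(buf);             /* the descriptor is now freed here */
    }

    static void my_buf_free_rcu(struct rcu_head *head)
    {
            struct my_buf *buf = container_of(head, struct my_buf, rcu_head);

            my_buf_free(buf);
    }

    /* release path: defer the free until all RCU readers are done */
    call_rcu(&buf->rcu_head, my_buf_free_rcu);
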
@@ -4001,6 +4011,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
        event->pmu->read(event);
 
        data.addr = 0;
+       data.period = event->hw.last_period;
        regs = get_irq_regs();
        /*
         * In case we exclude kernel IPs or are somehow not in interrupt
@@ -4303,10 +4314,6 @@ void perf_bp_event(struct perf_event *bp, void *data)
                perf_swevent_add(bp, 1, 1, &sample, regs);
 }
 #else
-static void bp_perf_event_destroy(struct perf_event *event)
-{
-}
-
 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
 {
        return NULL;
@@ -4780,14 +4787,17 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
         */
 
        ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx))
-               return NULL;
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_exit;
+       }
 
        event = perf_event_alloc(attr, cpu, ctx, NULL,
                                     NULL, callback, GFP_KERNEL);
-       err = PTR_ERR(event);
-       if (IS_ERR(event))
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
                goto err_put_context;
+       }
 
        event->filp = NULL;
        WARN_ON_ONCE(ctx->parent_ctx);
@@ -4804,11 +4814,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
        return event;
 
-err_put_context:
-       if (err < 0)
-               put_ctx(ctx);
-
-       return NULL;
+ err_put_context:
+       put_ctx(ctx);
+ err_exit:
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
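
With the last two hunks, perf_event_create_kernel_counter() reports failures as ERR_PTR() values instead of returning NULL, so in-kernel users must test the result with IS_ERR(). A minimal sketch of a hypothetical caller, assuming the (attr, cpu, pid, callback) argument order used by this version of the function:

    struct perf_event_attr attr = {
            .type           = PERF_TYPE_HARDWARE,
            .config         = PERF_COUNT_HW_CPU_CYCLES,
            .size           = sizeof(attr),
            .disabled       = 1,
    };
    struct perf_event *event;

    /* cpu 0, no specific task, no overflow callback -- choices are illustrative */
    event = perf_event_create_kernel_counter(&attr, 0, -1, NULL);
    if (IS_ERR(event))
            return PTR_ERR(event);  /* NULL is no longer returned on error */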