diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9425c96..6b7ddba 100644
@@ -338,7 +338,16 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
                event->group_leader->nr_siblings--;
 
        update_event_times(event);
-       event->state = PERF_EVENT_STATE_OFF;
+
+       /*
+        * If event was in error state, then keep it
+        * that way, otherwise bogus counts will be
+        * returned on read(). The only way to get out
+        * of error state is by explicit re-enabling
+        * of the event
+        */
+       if (event->state > PERF_EVENT_STATE_OFF)
+               event->state = PERF_EVENT_STATE_OFF;
 
        /*
         * If this was a group event with sibling events then
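
This first hunk fixes bogus counts reported after an event drops into error
state: in the kernel's state enum PERF_EVENT_STATE_ERROR (-2) sorts below
PERF_EVENT_STATE_OFF (-1), so the new "state > OFF" guard still pulls
active/inactive events down to OFF on detach but leaves ERROR untouched until
the event is explicitly re-enabled. A minimal userspace sketch of that
ordering (the enum values match the kernel's; the rest is illustrative):

    #include <assert.h>

    enum perf_event_active_state {          /* same ordering as the kernel */
            PERF_EVENT_STATE_ERROR    = -2,
            PERF_EVENT_STATE_OFF      = -1,
            PERF_EVENT_STATE_INACTIVE =  0,
            PERF_EVENT_STATE_ACTIVE   =  1,
    };

    /* Detach-time transition: only states above OFF are lowered to OFF,
     * so ERROR survives until an explicit re-enable. */
    static enum perf_event_active_state on_detach(enum perf_event_active_state s)
    {
            if (s > PERF_EVENT_STATE_OFF)
                    s = PERF_EVENT_STATE_OFF;
            return s;
    }

    int main(void)
    {
            assert(on_detach(PERF_EVENT_STATE_ACTIVE) == PERF_EVENT_STATE_OFF);
            assert(on_detach(PERF_EVENT_STATE_ERROR)  == PERF_EVENT_STATE_ERROR);
            return 0;
    }
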
@@ -1831,7 +1840,7 @@ static int perf_event_read_group(struct perf_event *event,
 
                size = n * sizeof(u64);
 
-               if (copy_to_user(buf + size, values, size)) {
+               if (copy_to_user(buf + ret, values, size)) {
                        ret = -EFAULT;
                        goto unlock;
                }
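
In perf_event_read_group(), "ret" is the running byte offset into the user
buffer (it already covers the group leader's record written before the
sibling loop), while "size" is only the length of the current sibling's
record. Offsetting by "size" therefore made every sibling record land on the
same spot and clobber its predecessor. A userspace analog of the corrected
loop (memcpy standing in for copy_to_user; all names are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Append fixed-size records at the running offset 'ret'; using
     * 'dst + size' instead (the bug) would overwrite record 0 forever. */
    static size_t copy_records(uint64_t *dst, uint64_t recs[][2], int n)
    {
            size_t ret = 0;

            for (int i = 0; i < n; i++) {
                    size_t size = 2 * sizeof(uint64_t);

                    memcpy((char *)dst + ret, recs[i], size);
                    ret += size;
            }
            return ret;
    }

    int main(void)
    {
            uint64_t recs[3][2] = { {1, 2}, {3, 4}, {5, 6} };
            uint64_t out[6];

            assert(copy_records(out, recs, 3) == sizeof(out));
            assert(out[0] == 1 && out[5] == 6);
            return 0;
    }
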
@@ -2201,6 +2210,7 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
        perf_mmap_free_page((unsigned long)data->user_page);
        for (i = 0; i < data->nr_pages; i++)
                perf_mmap_free_page((unsigned long)data->data_pages[i]);
+       kfree(data);
 }
 
 #else
@@ -2241,6 +2251,7 @@ static void perf_mmap_data_free_work(struct work_struct *work)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));
 
        vfree(base);
+       kfree(data);
 }
 
 static void perf_mmap_data_free(struct perf_mmap_data *data)
@@ -2346,7 +2357,6 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
 
        data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
        perf_mmap_data_free(data);
-       kfree(data);
 }
 
 static void perf_mmap_data_release(struct perf_event *event)
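
The three hunks above move the kfree() of the struct perf_mmap_data
descriptor out of the RCU callback and into both perf_mmap_data_free()
flavors. The reason is the CONFIG_PERF_USE_VMALLOC variant: there,
perf_mmap_data_free() only schedules perf_mmap_data_free_work(), so a
kfree(data) in the RCU callback could free the descriptor before the
deferred work ran and dereferenced it. The final free has to belong to
whichever path touches the descriptor last. A userspace sketch of that
ownership rule (all names are stand-ins, not the kernel API):

    #include <stdlib.h>

    struct mmap_data {
            void *pages;
    };

    /* Deferred path (stand-in for perf_mmap_data_free_work()): it runs
     * after the RCU callback returned, so it performs the final free. */
    static void free_work(struct mmap_data *data)
    {
            free(data->pages);
            free(data);             /* descriptor dies here, after last use */
    }

    /* Stand-in for the RCU callback: it only hands the descriptor off.
     * A free(data) here would race the deferred work's dereference. */
    static void free_rcu(struct mmap_data *data,
                         void (*schedule)(struct mmap_data *))
    {
            schedule(data);         /* no free(data) after this point */
    }

    int main(void)
    {
            struct mmap_data *data = malloc(sizeof(*data));

            data->pages = malloc(4096);
            free_rcu(data, free_work);   /* 'schedule' runs inline here */
            return 0;
    }
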
@@ -3914,7 +3924,7 @@ void perf_swevent_put_recursion_context(int rctx)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        barrier();
-       cpuctx->recursion[rctx]++;
+       cpuctx->recursion[rctx]--;
        put_cpu_var(perf_cpu_context);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
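
A one-character thinko: the put side of the swevent recursion guard
incremented the per-context counter instead of decrementing it, so after the
first software event the slot never returned to zero and every later event
at that recursion level was silently dropped. The get/put pair has to stay
symmetric, as in this reduced model of the guard:

    #include <assert.h>

    static int recursion[4];  /* one slot per context: nmi/irq/softirq/task */

    /* Claim the slot for this recursion level; refuse nested entry. */
    static int get_recursion_context(int rctx)
    {
            if (recursion[rctx])
                    return -1;
            recursion[rctx]++;
            return rctx;
    }

    /* Must undo the ++ above; a second ++ (the bug) would leave the slot
     * busy forever and suppress all later events at this level. */
    static void put_recursion_context(int rctx)
    {
            recursion[rctx]--;
    }

    int main(void)
    {
            int rctx = get_recursion_context(1);

            assert(rctx == 1);
            put_recursion_context(rctx);
            assert(get_recursion_context(1) == 1);  /* usable again */
            return 0;
    }
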
@@ -4001,6 +4011,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
        event->pmu->read(event);
 
        data.addr = 0;
+       data.period = event->hw.last_period;
        regs = get_irq_regs();
        /*
         * In case we exclude kernel IPs or are somehow not in interrupt
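
The hrtimer path builds its perf_sample_data on the stack, so every field the
output code reads must be set explicitly; data.period was previously left
uninitialized, and samples from timer-driven software events carried stack
garbage as their period. Copying event->hw.last_period matches what the
other sampling paths report.
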
@@ -4303,10 +4314,6 @@ void perf_bp_event(struct perf_event *bp, void *data)
                perf_swevent_add(bp, 1, 1, &sample, regs);
 }
 #else
-static void bp_perf_event_destroy(struct perf_event *event)
-{
-}
-
 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
 {
        return NULL;
@@ -4780,14 +4787,17 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
         */
 
        ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx))
-               return NULL;
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_exit;
+       }
 
        event = perf_event_alloc(attr, cpu, ctx, NULL,
                                     NULL, callback, GFP_KERNEL);
-       err = PTR_ERR(event);
-       if (IS_ERR(event))
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
                goto err_put_context;
+       }
 
        event->filp = NULL;
        WARN_ON_ONCE(ctx->parent_ctx);
@@ -4804,11 +4814,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
        return event;
 
-err_put_context:
-       if (err < 0)
-               put_ctx(ctx);
-
-       return NULL;
+ err_put_context:
+       put_ctx(ctx);
+ err_exit:
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
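
The last two hunks convert perf_event_create_kernel_counter() from returning
NULL on failure to the kernel's usual ERR_PTR convention, so in-kernel
callers learn why creation failed instead of only that it did (and must now
check with IS_ERR() rather than comparing against NULL). A self-contained
illustration of the pattern, with helpers mirroring <linux/err.h> and a
hypothetical constructor:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095   /* same bound <linux/err.h> uses */

    static inline void *ERR_PTR(long error)      { return (void *)error; }
    static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int   IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical constructor in the style of the patched function:
     * every failure path funnels into a label returning ERR_PTR(err). */
    static void *create_counter(int want_fail)
    {
            void *event;
            int err;

            if (want_fail) {
                    err = -EINVAL;
                    goto err_exit;
            }
            event = malloc(64);
            if (!event) {
                    err = -ENOMEM;
                    goto err_exit;
            }
            return event;

    err_exit:
            return ERR_PTR(err);
    }

    int main(void)
    {
            void *event = create_counter(1);

            if (IS_ERR(event))   /* callers test IS_ERR(), not !event */
                    printf("creation failed: %ld\n", PTR_ERR(event));
            else
                    free(event);
            return 0;
    }
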