diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 095101d..a4fa381 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -83,14 +83,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void)              { barrier(); }
 void __weak hw_perf_enable(void)               { barrier(); }
 
-int __weak
-hw_perf_group_sched_in(struct perf_event *group_leader,
-              struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx)
-{
-       return 0;
-}
-
 void __weak perf_event_print_debug(void)       { }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
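
With PMUs now advertising transaction callbacks (see the group_sched_in()
hunks below), the generic weak fallback for group scheduling has no users
left and is removed. For readers unfamiliar with the idiom: __weak gives
generic code a default definition that any architecture can override just
by providing a strong one. A minimal sketch, with a hypothetical function
name:

    /* kernel/generic.c: weak default, linked in unless overridden */
    int __weak arch_reserve_counters(void)
    {
            return 0;       /* nothing to do on this architecture */
    }

    /* arch/foo/kernel/pmu.c: a strong definition wins at link time */
    int arch_reserve_counters(void)
    {
            /* program architecture-specific PMU registers here */
            return 0;
    }
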
@@ -263,6 +255,18 @@ static void update_event_times(struct perf_event *event)
        event->total_time_running = run_end - event->tstamp_running;
 }
 
+/*
+ * Update total_time_enabled and total_time_running for all events in a group.
+ */
+static void update_group_times(struct perf_event *leader)
+{
+       struct perf_event *event;
+
+       update_event_times(leader);
+       list_for_each_entry(event, &leader->sibling_list, group_entry)
+               update_event_times(event);
+}
+
 static struct list_head *
 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 {
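
update_group_times() is moved up, unchanged, so that its definition
precedes its new first caller, list_del_event(); the original copy is
deleted from its old location in a later hunk. It folds the time
accounting of a whole group: the leader first, then each sibling
reachable from leader->sibling_list through the siblings' group_entry
nodes. A sketch of that ownership shape using the same linux/list.h
idiom (struct and helper names hypothetical):

    #include <linux/list.h>

    struct member {
            struct list_head group_entry;   /* links into a leader's group */
            struct list_head sibling_list;  /* if a leader: heads the group */
    };

    static void touch(struct member *m)
    {
            /* stand-in for the per-member accounting work */
    }

    static void visit_group(struct member *leader)
    {
            struct member *m;

            touch(leader);          /* the leader is handled first */
            list_for_each_entry(m, &leader->sibling_list, group_entry)
                    touch(m);       /* then every sibling in the group */
    }
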
@@ -316,8 +320,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-       struct perf_event *sibling, *tmp;
-
        if (list_empty(&event->group_entry))
                return;
        ctx->nr_events--;
@@ -330,7 +332,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
        if (event->group_leader != event)
                event->group_leader->nr_siblings--;
 
-       update_event_times(event);
+       update_group_times(event);
 
        /*
         * If event was in error state, then keep it
@@ -341,6 +343,12 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
         */
        if (event->state > PERF_EVENT_STATE_OFF)
                event->state = PERF_EVENT_STATE_OFF;
+}
+
+static void
+perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx)
+{
+       struct perf_event *sibling, *tmp;
 
        /*
         * If this was a group event with sibling events then
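
list_del_event() now stops after marking the event off; breaking the
group apart moves into the new perf_destroy_group(), so the release path
can run both steps back to back under ctx->lock (see the
perf_event_release_kernel() hunk below). The rest of perf_destroy_group()
is cut off by this hunk; going by the comment, it promotes every sibling
to a group leader in its own right. A hedged sketch of what that loop
plausibly looks like (the body is not shown in this diff and is assumed
here):

    struct list_head *list = ctx_group_list(event, ctx);

    list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
            list_move_tail(&sibling->group_entry, list);    /* re-home it */
            sibling->group_leader = sibling;        /* its own leader now */
    }
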
@@ -506,18 +514,6 @@ retry:
 }
 
 /*
- * Update total_time_enabled and total_time_running for all events in a group.
- */
-static void update_group_times(struct perf_event *leader)
-{
-       struct perf_event *event;
-
-       update_event_times(leader);
-       list_for_each_entry(event, &leader->sibling_list, group_entry)
-               update_event_times(event);
-}
-
-/*
  * Cross CPU call to disable a performance event
  */
 static void __perf_event_disable(void *info)
@@ -641,15 +637,20 @@ group_sched_in(struct perf_event *group_event,
               struct perf_cpu_context *cpuctx,
               struct perf_event_context *ctx)
 {
-       struct perf_event *event, *partial_group;
+       struct perf_event *event, *partial_group = NULL;
+       const struct pmu *pmu = group_event->pmu;
+       bool txn = false;
        int ret;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
-       ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
-       if (ret)
-               return ret < 0 ? ret : 0;
+       /* Check if group transaction available */
+       if (pmu->start_txn)
+               txn = true;
+
+       if (txn)
+               pmu->start_txn(pmu);
 
        if (event_sched_in(group_event, cpuctx, ctx))
                return -EAGAIN;
@@ -664,9 +665,19 @@ group_sched_in(struct perf_event *group_event,
                }
        }
 
-       return 0;
+       if (!txn)
+               return 0;
+
+       ret = pmu->commit_txn(pmu);
+       if (!ret) {
+               pmu->cancel_txn(pmu);
+               return 0;
+       }
 
 group_error:
+       if (txn)
+               pmu->cancel_txn(pmu);
+
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
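
These two hunks replace the monolithic weak hook with an optional
transaction protocol on the PMU: start_txn() opens a batch, each
event_sched_in() adds one counter to it, and commit_txn() asks the
hardware whether the whole group fits at once, while cancel_txn()
abandons the batch. In this first cut of the API only cancel_txn()
clears the per-cpu transaction state, which is why it is called even
after a successful commit: commit answers "does the group fit", cancel
closes the books. Note also that the early -EAGAIN return when the
group leader itself fails to schedule leaves a started transaction
open; a follow-up kernel fix added a cancel_txn() on that path. A
hypothetical backend, loosely modeled on what an x86-style
implementation of this era might look like (the mypmu_* names, the
cpu_hw_events fields, and the flag are assumptions):

    static void mypmu_start_txn(const struct pmu *pmu)
    {
            struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

            cpuc->group_flag |= PERF_EVENT_TXN_STARTED;     /* batch mode on */
    }

    static void mypmu_cancel_txn(const struct pmu *pmu)
    {
            struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

            cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;    /* batch mode off */
    }

    static int mypmu_commit_txn(const struct pmu *pmu)
    {
            struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

            /*
             * Validate every event queued since start_txn against the
             * real counter constraints in one pass; 0 means the whole
             * group can be scheduled together.
             */
            return mypmu_schedule_events(cpuc);
    }

The point of the design is that a group is accepted or rejected
atomically against hardware constraints, instead of scheduling counters
one at a time and unwinding on the first conflict.
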
@@ -1861,9 +1872,30 @@ int perf_event_release_kernel(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
 
+       /*
+        * Remove from the PMU, can't get re-enabled since we got
+        * here because the last ref went.
+        */
+       perf_event_disable(event);
+
        WARN_ON_ONCE(ctx->parent_ctx);
-       mutex_lock(&ctx->mutex);
-       perf_event_remove_from_context(event);
+       /*
+        * There are two ways this annotation is useful:
+        *
+        *  1) there is a lock recursion from perf_event_exit_task
+        *     see the comment there.
+        *
+        *  2) there is a lock-inversion with mmap_sem through
+        *     perf_event_read_group(), which takes faults while
+        *     holding ctx->mutex, however this is called after
+        *     the last filedesc died, so there is no possibility
+        *     to trigger the AB-BA case.
+        */
+       mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
+       raw_spin_lock_irq(&ctx->lock);
+       list_del_event(event, ctx);
+       perf_destroy_group(event, ctx);
+       raw_spin_unlock_irq(&ctx->lock);
        mutex_unlock(&ctx->mutex);
 
        mutex_lock(&event->owner->perf_event_mutex);
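
perf_event_release_kernel() now tears the event down inline: disable it
first (the last reference is gone, so nothing can re-enable it), then
take ctx->mutex with a lockdep nesting annotation and unlink both the
event and its group under ctx->lock, instead of bouncing through the
IPI-based perf_event_remove_from_context(). mutex_lock_nested(...,
SINGLE_DEPTH_NESTING) tells lockdep that this acquisition may
legitimately nest one level inside another mutex of the same lock
class, which is exactly the recursion scenario the comment describes. A
minimal illustration of the annotation, with hypothetical types:

    struct my_ctx {
            struct mutex mutex;     /* all instances share one lock class,
                                     * since they are mutex_init()ed at the
                                     * same call site */
    };

    static void lock_parent_and_child(struct my_ctx *parent,
                                      struct my_ctx *child)
    {
            mutex_lock(&parent->mutex);
            /* without the annotation, lockdep would flag this second
             * same-class acquisition as a potential recursive deadlock */
            mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);

            /* ... work on both contexts ... */

            mutex_unlock(&child->mutex);
            mutex_unlock(&parent->mutex);
    }
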
@@ -2798,6 +2830,27 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
 
 
 /*
+ * We assume there is only KVM supporting the callbacks.
+ * Later on, we might change it to a list if there is
+ * another virtualization implementation supporting the callbacks.
+ */
+struct perf_guest_info_callbacks *perf_guest_cbs;
+
+int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+{
+       perf_guest_cbs = cbs;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
+
+int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+{
+       perf_guest_cbs = NULL;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
+
+/*
  * Output
  */
 static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
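
The registration pair gives a hypervisor a way to tell perf how to
attribute samples taken while a guest is executing; the single global
pointer matches the stated assumption that KVM is the only user, and
unregister simply clears the pointer without inspecting its argument. A
hypothetical caller in a hypervisor module might wire it up like this
(the callback field names are taken from this era's API as a working
assumption, and the my_* helpers are invented):

    static int my_is_in_guest(void)
    {
            return my_vcpu_is_running();            /* in guest context? */
    }

    static int my_is_user_mode(void)
    {
            return my_vcpu_in_user_mode();          /* guest user mode? */
    }

    static unsigned long my_get_guest_ip(void)
    {
            return my_vcpu_instruction_pointer();
    }

    static struct perf_guest_info_callbacks my_guest_cbs = {
            .is_in_guest    = my_is_in_guest,
            .is_user_mode   = my_is_user_mode,
            .get_guest_ip   = my_get_guest_ip,
    };

    /* at module init */
    perf_register_guest_info_callbacks(&my_guest_cbs);

    /* at module exit */
    perf_unregister_guest_info_callbacks(&my_guest_cbs);
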
@@ -3749,7 +3802,7 @@ void __perf_event_mmap(struct vm_area_struct *vma)
                .event_id  = {
                        .header = {
                                .type = PERF_RECORD_MMAP,
-                               .misc = 0,
+                               .misc = PERF_RECORD_MISC_USER,
                                /* .size */
                        },
                        /* .pid */
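
Tagging mmap records with PERF_RECORD_MISC_USER instead of 0 matters to
consumers: the misc field's cpumode bits tell tools which address space
a record describes, and 0 is the "unknown" encoding. Since
__perf_event_mmap() reports user-space VMAs, USER is the accurate tag.
For orientation, the relevant encodings from the perf ABI of this era
(check the matching include/linux/perf_event.h):

    #define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
    #define PERF_RECORD_MISC_KERNEL                 (1 << 0)
    #define PERF_RECORD_MISC_USER                   (2 << 0)
    #define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
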
@@ -4313,36 +4366,6 @@ static const struct pmu perf_ops_task_clock = {
        .read           = task_clock_perf_event_read,
 };
 
-#ifdef CONFIG_EVENT_TRACING
-
-void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-                  int entry_size, struct pt_regs *regs)
-{
-       struct perf_sample_data data;
-       struct perf_raw_record raw = {
-               .size = entry_size,
-               .data = record,
-       };
-
-       perf_sample_data_init(&data, addr);
-       data.raw = &raw;
-
-       /* Trace events already protected against recursion */
-       do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
-                        &data, regs);
-}
-EXPORT_SYMBOL_GPL(perf_tp_event);
-
-static int perf_tp_event_match(struct perf_event *event,
-                               struct perf_sample_data *data)
-{
-       void *record = data->raw->data;
-
-       if (likely(!event->filter) || filter_match_preds(event->filter, record))
-               return 1;
-       return 0;
-}
-
 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
 {
        struct swevent_hlist *hlist;
@@ -4442,6 +4465,36 @@ static int swevent_hlist_get(struct perf_event *event)
        return err;
 }
 
+#ifdef CONFIG_EVENT_TRACING
+
+void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
+                  int entry_size, struct pt_regs *regs)
+{
+       struct perf_sample_data data;
+       struct perf_raw_record raw = {
+               .size = entry_size,
+               .data = record,
+       };
+
+       perf_sample_data_init(&data, addr);
+       data.raw = &raw;
+
+       /* Trace events already protected against recursion */
+       do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+                        &data, regs);
+}
+EXPORT_SYMBOL_GPL(perf_tp_event);
+
+static int perf_tp_event_match(struct perf_event *event,
+                               struct perf_sample_data *data)
+{
+       void *record = data->raw->data;
+
+       if (likely(!event->filter) || filter_match_preds(event->filter, record))
+               return 1;
+       return 0;
+}
+
 static void tp_perf_event_destroy(struct perf_event *event)
 {
        perf_trace_disable(event->attr.config);
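
The CONFIG_EVENT_TRACING block moves verbatim from above the swevent
hashlist helpers to below them; the two hunks are a pure relocation, no
line inside the block changes. The plausible motivation, given the
surrounding code, is ordering: once tracepoint events participate in
the hlist-based software event machinery, their init and destroy paths
want to call helpers such as swevent_hlist_get() (whose signature is
visible in the hunk header above) without forward declarations. A
hedged sketch of such an init path (structure assumed, not shown in
this diff):

    static int my_tp_event_init(struct perf_event *event)
    {
            int err;

            /* take a reference on the per-cpu swevent hash list;
             * swevent_hlist_get() must already be defined above */
            err = swevent_hlist_get(event);
            if (err)
                    return err;

            return perf_trace_enable(event->attr.config);
    }
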
@@ -5000,7 +5053,7 @@ err_fput_free_put_context:
 
 err_free_put_context:
        if (err < 0)
-               kfree(event);
+               free_event(event);
 
 err_put_context:
        if (err < 0)
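
Swapping kfree() for free_event() in this error path fixes a leak: by
the time the label is reached, the event has acquired more than raw
memory (PMU references, accounting state), and only the real destructor
unwinds those. This is the standard constructor/destructor pairing for
goto-based error unwinding; a generic sketch with hypothetical names:

    struct thing *thing_create(void)
    {
            struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

            if (!t)
                    return NULL;
            if (thing_acquire_resources(t))
                    goto err_free;
            return t;

    err_free:
            kfree(t);       /* nothing acquired yet: bare kfree is fine */
            return NULL;
    }

    static void thing_destroy(struct thing *t)
    {
            thing_release_resources(t);     /* undo what create acquired */
            kfree(t);                       /* only then free the memory */
    }
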
@@ -5279,7 +5332,7 @@ void perf_event_exit_task(struct task_struct *child)
         *
         * But since its the parent context it won't be the same instance.
         */
-       mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
+       mutex_lock(&child_ctx->mutex);
 
 again:
        list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,