ALSA: es968: fix wrong PnP dma index
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 45b4b6e..2f3fbf8 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -15,6 +15,7 @@
 #include <linux/smp.h>
 #include <linux/file.h>
 #include <linux/poll.h>
+#include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/dcache.h>
 #include <linux/percpu.h>
@@ -1164,11 +1165,9 @@ void perf_event_task_sched_out(struct task_struct *task,
        struct perf_event_context *ctx = task->perf_event_ctxp;
        struct perf_event_context *next_ctx;
        struct perf_event_context *parent;
-       struct pt_regs *regs;
        int do_switch = 1;
 
-       regs = task_pt_regs(task);
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
        if (likely(!ctx || !cpuctx->task_ctx))
                return;
@@ -2791,6 +2790,7 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
 {
 }
 
+
 /*
  * Output
  */
@@ -3376,15 +3376,23 @@ static void perf_event_task_output(struct perf_event *event,
                                     struct perf_task_event *task_event)
 {
        struct perf_output_handle handle;
-       int size;
        struct task_struct *task = task_event->task;
-       int ret;
+       unsigned long flags;
+       int size, ret;
+
+       /*
+        * If this CPU attempts to acquire an rq lock held by a CPU spinning
+        * in perf_output_lock() from interrupt context, it's game over.
+        */
+       local_irq_save(flags);
 
        size  = task_event->event_id.header.size;
        ret = perf_output_begin(&handle, event, size, 0, 0);
 
-       if (ret)
+       if (ret) {
+               local_irq_restore(flags);
                return;
+       }
 
        task_event->event_id.pid = perf_event_pid(event, task);
        task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3395,6 +3403,7 @@ static void perf_event_task_output(struct perf_event *event,
        perf_output_put(&handle, task_event->event_id);
 
        perf_output_end(&handle);
+       local_irq_restore(flags);
 }
 
 static int perf_event_task_match(struct perf_event *event)
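
The two hunks above bracket perf_event_task_output() with local_irq_save()/local_irq_restore(), so this CPU stays out of interrupt context for the whole output section and, per the new comment, can never end up trying to take an rq lock held by a CPU spinning in perf_output_lock(); note that the early-return error path restores the saved flags as well. Below is a minimal, self-contained sketch of the same pattern; emit_record(), record_lock and record_buf are hypothetical names used only for illustration, not kernel APIs.

#include <linux/spinlock.h>
#include <linux/string.h>

/*
 * Hypothetical lock and buffer shared with code that may spin-wait on
 * other CPUs; illustrative only.
 */
static DEFINE_SPINLOCK(record_lock);
static char record_buf[64];

static void emit_record(const char *msg, size_t len)
{
	unsigned long flags;

	/*
	 * Keep this CPU out of interrupt context for the whole output
	 * section, so it cannot be diverted into an IRQ handler while
	 * it holds state other CPUs may be spinning on.
	 */
	local_irq_save(flags);

	if (len >= sizeof(record_buf)) {
		/* The early-error path restores flags before returning,
		 * mirroring the if (ret) { ... } hunk above. */
		local_irq_restore(flags);
		return;
	}

	spin_lock(&record_lock);
	memcpy(record_buf, msg, len);
	record_buf[len] = '\0';
	spin_unlock(&record_lock);

	local_irq_restore(flags);
}
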
@@ -4347,7 +4356,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-       ftrace_profile_disable(event->attr.config);
+       perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4361,7 +4370,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
                        !capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
 
-       if (ftrace_profile_enable(event->attr.config))
+       if (perf_trace_enable(event->attr.config))
                return NULL;
 
        event->destroy = tp_perf_event_destroy;
@@ -5368,12 +5377,22 @@ int perf_event_init_task(struct task_struct *child)
        return ret;
 }
 
+static void __init perf_event_init_all_cpus(void)
+{
+       int cpu;
+       struct perf_cpu_context *cpuctx;
+
+       for_each_possible_cpu(cpu) {
+               cpuctx = &per_cpu(perf_cpu_context, cpu);
+               __perf_event_init_context(&cpuctx->ctx, NULL);
+       }
+}
+
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
        struct perf_cpu_context *cpuctx;
 
        cpuctx = &per_cpu(perf_cpu_context, cpu);
-       __perf_event_init_context(&cpuctx->ctx, NULL);
 
        spin_lock(&perf_resource_lock);
        cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
@@ -5439,6 +5458,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
+       perf_event_init_all_cpus();
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
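
The final two hunks move per-CPU context setup out of the CPU_UP_PREPARE path and into perf_event_init_all_cpus(), which runs once at boot and walks every possible CPU, so per_cpu(perf_cpu_context, cpu) is valid even for CPUs that come online later or never. A small sketch of that init-all-possible-CPUs pattern follows; struct my_cpu_state, my_state_init_all_cpus() and my_subsys_init() are hypothetical stand-ins for the real perf structures, not kernel APIs.

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Hypothetical per-CPU state standing in for struct perf_cpu_context. */
struct my_cpu_state {
	spinlock_t lock;
	unsigned long nr_events;
};

static DEFINE_PER_CPU(struct my_cpu_state, my_state);

/*
 * Boot-time pass over every possible CPU, in the spirit of
 * perf_event_init_all_cpus(): hotplug callbacks and remote accesses can
 * then assume the per-CPU data is already initialised.
 */
static void __init my_state_init_all_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct my_cpu_state *st = &per_cpu(my_state, cpu);

		spin_lock_init(&st->lock);
		st->nr_events = 0;
	}
}

static int __init my_subsys_init(void)
{
	/* Initialise all possible CPUs before registering any notifier,
	 * just as perf_event_init() now calls perf_event_init_all_cpus()
	 * before perf_cpu_notify(). */
	my_state_init_all_cpus();
	return 0;
}
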