Merge branch 'perf/x86' into perf/core
author     Ingo Molnar <mingo@elte.hu>
           Fri, 12 Mar 2010 20:06:35 +0000 (21:06 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Fri, 12 Mar 2010 20:06:37 +0000 (21:06 +0100)
Merge reason: The new P4 driver is now stable and ready for more
              testing.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_event.c

@@@ -190,6 -190,8 +190,8 @@@ struct x86_pmu 
        void            (*enable_all)(void);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
+       int             (*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
+       int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
@@@ -415,6 -417,25 +417,25 @@@ set_ext_hw_attr(struct hw_perf_event *h
        return 0;
  }
  
+ static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+ {
+       /*
+        * Generate PMC IRQs:
+        * (keep 'enabled' bit clear for now)
+        */
+       hwc->config = ARCH_PERFMON_EVENTSEL_INT;
+       /*
+        * Count user and OS events unless requested not to
+        */
+       if (!attr->exclude_user)
+               hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
+       if (!attr->exclude_kernel)
+               hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+       return 0;
+ }
  /*
   * Setup the hardware configuration for a given attr_type
   */
@@@ -446,23 -467,13 +467,13 @@@ static int __hw_perf_event_init(struct 
  
        event->destroy = hw_perf_event_destroy;
  
-       /*
-        * Generate PMC IRQs:
-        * (keep 'enabled' bit clear for now)
-        */
-       hwc->config = ARCH_PERFMON_EVENTSEL_INT;
        hwc->idx = -1;
        hwc->last_cpu = -1;
        hwc->last_tag = ~0ULL;
  
-       /*
-        * Count user and OS events unless requested not to.
-        */
-       if (!attr->exclude_user)
-               hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
-       if (!attr->exclude_kernel)
-               hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+       /* Processor specifics */
+       if (x86_pmu.hw_config(attr, hwc))
+               return -EOPNOTSUPP;
  
        if (!hwc->sample_period) {
                hwc->sample_period = x86_pmu.max_period;
                        return -EOPNOTSUPP;
  
                /* BTS is currently only allowed for user-mode. */
-               if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+               if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
        }
  
@@@ -787,6 -798,7 +798,6 @@@ void hw_perf_enable(void
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
 -
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
  
                                continue;
  
                        x86_pmu_stop(event);
                }
  
                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
  
 -                      if (i < n_running &&
 -                          match_prev_assignment(hwc, cpuc, i))
 -                              continue;
 -
 -                      if (hwc->idx == -1)
 +                      if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
 +                      else if (i < n_running)
 +                              continue;
  
                        x86_pmu_start(event);
                }
@@@ -925,7 -942,7 +936,7 @@@ static int x86_pmu_enable(struct perf_e
        if (n < 0)
                return n;
  
-       ret = x86_schedule_events(cpuc, n, assign);
+       ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                return ret;
        /*
@@@ -1252,12 -1269,15 +1263,15 @@@ int hw_perf_group_sched_in(struct perf_
        int assign[X86_PMC_IDX_MAX];
        int n0, n1, ret;
  
+       if (!x86_pmu_initialized())
+               return 0;
        /* n0 = total number of events */
        n0 = collect_events(cpuc, leader, true);
        if (n0 < 0)
                return n0;
  
-       ret = x86_schedule_events(cpuc, n0, assign);
+       ret = x86_pmu.schedule_events(cpuc, n0, assign);
        if (ret)
                return ret;
  
@@@ -1307,6 -1327,7 +1321,7 @@@ undo
  
  #include "perf_event_amd.c"
  #include "perf_event_p6.c"
+ #include "perf_event_p4.c"
  #include "perf_event_intel_lbr.c"
  #include "perf_event_intel_ds.c"
  #include "perf_event_intel.c"
@@@ -1509,7 -1530,7 +1524,7 @@@ static int validate_group(struct perf_e
  
        fake_cpuc->n_events = n;
  
-       ret = x86_schedule_events(fake_cpuc, n, NULL);
+       ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
  
  out_free:
        kfree(fake_cpuc);
@@@ -1679,16 -1700,3 +1694,16 @@@ struct perf_callchain_entry *perf_callc
  
        return entry;
  }
 +
 +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 +{
 +      regs->ip = ip;
 +      /*
 +       * perf_arch_fetch_caller_regs adds another call, we need to increment
 +       * the skip level
 +       */
 +      regs->bp = rewind_frame_pointer(skip + 1);
 +      regs->cs = __KERNEL_CS;
 +      local_save_flags(regs->flags);
 +}
 +EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
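The two callbacks added to struct x86_pmu above turn event configuration and
counter scheduling into per-driver hooks: the core now calls
x86_pmu.hw_config() from __hw_perf_event_init() and x86_pmu.schedule_events()
from x86_pmu_enable(), hw_perf_group_sched_in() and validate_group(). As a
rough sketch (not part of the diff; the instance name "example_pmu" and the
trailing initializers are placeholders), a model-specific driver wires the
hooks up along these lines:

        /*
         * Illustrative only: field names match the struct x86_pmu hunk
         * above; "example_pmu" and the omitted members are placeholders.
         */
        static __initconst struct x86_pmu example_pmu = {
                .name                   = "example",
                /* build hwc->config from the generic perf_event_attr */
                .hw_config              = x86_hw_config,
                /* the default counter-constraint scheduler */
                .schedule_events        = x86_schedule_events,
                /* .enable, .disable, .event_map, ... as before */
        };

PMUs with a conventional counter layout can keep pointing schedule_events at
the generic x86_schedule_events(), while drivers with unusual constraints
(the P4 case that motivated this merge) can supply their own implementation
without touching the core code paths listed above.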