powerpc/oprofile: fix potential buffer overrun in op_model_cell.c
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index bb6bff5..7fd90d0 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
 #include "../platforms/cell/interrupt.h"
 #include "cell/pr_util.h"
 
-static void cell_global_stop_spu(void);
+#define PPU_PROFILING            0
+#define SPU_PROFILING_CYCLES     1
+#define SPU_PROFILING_EVENTS     2
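+
+/* The profiling mode is chosen in cell_reg_setup() from the counter 0
+ * event: SPU_CYCLES_EVENT_NUM selects SPU_PROFILING_CYCLES, an event in
+ * [SPU_EVENT_NUM_START, SPU_EVENT_NUM_STOP] selects SPU_PROFILING_EVENTS,
+ * and anything else selects PPU_PROFILING.
+ */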
 
-/*
- * spu_cycle_reset is the number of cycles between samples.
- * This variable is used for SPU profiling and should ONLY be set
- * at the beginning of cell_reg_setup; otherwise, it's read-only.
- */
-static unsigned int spu_cycle_reset;
+#define SPU_EVENT_NUM_START      4100
+#define SPU_EVENT_NUM_STOP       4399
+#define SPU_PROFILE_EVENT_ADDR          4363  /* spu, address trace, decimal */
+#define SPU_PROFILE_EVENT_ADDR_MASK_A   0x146 /* sub unit set to zero */
+#define SPU_PROFILE_EVENT_ADDR_MASK_B   0x186 /* sub unit set to zero */
 
 #define NUM_SPUS_PER_NODE    8
 #define SPU_CYCLES_EVENT_NUM 2 /*  event number for SPU_CYCLES */
@@ -61,11 +62,26 @@ static unsigned int spu_cycle_reset;
 #define NUM_THREADS 2         /* number of physical threads in
                               * physical processor
                               */
-#define NUM_TRACE_BUS_WORDS 4
+#define NUM_DEBUG_BUS_WORDS 4
 #define NUM_INPUT_BUS_WORDS 2
 
 #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
 
+/* Minimum HW interval timer setting to send a value to the trace buffer
+ * is 10 cycles. To configure the counter to send a value every N cycles,
+ * set the counter to 2^32 - 1 - N.
+ */
+#define NUM_INTERVAL_CYC  (0xFFFFFFFF - 10)
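+/* Example: with N = 10, the value written to pm_interval is
+ * 0xFFFFFFFF - 10 = 0xFFFFFFF5, giving a trace buffer write every
+ * 10 cycles, the minimum interval the HW supports.
+ */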
+
+/*
+ * spu_cycle_reset is the number of cycles between samples.
+ * This variable is used for SPU profiling and should ONLY be set
+ * at the beginning of cell_reg_setup; otherwise, it's read-only.
+ */
+static unsigned int spu_cycle_reset;
+static unsigned int profiling_mode;
+static int spu_evnt_phys_spu_indx;
+
 struct pmc_cntrl_data {
        unsigned long vcntr;
        unsigned long evnts;
@@ -105,6 +121,8 @@ struct pm_cntrl {
        u16 trace_mode;
        u16 freeze;
        u16 count_mode;
+       u16 spu_addr_trace;
+       u8  trace_buf_ovflw;
 };
 
 static struct {
@@ -122,7 +140,7 @@ static struct {
 #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
 
 static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
-
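+/* counter 0 value saved for each physical SPU, indexed by
+ * (node * NUM_SPUS_PER_NODE) + spu; spu_evnt_swap() uses these to
+ * time slice event profiling across the SPUs of a node
+ */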
+static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
 static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
 
 /*
@@ -152,6 +170,7 @@ static u32 hdw_thread;
 
 static u32 virt_cntr_inter_mask;
 static struct timer_list timer_virt_cntr;
+static struct timer_list timer_spu_event_swap;
 
 /*
  * pm_signal needs to be global since it is initialized in
@@ -165,11 +184,10 @@ static int spu_rtas_token;   /* token for SPU cycle profiling */
 static u32 reset_value[NR_PHYS_CTRS];
 static int num_counters;
 static int oprofile_running;
-static DEFINE_SPINLOCK(virt_cntr_lock);
+static DEFINE_SPINLOCK(cntr_lock);
 
 static u32 ctr_enabled;
 
-static unsigned char trace_bus[NUM_TRACE_BUS_WORDS];
 static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
 
 /*
@@ -217,7 +235,7 @@ static void pm_rtas_reset_signals(u32 node)
                 * failure to stop OProfile.
                 */
                printk(KERN_WARNING "%s: rtas returned: %d\n",
-                      __FUNCTION__, ret);
+                      __func__, ret);
 }
 
 static int pm_rtas_activate_signals(u32 node, u32 count)
@@ -230,7 +248,7 @@ static int pm_rtas_activate_signals(u32 node, u32 count)
         * There is no debug setup required for the cycles event.
         * Note that only events in the same group can be used.
         * Otherwise, there will be conflicts in correctly routing
-        * the signals on the debug bus.  It is the responsiblity
+        * the signals on the debug bus.  It is the responsibility
         * of the OProfile user tool to check the events are in
         * the same group.
         */
@@ -256,7 +274,7 @@ static int pm_rtas_activate_signals(u32 node, u32 count)
 
                if (unlikely(ret)) {
                        printk(KERN_WARNING "%s: rtas returned: %d\n",
-                              __FUNCTION__, ret);
+                              __func__, ret);
                        return -EIO;
                }
        }
@@ -298,7 +316,7 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
 
        p->signal_group = event / 100;
        p->bus_word = bus_word;
-       p->sub_unit = (unit_mask & 0x0000f000) >> 12;
+       p->sub_unit = GET_SUB_UNIT(unit_mask);
 
        pm_regs.pm07_cntrl[ctr] = 0;
        pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
@@ -334,16 +352,16 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
                p->bit = signal_bit;
        }
 
-       for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) {
+       for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
                if (bus_word & (1 << i)) {
                        pm_regs.debug_bus_control |=
-                           (bus_type << (31 - (2 * i) + 1));
+                               (bus_type << (30 - (2 * i)));
 
                        for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
                                if (input_bus[j] == 0xff) {
                                        input_bus[j] = i;
                                        pm_regs.group_control |=
-                                           (i << (31 - i));
+                                               (i << (30 - (2 * j)));
 
                                        break;
                                }
@@ -368,12 +386,16 @@ static void write_pm_cntrl(int cpu)
        if (pm_regs.pm_cntrl.stop_at_max == 1)
                val |= CBE_PM_STOP_AT_MAX;
 
-       if (pm_regs.pm_cntrl.trace_mode == 1)
+       if (pm_regs.pm_cntrl.trace_mode != 0)
                val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
 
+       if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
+               val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
        if (pm_regs.pm_cntrl.freeze == 1)
                val |= CBE_PM_FREEZE_ALL_CTRS;
 
+       val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
+
        /*
         * Routine set_count_mode must be called previously to set
         * the count mode based on the user selection of user and kernel.
@@ -405,7 +427,7 @@ set_count_mode(u32 kernel, u32 user)
        }
 }
 
-static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
+static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
 {
 
        pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
@@ -442,7 +464,7 @@ static void cell_virtual_cntr(unsigned long data)
         * not both playing with the counters on the same node.
         */
 
-       spin_lock_irqsave(&virt_cntr_lock, flags);
+       spin_lock_irqsave(&cntr_lock, flags);
 
        prev_hdw_thread = hdw_thread;
 
@@ -450,6 +472,12 @@ static void cell_virtual_cntr(unsigned long data)
        hdw_thread = 1 ^ hdw_thread;
        next_hdw_thread = hdw_thread;
 
+       pm_regs.group_control = 0;
+       pm_regs.debug_bus_control = 0;
+
+       for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
+               input_bus[i] = 0xff;
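+       /* these are rebuilt by the set_pm_event() calls below for the
+        * thread being started
+        */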
+
        /*
         * There are some per thread events.  Must do the
         * set event, for the thread that is being started
@@ -475,7 +503,7 @@ static void cell_virtual_cntr(unsigned long data)
                cbe_disable_pm_interrupts(cpu);
                for (i = 0; i < num_counters; i++) {
                        per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
-                           = cbe_read_ctr(cpu, i);
+                               = cbe_read_ctr(cpu, i);
 
                        if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
                            == 0xFFFFFFFF)
@@ -522,7 +550,7 @@ static void cell_virtual_cntr(unsigned long data)
                cbe_enable_pm(cpu);
        }
 
-       spin_unlock_irqrestore(&virt_cntr_lock, flags);
+       spin_unlock_irqrestore(&cntr_lock, flags);
 
        mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
 }
@@ -536,55 +564,214 @@ static void start_virt_cntrs(void)
        add_timer(&timer_virt_cntr);
 }
 
-/* This function is called once for all cpus combined */
-static int cell_reg_setup(struct op_counter_config *ctr,
+static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
                        struct op_system_config *sys, int num_ctrs)
 {
-       int i, j, cpu;
-       spu_cycle_reset = 0;
+       spu_cycle_reset = ctr[0].count;
 
-       if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
-               spu_cycle_reset = ctr[0].count;
+       /*
+        * Each node will need to make the rtas call to start
+        * and stop SPU profiling.  Get the token once and store it.
+        */
+       spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
+
+       if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+               printk(KERN_ERR
+                      "%s: rtas token ibm,cbe-spu-perftools unknown\n",
+                      __func__);
+               return -EIO;
+       }
+       return 0;
+}
+
+/* Unfortunately, the hardware will only support event profiling
+ * on one SPU per node at a time.  Therefore, we must time slice
+ * the profiling across all SPUs in the node.  Note, we do this
+ * in parallel for each node.  The following routine is called
+ * periodically, based on a kernel timer, to switch which SPU is
+ * being monitored in a round robin fashion.
+ */
+static void spu_evnt_swap(unsigned long data)
+{
+       int node;
+       int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
+       unsigned long flags;
+       int cpu;
+       int ret;
+       u32 interrupt_mask;
+
+       /* enable interrupts on cntr 0 */
+       interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
+
+       hdw_thread = 0;
+
+       /* Make sure spu event interrupt handler and spu event swap
+        * don't access the counters simultaneously.
+        */
+       spin_lock_irqsave(&cntr_lock, flags);
+
+       cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
+
+       if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
+               spu_evnt_phys_spu_indx = 0;
+
+       pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
+       pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+       pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+       /* switch the SPU being profiled on each node */
+       for_each_online_cpu(cpu) {
+               if (cbe_get_hw_thread_id(cpu))
+                       continue;
+
+               node = cbe_cpu_to_node(cpu);
+               cur_phys_spu = (node * NUM_SPUS_PER_NODE)
+                       + cur_spu_evnt_phys_spu_indx;
+               nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
+                       + spu_evnt_phys_spu_indx;
 
                /*
-                * Each node will need to make the rtas call to start
-                * and stop SPU profiling.  Get the token once and store it.
+                * stop counters, save counter values, restore counts
+                * for previous physical SPU
+                */
+               cbe_disable_pm(cpu);
+               cbe_disable_pm_interrupts(cpu);
+
+               spu_pm_cnt[cur_phys_spu] = cbe_read_ctr(cpu, 0);
+
+               /* restore previous count for the next spu to sample */
+               /* NOTE, hardware issue, counter will not start if the
+                * counter value is at max (0xFFFFFFFF).
                 */
-               spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
+               if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
+                       cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
+               else
+                       cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
 
-               if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
-                       printk(KERN_ERR
-                              "%s: rtas token ibm,cbe-spu-perftools unknown\n",
-                              __FUNCTION__);
-                       return -EIO;
-               }
+               pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+               /* setup the debug bus measure the one event and
+                * the two events to route the next SPU's PC on
+                * the debug bus
+                */
+               ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
+               if (ret)
+                       printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
+                              "SPU event swap\n", __func__);
+
+               /* clear the trace buffer, don't want to take PC for
+                * previous SPU */
+               cbe_write_pm(cpu, trace_address, 0);
+
+               enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+
+               /* Enable interrupts on the CPU thread that is starting */
+               cbe_enable_pm_interrupts(cpu, hdw_thread,
+                                        interrupt_mask);
+               cbe_enable_pm(cpu);
        }
 
-       pm_rtas_token = rtas_token("ibm,cbe-perftools");
+       spin_unlock_irqrestore(&cntr_lock, flags);
+
+       /* swap approximately every 1/25th of a second */
+       mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
+}
+
+static void start_spu_event_swap(void)
+{
+       init_timer(&timer_spu_event_swap);
+       timer_spu_event_swap.function = spu_evnt_swap;
+       timer_spu_event_swap.data = 0UL;
+       timer_spu_event_swap.expires = jiffies + HZ / 25;
+       add_timer(&timer_spu_event_swap);
+}
+
+static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
+                       struct op_system_config *sys, int num_ctrs)
+{
+       int i;
+
+       /* routine is called once for all nodes */
 
+       spu_evnt_phys_spu_indx = 0;
        /*
-        * For all events excetp PPU CYCLEs, each node will need to make
+        * For all events except PPU CYCLEs, each node will need to make
         * the rtas cbe-perftools call to setup and reset the debug bus.
         * Make the token lookup call once and store it in the global
         * variable pm_rtas_token.
         */
+       pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
        if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
                printk(KERN_ERR
                       "%s: rtas token ibm,cbe-perftools unknown\n",
-                      __FUNCTION__);
+                      __func__);
                return -EIO;
        }
 
-       num_counters = num_ctrs;
+       /* setup the pm_control register settings,
+        * settings will be written per node by the
+        * cell_cpu_setup() function.
+        */
+       pm_regs.pm_cntrl.trace_buf_ovflw = 1;
 
-       pm_regs.group_control = 0;
-       pm_regs.debug_bus_control = 0;
+       /* Use the occurrence trace mode to have SPU PC saved
+        * to the trace buffer.  Occurrence data in trace buffer
+        * is not used.  Bit 2 must be set to store SPU addresses.
+        */
+       pm_regs.pm_cntrl.trace_mode = 2;
+
+       pm_regs.pm_cntrl.spu_addr_trace = 0x1;  /* using debug bus
+                                                  event 2 & 3 */
+
+       /* setup the debug bus event array with the SPU PC routing events.
+        * Note, pm_signal[0] will be filled in by set_pm_event() call below.
+        */
+       pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+       pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
+       pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
+       pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+
+       pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+       pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
+       pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
+       pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
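+       /* An event number encodes (signal_group * 100 + signal_bit), so
+        * SPU_PROFILE_EVENT_ADDR (4363) yields signal group 43, bit 63
+        * in the assignments above.
+        */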
+
+       /* Set the user selected spu event to profile on,
+        * note, only one SPU profiling event is supported
+        */
+       num_counters = 1;  /* Only support one SPU event at a time */
+       set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
 
-       /* setup the pm_control register */
-       memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
-       pm_regs.pm_cntrl.stop_at_max = 1;
-       pm_regs.pm_cntrl.trace_mode = 0;
-       pm_regs.pm_cntrl.freeze = 1;
+       reset_value[0] = 0xFFFFFFFF - ctr[0].count;
+
+       /* global, used by cell_cpu_setup */
+       ctr_enabled |= 1;
+
+       /* Initialize the count for each SPU to the reset value */
+       for (i = 0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
+               spu_pm_cnt[i] = reset_value[0];
+
+       return 0;
+}
+
+static int cell_reg_setup_ppu(struct op_counter_config *ctr,
+                       struct op_system_config *sys, int num_ctrs)
+{
+       /* routine is called once for all nodes */
+       int i, j, cpu;
+
+       num_counters = num_ctrs;
+
+       if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
+               printk(KERN_ERR
+                      "%s: Oprofile, number of specified events " \
+                      "exceeds number of physical counters\n",
+                      __func__);
+               return -EIO;
+       }
 
        set_count_mode(sys->enable_kernel, sys->enable_user);
 
@@ -619,9 +806,6 @@ static int cell_reg_setup(struct op_counter_config *ctr,
                pmc_cntrl[1][i].vcntr = i;
        }
 
-       for (i = 0; i < NUM_TRACE_BUS_WORDS; i++)
-               trace_bus[i] = 0xff;
-
        for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
                input_bus[i] = 0xff;
 
@@ -656,6 +840,63 @@ static int cell_reg_setup(struct op_counter_config *ctr,
 }
 
 
+/* This function is called once for all cpus combined */
+static int cell_reg_setup(struct op_counter_config *ctr,
+                       struct op_system_config *sys, int num_ctrs)
+{
+       int ret = 0;
+       spu_cycle_reset = 0;
+
+       /* initialize the spu_addr_trace value, will be reset if
+        * doing spu event profiling.
+        */
+       pm_regs.group_control = 0;
+       pm_regs.debug_bus_control = 0;
+       pm_regs.pm_cntrl.stop_at_max = 1;
+       pm_regs.pm_cntrl.trace_mode = 0;
+       pm_regs.pm_cntrl.freeze = 1;
+       pm_regs.pm_cntrl.trace_buf_ovflw = 0;
+       pm_regs.pm_cntrl.spu_addr_trace = 0;
+
+       /*
+        * For all events except PPU CYCLEs, each node will need to make
+        * the rtas cbe-perftools call to setup and reset the debug bus.
+        * Make the token lookup call once and store it in the global
+        * variable pm_rtas_token.
+        */
+       pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
+       if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+               printk(KERN_ERR
+                      "%s: rtas token ibm,cbe-perftools unknown\n",
+                      __func__);
+               return -EIO;
+       }
+
+       if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
+               profiling_mode = SPU_PROFILING_CYCLES;
+               ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
+       } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
+                  (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
+               profiling_mode = SPU_PROFILING_EVENTS;
+               spu_cycle_reset = ctr[0].count;
+
+               /* for SPU event profiling, need to setup the
+                * pm_signal array with the events to route the
+                * SPU PC before making the FW call.  Note, only
+                * one SPU event for profiling can be specified
+                * at a time.
+                */
+               ret = cell_reg_setup_spu_events(ctr, sys, num_ctrs);
+       } else {
+               profiling_mode = PPU_PROFILING;
+               ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
+       }
+
+       return ret;
+}
 
 /* This function is called once for each cpu */
 static int cell_cpu_setup(struct op_counter_config *cntr)
@@ -663,8 +904,13 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
        u32 cpu = smp_processor_id();
        u32 num_enabled = 0;
        int i;
+       int ret;
 
-       if (spu_cycle_reset)
+       /* Cycle based SPU profiling does not use the performance
+        * counters.  The trace array is configured to collect
+        * the data.
+        */
+       if (profiling_mode == SPU_PROFILING_CYCLES)
                return 0;
 
        /* There is one performance monitor per processor chip (i.e. node),
@@ -677,7 +923,6 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
        cbe_disable_pm(cpu);
        cbe_disable_pm_interrupts(cpu);
 
-       cbe_write_pm(cpu, pm_interval, 0);
        cbe_write_pm(cpu, pm_start_stop, 0);
        cbe_write_pm(cpu, group_control, pm_regs.group_control);
        cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
@@ -694,7 +939,20 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
         * The pm_rtas_activate_signals will return -EIO if the FW
         * call failed.
         */
-       return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
+       if (profiling_mode == SPU_PROFILING_EVENTS) {
+               /* For SPU event profiling also need to setup the
+                * pm interval timer
+                */
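+               /* the "+ 2" accounts for pm_signal[1] and pm_signal[2],
+                * which carry the events that route the SPU PC onto the
+                * debug bus
+                */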
+               ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+                                              num_enabled + 2);
+               /* store PC from debug bus to Trace buffer as often
+                * as possible (every 10 cycles)
+                */
+               cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+               return ret;
+       } else
+               return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+                                               num_enabled);
 }
 
 #define ENTRIES         303
@@ -819,7 +1077,7 @@ static int calculate_lfsr(int n)
                index = ENTRIES-1;
 
        /* make sure index is valid */
-       if ((index > ENTRIES) || (index < 0))
+       if ((index >= ENTRIES) || (index < 0))
                index = ENTRIES-1;
 
        return initial_lfsr[index];
@@ -828,13 +1086,13 @@ static int calculate_lfsr(int n)
 static int pm_rtas_activate_spu_profiling(u32 node)
 {
        int ret, i;
-       struct pm_signal pm_signal_local[NR_PHYS_CTRS];
+       struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
 
        /*
         * Set up the rtas call to configure the debug bus to
         * route the SPU PCs.  Setup the pm_signal for each SPU
         */
-       for (i = 0; i < NUM_SPUS_PER_NODE; i++) {
+       for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
                pm_signal_local[i].cpu = node;
                pm_signal_local[i].signal_group = 41;
                /* spu i on word (i/2) */
@@ -846,12 +1104,12 @@ static int pm_rtas_activate_spu_profiling(u32 node)
 
        ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
                                     PASSTHRU_ENABLE, pm_signal_local,
-                                    (NUM_SPUS_PER_NODE
+                                    (ARRAY_SIZE(pm_signal_local)
                                      * sizeof(struct pm_signal)));
 
        if (unlikely(ret)) {
                printk(KERN_WARNING "%s: rtas returned: %d\n",
-                      __FUNCTION__, ret);
+                      __func__, ret);
                return -EIO;
        }
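
The hunks above are both bounds fixes: calculate_lfsr() must clamp its
table index to [0, ENTRIES - 1], since an index equal to ENTRIES already
reads one element past the end of initial_lfsr[], and pm_signal_local[],
previously declared with NR_PHYS_CTRS (4) elements, was filled by a loop
iterating NUM_SPUS_PER_NODE (8) times, which is the potential buffer
overrun named in the subject line. A minimal standalone sketch of the
ARRAY_SIZE() idiom adopted by the fix (abridged struct, hypothetical
values, not the kernel code itself):

	#include <stdio.h>
	#include <string.h>

	#define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))

	#define NUM_SPUS_PER_NODE	8

	struct pm_signal {
		int cpu, signal_group, bus_word, sub_unit, bit;
	};

	int main(void)
	{
		struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
		size_t i;

		memset(pm_signal_local, 0, sizeof(pm_signal_local));

		/* bounding the loop with ARRAY_SIZE() ties it to the
		 * declaration, so the two cannot drift apart the way
		 * NR_PHYS_CTRS (4) and NUM_SPUS_PER_NODE (8) did
		 */
		for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++)
			pm_signal_local[i].signal_group = 41;

		printf("%zu entries set up\n",
		       ARRAY_SIZE(pm_signal_local));
		return 0;
	}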
 
@@ -876,7 +1134,122 @@ static struct notifier_block cpu_freq_notifier_block = {
 };
 #endif
 
-static int cell_global_start_spu(struct op_counter_config *ctr)
+/*
+ * Note the generic OProfile stop calls do not support returning
+ * an error on stop.  Hence, will not return an error if the FW
+ * calls fail on stop. Failure to reset the debug bus is not an issue.
+ * Failure to disable the SPU profiling is not an issue.  The FW calls
+ * to enable the performance counters and debug bus will work even if
+ * the hardware was not cleanly reset.
+ */
+static void cell_global_stop_spu_cycles(void)
+{
+       int subfunc, rtn_value;
+       unsigned int lfsr_value;
+       int cpu;
+
+       oprofile_running = 0;
+       smp_wmb();
+
+#ifdef CONFIG_CPU_FREQ
+       cpufreq_unregister_notifier(&cpu_freq_notifier_block,
+                                   CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+       for_each_online_cpu(cpu) {
+               if (cbe_get_hw_thread_id(cpu))
+                       continue;
+
+               subfunc = 3;    /*
+                                * 2 - activate SPU tracing,
+                                * 3 - deactivate
+                                */
+               lfsr_value = 0x8f100000;
+
+               rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
+                                     subfunc, cbe_cpu_to_node(cpu),
+                                     lfsr_value);
+
+               if (unlikely(rtn_value != 0)) {
+                       printk(KERN_ERR
+                              "%s: rtas call ibm,cbe-spu-perftools " \
+                              "failed, return = %d\n",
+                              __func__, rtn_value);
+               }
+
+               /* Deactivate the signals */
+               pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+       }
+
+       stop_spu_profiling_cycles();
+}
+
+static void cell_global_stop_spu_events(void)
+{
+       int cpu;
+       oprofile_running = 0;
+
+       stop_spu_profiling_events();
+       smp_wmb();
+
+       for_each_online_cpu(cpu) {
+               if (cbe_get_hw_thread_id(cpu))
+                       continue;
+
+               cbe_sync_irq(cbe_cpu_to_node(cpu));
+               /* Stop the counters */
+               cbe_disable_pm(cpu);
+               cbe_write_pm07_control(cpu, 0, 0);
+
+               /* Deactivate the signals */
+               pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+               /* Deactivate interrupts */
+               cbe_disable_pm_interrupts(cpu);
+       }
+       del_timer_sync(&timer_spu_event_swap);
+}
+
+static void cell_global_stop_ppu(void)
+{
+       int cpu;
+
+       /*
+        * This routine will be called once for the system.
+        * There is one performance monitor per node, so we
+        * only need to perform this function once per node.
+        */
+       del_timer_sync(&timer_virt_cntr);
+       oprofile_running = 0;
+       smp_wmb();
+
+       for_each_online_cpu(cpu) {
+               if (cbe_get_hw_thread_id(cpu))
+                       continue;
+
+               cbe_sync_irq(cbe_cpu_to_node(cpu));
+               /* Stop the counters */
+               cbe_disable_pm(cpu);
+
+               /* Deactivate the signals */
+               pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+               /* Deactivate interrupts */
+               cbe_disable_pm_interrupts(cpu);
+       }
+}
+
+static void cell_global_stop(void)
+{
+       if (profiling_mode == PPU_PROFILING)
+               cell_global_stop_ppu();
+       else if (profiling_mode == SPU_PROFILING_EVENTS)
+               cell_global_stop_spu_events();
+       else
+               cell_global_stop_spu_cycles();
+}
+
+static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
 {
        int subfunc;
        unsigned int lfsr_value;
@@ -942,18 +1315,18 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
 
                /* start profiling */
                ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
-                 cbe_cpu_to_node(cpu), lfsr_value);
+                               cbe_cpu_to_node(cpu), lfsr_value);
 
                if (unlikely(ret != 0)) {
                        printk(KERN_ERR
-                              "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
-                              __FUNCTION__, ret);
+                              "%s: rtas call ibm,cbe-spu-perftools failed, " \
+                              "return = %d\n", __func__, ret);
                        rtas_error = -EIO;
                        goto out;
                }
        }
 
-       rtas_error = start_spu_profiling(spu_cycle_reset);
+       rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
        if (rtas_error)
                goto out_stop;
 
@@ -961,11 +1334,74 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
        return 0;
 
 out_stop:
-       cell_global_stop_spu();         /* clean up the PMU/debug bus */
+       cell_global_stop_spu_cycles();  /* clean up the PMU/debug bus */
 out:
        return rtas_error;
 }
 
+static int cell_global_start_spu_events(struct op_counter_config *ctr)
+{
+       int cpu;
+       u32 interrupt_mask = 0;
+       int rtn = 0;
+
+       hdw_thread = 0;
+
+       /* spu event profiling, uses the performance counters to generate
+        * an interrupt.  The hardware is setup to store the SPU program
+        * counter into the trace array.  The occurrence mode is used to
+        * enable storing data to the trace buffer.  The bits are set
+        * to send/store the SPU address in the trace buffer.  The debug
+        * bus must be setup to route the SPU program counter onto the
+        * debug bus.  The occurrence data in the trace buffer is not used.
+        */
+
+       /* This routine gets called once for the system.
+        * There is one performance monitor per node, so we
+        * only need to perform this function once per node.
+        */
+
+       for_each_online_cpu(cpu) {
+               if (cbe_get_hw_thread_id(cpu))
+                       continue;
+
+               /*
+                * Setup SPU event-based profiling.
+                * Set perf_mon_control bit 0 to a zero before
+                * enabling spu collection hardware.
+                *
+                * Only support one SPU event on one SPU per node.
+                */
+               if (ctr_enabled & 1) {
+                       cbe_write_ctr(cpu, 0, reset_value[0]);
+                       enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+                       interrupt_mask |=
+                               CBE_PM_CTR_OVERFLOW_INTR(0);
+               } else {
+                       /* Disable counter */
+                       cbe_write_pm07_control(cpu, 0, 0);
+               }
+
+               cbe_get_and_clear_pm_interrupts(cpu);
+               cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
+               cbe_enable_pm(cpu);
+
+               /* clear the trace buffer */
+               cbe_write_pm(cpu, trace_address, 0);
+       }
+
+       /* Start the timer to time slice collecting the event profile
+        * on each of the SPUs.  Note, can collect profile on one SPU
+        * per node at a time.
+        */
+       start_spu_event_swap();
+       start_spu_profiling_events();
+       oprofile_running = 1;
+       smp_wmb();
+
+       return rtn;
+}
+
 static int cell_global_start_ppu(struct op_counter_config *ctr)
 {
        u32 cpu, i;
@@ -985,8 +1421,7 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
                        if (ctr_enabled & (1 << i)) {
                                cbe_write_ctr(cpu, i, reset_value[i]);
                                enable_ctr(cpu, i, pm_regs.pm07_cntrl);
-                               interrupt_mask |=
-                                   CBE_PM_CTR_OVERFLOW_INTR(i);
+                               interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
                        } else {
                                /* Disable counter */
                                cbe_write_pm07_control(cpu, i, 0);
@@ -1015,99 +1450,162 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
 
 static int cell_global_start(struct op_counter_config *ctr)
 {
-       if (spu_cycle_reset)
-               return cell_global_start_spu(ctr);
+       if (profiling_mode == SPU_PROFILING_CYCLES)
+               return cell_global_start_spu_cycles(ctr);
+       else if (profiling_mode == SPU_PROFILING_EVENTS)
+               return cell_global_start_spu_events(ctr);
        else
                return cell_global_start_ppu(ctr);
 }
 
-/*
- * Note the generic OProfile stop calls do not support returning
- * an error on stop.  Hence, will not return an error if the FW
- * calls fail on stop. Failure to reset the debug bus is not an issue.
- * Failure to disable the SPU profiling is not an issue.  The FW calls
- * to enable the performance counters and debug bus will work even if
- * the hardware was not cleanly reset.
+
+/* The SPU interrupt handler
+ *
+ * SPU event profiling works as follows:
+ * The pm_signal[0] holds the one SPU event to be measured.  It is routed on
+ * the debug bus using word 0 or 1.  The value of pm_signal[1] and
+ * pm_signal[2] contain the necessary events to route the SPU program
+ * counter for the selected SPU onto the debug bus using words 2 and 3.
+ * The pm_interval register is set up to write the SPU PC value into the
+ * trace buffer at the maximum rate possible.  The trace buffer is configured
+ * to store the PCs, wrapping when it is full.  The performance counter is
+ * initialized to the max hardware count minus the number of events, N,
+ * between samples.  Once the N events have occurred, a HW counter overflow
+ * occurs, causing the generation of a HW counter interrupt which also stops
+ * the writing of the SPU PC values to the trace buffer.  Hence the last PC
+ * written to the trace buffer is the SPU PC that we want.  Unfortunately,
+ * we have to read from the beginning of the trace buffer to get to the
+ * last value written.  We just hope the PPU has nothing better to do than
+ * service this interrupt.  The PC for the specific SPU being profiled is
+ * extracted from the trace buffer, processed and stored.  The trace buffer
+ * is cleared, interrupts are cleared, and the counter is reset to max - N.
+ * A kernel timer is used to periodically call the routine spu_evnt_swap()
+ * to switch to the next physical SPU in the node to profile in round robin
+ * order.  This way data is collected for all SPUs on the node.  It does mean
+ * that we need to use a relatively small value of N, since each SPU is only
+ * profiled 1/8 of the time, to ensure enough samples are collected on each
+ * SPU.  It may also be necessary to use a longer sample collection period.
  */
-static void cell_global_stop_spu(void)
+static void cell_handle_interrupt_spu(struct pt_regs *regs,
+                                     struct op_counter_config *ctr)
 {
-       int subfunc, rtn_value;
-       unsigned int lfsr_value;
-       int cpu;
+       u32 cpu, cpu_tmp;
+       u64 trace_entry;
+       u32 interrupt_mask;
+       u64 trace_buffer[2];
+       u64 last_trace_buffer;
+       u32 sample;
+       u32 trace_addr;
+       unsigned long sample_array_lock_flags;
+       int spu_num;
+       unsigned long flags;
 
-       oprofile_running = 0;
+       /* Make sure spu event interrupt handler and spu event swap
+        * don't access the counters simultaneously.
+        */
+       cpu = smp_processor_id();
+       spin_lock_irqsave(&cntr_lock, flags);
 
-#ifdef CONFIG_CPU_FREQ
-       cpufreq_unregister_notifier(&cpu_freq_notifier_block,
-                                   CPUFREQ_TRANSITION_NOTIFIER);
-#endif
+       cpu_tmp = cpu;
+       cbe_disable_pm(cpu);
 
-       for_each_online_cpu(cpu) {
-               if (cbe_get_hw_thread_id(cpu))
-                       continue;
+       interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
 
-               subfunc = 3;    /*
-                                * 2 - activate SPU tracing,
-                                * 3 - deactivate
-                                */
-               lfsr_value = 0x8f100000;
+       sample = 0xABCDEF;
+       trace_entry = 0xfedcba;
+       last_trace_buffer = 0xdeadbeaf;
 
-               rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
-                                     subfunc, cbe_cpu_to_node(cpu),
-                                     lfsr_value);
+       if ((oprofile_running == 1) && (interrupt_mask != 0)) {
+               /* disable writes to trace buff */
+               cbe_write_pm(cpu, pm_interval, 0);
 
-               if (unlikely(rtn_value != 0)) {
-                       printk(KERN_ERR
-                              "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
-                              __FUNCTION__, rtn_value);
+               /* only have one perf cntr being used, cntr 0 */
+               if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
+                   && ctr[0].enabled)
+                       /* The SPU PC values will be read
+                        * from the trace buffer, reset counter
+                        */
+                       cbe_write_ctr(cpu, 0, reset_value[0]);
+
+               trace_addr = cbe_read_pm(cpu, trace_address);
+
+               while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
+                       /* There is data in the trace buffer to process
+                        * Read the buffer until you get to the last
+                        * entry.  This is the value we want.
+                        */
+
+                       cbe_read_trace_buffer(cpu, trace_buffer);
+                       trace_addr = cbe_read_pm(cpu, trace_address);
                }
 
-               /* Deactivate the signals */
-               pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-       }
+               /* SPU Address 16 bit count format for 128 bit
+                * HW trace buffer is used for the SPU PC storage
+                *    HDR bits          0:15
+                *    SPU Addr 0 bits   16:31
+                *    SPU Addr 1 bits   32:47
+                *    unused bits       48:127
+                *
+                * HDR: bit4 = 1 SPU Address 0 valid
+                * HDR: bit5 = 1 SPU Address 1 valid
+                *  - unfortunately, the valid bits don't seem to work
+                *
+                * Note trace_buffer[0] holds bits 0:63 of the HW
+                * trace buffer, trace_buffer[1] holds bits 64:127
+                */
 
-       stop_spu_profiling();
-}
+               trace_entry = trace_buffer[0] & 0x00000000FFFF0000;
 
-static void cell_global_stop_ppu(void)
-{
-       int cpu;
+               /* only the top 16 bits of the 18 bit SPU PC address
+                * are stored in the trace buffer, hence shift right
+                * by 16 - 2 = 14 bits */
+               sample = trace_entry >> 14;
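+               /* e.g. (illustrative value): trace_buffer[0] = 0x12340000
+                * masks to 0x12340000, and >> 14 gives 0x48d0, which is
+                * 0x1234 << 2, a word-aligned 18 bit SPU PC
+                */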
+               last_trace_buffer = trace_buffer[0];
 
-       /*
-        * This routine will be called once for the system.
-        * There is one performance monitor per node, so we
-        * only need to perform this function once per node.
-        */
-       del_timer_sync(&timer_virt_cntr);
-       oprofile_running = 0;
-       smp_wmb();
+               spu_num = spu_evnt_phys_spu_indx
+                       + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
 
-       for_each_online_cpu(cpu) {
-               if (cbe_get_hw_thread_id(cpu))
-                       continue;
+               /* make sure only one process at a time is calling
+                * spu_sync_buffer()
+                */
+               spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+                                 sample_array_lock_flags);
+               spu_sync_buffer(spu_num, &sample, 1);
+               spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+                                      sample_array_lock_flags);
 
-               cbe_sync_irq(cbe_cpu_to_node(cpu));
-               /* Stop the counters */
-               cbe_disable_pm(cpu);
+               smp_wmb();    /* ensure spu event buffer updates are written;
+                              * don't want events intermingled... */
 
-               /* Deactivate the signals */
-               pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+               /* The counters were frozen by the interrupt.
+                * Reenable the interrupt and restart the counters.
+                */
+               cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+               cbe_enable_pm_interrupts(cpu, hdw_thread,
+                                        virt_cntr_inter_mask);
 
-               /* Deactivate interrupts */
-               cbe_disable_pm_interrupts(cpu);
-       }
-}
+               /* clear the trace buffer, re-enable writes to trace buff */
+               cbe_write_pm(cpu, trace_address, 0);
+               cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
 
-static void cell_global_stop(void)
-{
-       if (spu_cycle_reset)
-               cell_global_stop_spu();
-       else
-               cell_global_stop_ppu();
+               /* The writes to the various performance counters only writes
+                * to a latch.  The new values (interrupt setting bits, reset
+                * counter value etc.) are not copied to the actual registers
+                * until the performance monitor is enabled.  In order to get
+                * this to work as desired, the performance monitor needs to
+                * be disabled while writing to the latches.  This is a
+                * HW design issue.
+                */
+               write_pm_cntrl(cpu);
+               cbe_enable_pm(cpu);
+       }
+       spin_unlock_irqrestore(&cntr_lock, flags);
 }
 
-static void cell_handle_interrupt(struct pt_regs *regs,
-                               struct op_counter_config *ctr)
+static void cell_handle_interrupt_ppu(struct pt_regs *regs,
+                                     struct op_counter_config *ctr)
 {
        u32 cpu;
        u64 pc;
@@ -1123,7 +1621,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
         * routine are not running at the same time. See the
         * cell_virtual_cntr() routine for additional comments.
         */
-       spin_lock_irqsave(&virt_cntr_lock, flags);
+       spin_lock_irqsave(&cntr_lock, flags);
 
        /*
         * Need to disable and reenable the performance counters
@@ -1149,7 +1647,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
                for (i = 0; i < num_counters; ++i) {
                        if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
                            && ctr[i].enabled) {
-                               oprofile_add_pc(pc, is_kernel, i);
+                               oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                cbe_write_ctr(cpu, i, reset_value[i]);
                        }
                }
@@ -1170,13 +1668,22 @@ static void cell_handle_interrupt(struct pt_regs *regs,
                 * to a latch.  The new values (interrupt setting bits, reset
                 * counter value etc.) are not copied to the actual registers
                 * until the performance monitor is enabled.  In order to get
-                * this to work as desired, the permormance monitor needs to
+                * this to work as desired, the performance monitor needs to
                 * be disabled while writing to the latches.  This is a
                 * HW design issue.
                 */
                cbe_enable_pm(cpu);
        }
-       spin_unlock_irqrestore(&virt_cntr_lock, flags);
+       spin_unlock_irqrestore(&cntr_lock, flags);
+}
+
+static void cell_handle_interrupt(struct pt_regs *regs,
+                                 struct op_counter_config *ctr)
+{
+       if (profiling_mode == PPU_PROFILING)
+               cell_handle_interrupt_ppu(regs, ctr);
+       else
+               cell_handle_interrupt_spu(regs, ctr);
 }
 
 /*
@@ -1186,7 +1693,8 @@ static void cell_handle_interrupt(struct pt_regs *regs,
  */
 static int cell_sync_start(void)
 {
-       if (spu_cycle_reset)
+       if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+           (profiling_mode == SPU_PROFILING_EVENTS))
                return spu_sync_start();
        else
                return DO_GENERIC_SYNC;
@@ -1194,7 +1702,8 @@ static int cell_sync_start(void)
 
 static int cell_sync_stop(void)
 {
-       if (spu_cycle_reset)
+       if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+           (profiling_mode == SPU_PROFILING_EVENTS))
                return spu_sync_stop();
        else
                return 1;