diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7f8e6a9..e5d1211 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,6 +53,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -104,13 +105,6 @@ static inline notrace void set_soft_enabled(unsigned long enable)
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-#ifdef CONFIG_PERF_COUNTERS
-notrace void __weak perf_counter_do_pending(void)
-{
-       set_perf_counter_pending(0);
-}
-#endif
-
 notrace void raw_local_irq_restore(unsigned long en)
 {
        /*
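
The hunk above tracks 2.6.32's perf_counter -> perf_event rename: the header becomes <linux/perf_event.h> and powerpc no longer carries its own __weak no-op fallback, since the generic code now supplies perf_event_do_pending(). A minimal userspace sketch of the weak-symbol pattern the removed lines relied on (names illustrative, builds with gcc):

    #include <stdio.h>

    /* Weak definition: the link-time fallback. Any ordinary ("strong")
     * definition of do_pending() elsewhere overrides it. */
    void __attribute__((weak)) do_pending(void)
    {
            printf("weak fallback: nothing pending\n");
    }

    int main(void)
    {
            do_pending();   /* the strong override wins if one is linked in */
            return 0;
    }
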
@@ -124,6 +118,7 @@ notrace void raw_local_irq_restore(unsigned long en)
        if (!en)
                return;
 
+#ifdef CONFIG_PPC_STD_MMU_64
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
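
The new #ifdef opens here and closes after the iSeries block in the next hunk: lazy-interrupt handling for legacy iSeries only applies with the 64-bit hash MMU, so guarding it with CONFIG_PPC_STD_MMU_64 compiles it out of other builds. A rough userspace model of the firmware-feature test itself (the real mask and bit values live in arch/powerpc; the ones below are invented):

    #include <stdio.h>

    #define FW_FEATURE_ISERIES      (1UL << 0)      /* invented bit value */

    static unsigned long firmware_features;         /* set once at boot */

    static int firmware_has_feature(unsigned long feature)
    {
            return (firmware_features & feature) != 0;
    }

    int main(void)
    {
            printf("running on iSeries? %d\n",
                   firmware_has_feature(FW_FEATURE_ISERIES));
            return 0;
    }
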
@@ -141,9 +136,12 @@ notrace void raw_local_irq_restore(unsigned long en)
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
-       if (get_perf_counter_pending())
-               perf_counter_do_pending();
+       if (test_perf_event_pending()) {
+               clear_perf_event_pending();
+               perf_event_do_pending();
+       }
 
        /*
         * if (get_paca()->hard_enabled) return;
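
The perf hook also changes shape: instead of calling a getter and leaving the clearing to the callee, the flag is explicitly tested and cleared before perf_event_do_pending() runs, so an event raised while the handler executes simply re-arms the flag for the next pass. A minimal model of that ordering (userspace, all names hypothetical):

    #include <stdio.h>

    static volatile int perf_pending;       /* stands in for the per-CPU paca bit */

    static void perf_do_pending(void)
    {
            printf("draining pending perf work\n");
    }

    static void irq_restore_path(void)
    {
            if (perf_pending) {
                    perf_pending = 0;       /* clear first ... */
                    perf_do_pending();      /* ... then drain */
            }
    }

    int main(void)
    {
            perf_pending = 1;
            irq_restore_path();
            return 0;
    }
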
@@ -181,7 +179,7 @@ int show_interrupts(struct seq_file *p, void *v)
 {
        int i = *(loff_t *)v, j;
        struct irqaction *action;
-       irq_desc_t *desc;
+       struct irq_desc *desc;
        unsigned long flags;
 
        if (i == 0) {
@@ -200,7 +198,7 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_printf(p, "%3d: ", i);
 #ifdef CONFIG_SMP
                for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+                       seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #else
                seq_printf(p, "%10u ", kstat_irqs(i));
 #endif /* CONFIG_SMP */
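
show_interrupts() now goes through the kstat_irqs_cpu() accessor rather than indexing kstat_cpu(j).irqs[] directly, matching genirq's move of per-IRQ counters out of the per-CPU kstat array (a prerequisite for sparse IRQ numbering). For reference, the accessor of that era reads the counter out of the irq_desc, approximately:

    static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
    {
            struct irq_desc *desc = irq_to_desc(irq);

            return desc->kstat_irqs[cpu];
    }
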
@@ -258,77 +256,84 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
-void do_IRQ(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-       unsigned int irq;
 #ifdef CONFIG_IRQSTACKS
+static inline void handle_one_irq(unsigned int irq)
+{
        struct thread_info *curtp, *irqtp;
-#endif
+       unsigned long saved_sp_limit;
+       struct irq_desc *desc;
 
-       irq_enter();
+       /* Switch to the irq stack to handle this */
+       curtp = current_thread_info();
+       irqtp = hardirq_ctx[smp_processor_id()];
+
+       if (curtp == irqtp) {
+               /* We're already on the irq stack, just handle it */
+               generic_handle_irq(irq);
+               return;
+       }
+
+       desc = irq_desc + irq;
+       saved_sp_limit = current->thread.ksp_limit;
+
+       irqtp->task = curtp->task;
+       irqtp->flags = 0;
+
+       /* Copy the softirq bits in preempt_count so that the
+        * softirq checks work in the hardirq context. */
+       irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
+                              (curtp->preempt_count & SOFTIRQ_MASK);
+
+       current->thread.ksp_limit = (unsigned long)irqtp +
+               _ALIGN_UP(sizeof(struct thread_info), 16);
+
+       call_handle_irq(irq, desc, irqtp, desc->handle_irq);
+       current->thread.ksp_limit = saved_sp_limit;
+       irqtp->task = NULL;
 
+       /* Set any flag that may have been set on the
+        * alternate stack
+        */
+       if (irqtp->flags)
+               set_bits(irqtp->flags, &curtp->flags);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+       generic_handle_irq(irq);
+}
+#endif
+
+static inline void check_stack_overflow(void)
+{
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
-       /* Debugging check for stack overflow: is there less than 2KB free? */
-       {
-               long sp;
+       long sp;
 
-               sp = __get_SP() & (THREAD_SIZE-1);
+       sp = __get_SP() & (THREAD_SIZE-1);
 
-               if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-                       printk("do_IRQ: stack overflow: %ld\n",
-                               sp - sizeof(struct thread_info));
-                       dump_stack();
-               }
+       /* check for stack overflow: is there less than 2KB free? */
+       if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
+               printk("do_IRQ: stack overflow: %ld\n",
+                       sp - sizeof(struct thread_info));
+               dump_stack();
        }
 #endif
+}
 
-       /*
-        * Every platform is required to implement ppc_md.get_irq.
-        * This function will either return an irq number or NO_IRQ to
-        * indicate there are no more pending.
-        * The value NO_IRQ_IGNORE is for buggy hardware and means that this
-        * IRQ has already been handled. -- Tom
-        */
-       irq = ppc_md.get_irq();
+void do_IRQ(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       unsigned int irq;
 
-       if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
-#ifdef CONFIG_IRQSTACKS
-               /* Switch to the irq stack to handle this */
-               curtp = current_thread_info();
-               irqtp = hardirq_ctx[smp_processor_id()];
-               if (curtp != irqtp) {
-                       struct irq_desc *desc = irq_desc + irq;
-                       void *handler = desc->handle_irq;
-                       unsigned long saved_sp_limit = current->thread.ksp_limit;
-                       if (handler == NULL)
-                               handler = &__do_IRQ;
-                       irqtp->task = curtp->task;
-                       irqtp->flags = 0;
-
-                       /* Copy the softirq bits in preempt_count so that the
-                        * softirq checks work in the hardirq context.
-                        */
-                       irqtp->preempt_count =
-                               (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-                               (curtp->preempt_count & SOFTIRQ_MASK);
+       irq_enter();
 
-                       current->thread.ksp_limit = (unsigned long)irqtp +
-                               _ALIGN_UP(sizeof(struct thread_info), 16);
-                       call_handle_irq(irq, desc, irqtp, handler);
-                       current->thread.ksp_limit = saved_sp_limit;
-                       irqtp->task = NULL;
+       check_stack_overflow();
 
+       irq = ppc_md.get_irq();
 
-                       /* Set any flag that may have been set on the
-                        * alternate stack
-                        */
-                       if (irqtp->flags)
-                               set_bits(irqtp->flags, &curtp->flags);
-               } else
-#endif
-                       generic_handle_irq(irq);
-       } else if (irq != NO_IRQ_IGNORE)
+       if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
+               handle_one_irq(irq);
+       else if (irq != NO_IRQ_IGNORE)
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;
 
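
The rewrite above splits do_IRQ() into handle_one_irq() and check_stack_overflow(); both collapse to (nearly) nothing when CONFIG_IRQSTACKS or CONFIG_DEBUG_STACKOVERFLOW is off, and note the old fallback to __do_IRQ for a NULL handler is gone. One easy-to-miss line is the ksp_limit update: the thread_info sits at the base of the irq stack, so the new software stack limit is the stack base plus sizeof(struct thread_info), rounded up to 16 as _ALIGN_UP() does. A runnable sketch of that arithmetic with invented values:

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long irqtp   = 0x10000000UL;   /* pretend irq-stack base */
            unsigned long ti_size = 56;             /* pretend sizeof(struct thread_info) */

            /* prints ksp_limit = 0x10000040: the stack may not grow
             * down past the thread_info at the base of the stack */
            printf("ksp_limit = %#lx\n", irqtp + ALIGN_UP(ti_size, 16));
            return 0;
    }
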
@@ -682,11 +687,13 @@ unsigned int irq_create_mapping(struct irq_host *host,
                        return NO_IRQ;
                }
        }
-       pr_debug("irq: -> obtained virq %d\n", virq);
 
        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;
 
+       printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
+               hwirq, host->of_node ? host->of_node->full_name : "null", virq);
+
        return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_mapping);
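
irq_create_mapping()'s trace also changes: the message moves after irq_setup_virq() succeeds and is promoted from pr_debug() to printk(KERN_DEBUG ...). pr_debug() compiles away entirely unless DEBUG is defined (or dynamic debug takes over), so the successful-mapping line is now always present in the log at debug level. A userspace model of the difference, assuming the usual kernel definition:

    #include <stdio.h>

    #ifdef DEBUG
    # define pr_debug(fmt, ...)  fprintf(stderr, fmt, ##__VA_ARGS__)
    #else
    # define pr_debug(fmt, ...)  do { } while (0)   /* compiled out */
    #endif

    int main(void)
    {
            pr_debug("visible only when built with -DDEBUG\n");
            printf("always compiled in, like printk(KERN_DEBUG ...)\n");
            return 0;
    }
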
@@ -1048,7 +1055,7 @@ arch_initcall(irq_late_init);
 static int virq_debug_show(struct seq_file *m, void *private)
 {
        unsigned long flags;
-       irq_desc_t *desc;
+       struct irq_desc *desc;
        const char *p;
        char none[] = "none";
        int i;