diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0d2e37c..9040330 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,6 +53,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -69,6 +70,8 @@
 #include <asm/firmware.h>
 #include <asm/lv1call.h>
 #endif
+#define CREATE_TRACE_POINTS
+#include <asm/trace.h>
 
 int __irq_offset_value;
 static int ppc_spurious_interrupts;
@@ -84,7 +87,10 @@ extern int tau_interrupts(int);
 #endif /* CONFIG_PPC32 */
 
 #ifdef CONFIG_PPC64
+
+#ifndef CONFIG_SPARSE_IRQ
 EXPORT_SYMBOL(irq_desc);
+#endif
 
 int distribute_irqs = 1;
 
@@ -104,13 +110,6 @@ static inline notrace void set_soft_enabled(unsigned long enable)
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-#ifdef CONFIG_PERF_COUNTERS
-notrace void __weak perf_counter_do_pending(void)
-{
-       set_perf_counter_pending(0);
-}
-#endif
-
 notrace void raw_local_irq_restore(unsigned long en)
 {
        /*
@@ -124,6 +123,7 @@ notrace void raw_local_irq_restore(unsigned long en)
        if (!en)
                return;
 
+#ifdef CONFIG_PPC_STD_MMU_64
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
@@ -141,9 +141,12 @@ notrace void raw_local_irq_restore(unsigned long en)
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
-       if (get_perf_counter_pending())
-               perf_counter_do_pending();
+       if (test_perf_event_pending()) {
+               clear_perf_event_pending();
+               perf_event_do_pending();
+       }
 
        /*
         * if (get_paca()->hard_enabled) return;
@@ -189,33 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
-       }
-
-       if (i < NR_IRQS) {
-               desc = get_irq_desc(i);
-               spin_lock_irqsave(&desc->lock, flags);
-               action = desc->action;
-               if (!action || !action->handler)
-                       goto skip;
-               seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
-               for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#else
-               seq_printf(p, "%10u ", kstat_irqs(i));
-#endif /* CONFIG_SMP */
-               if (desc->chip)
-                       seq_printf(p, " %s ", desc->chip->typename);
-               else
-                       seq_puts(p, "  None      ");
-               seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
-               seq_printf(p, "    %s", action->name);
-               for (action = action->next; action; action = action->next)
-                       seq_printf(p, ", %s", action->name);
-               seq_putc(p, '\n');
-skip:
-               spin_unlock_irqrestore(&desc->lock, flags);
-       } else if (i == NR_IRQS) {
+       } else if (i == nr_irqs) {
 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
                if (tau_initialized){
                        seq_puts(p, "TAU: ");
@@ -225,30 +202,68 @@ skip:
                }
 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/
                seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
+
+               return 0;
        }
+
+       desc = irq_to_desc(i);
+       if (!desc)
+               return 0;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+
+       action = desc->action;
+       if (!action || !action->handler)
+               goto skip;
+
+       seq_printf(p, "%3d: ", i);
+#ifdef CONFIG_SMP
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+#else
+       seq_printf(p, "%10u ", kstat_irqs(i));
+#endif /* CONFIG_SMP */
+
+       if (desc->chip)
+               seq_printf(p, " %s ", desc->chip->name);
+       else
+               seq_puts(p, "  None      ");
+
+       seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
+       seq_printf(p, "    %s", action->name);
+
+       for (action = action->next; action; action = action->next)
+               seq_printf(p, ", %s", action->name);
+       seq_putc(p, '\n');
+
+skip:
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+
        return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(cpumask_t map)
 {
+       struct irq_desc *desc;
        unsigned int irq;
        static int warned;
 
        for_each_irq(irq) {
                cpumask_t mask;
 
-               if (irq_desc[irq].status & IRQ_PER_CPU)
+               desc = irq_to_desc(irq);
+               if (desc && desc->status & IRQ_PER_CPU)
                        continue;
 
-               cpumask_and(&mask, irq_desc[irq].affinity, &map);
+               cpumask_and(&mask, desc->affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
-               if (irq_desc[irq].chip->set_affinity)
-                       irq_desc[irq].chip->set_affinity(irq, &mask);
-               else if (irq_desc[irq].action && !(warned++))
+               if (desc->chip->set_affinity)
+                       desc->chip->set_affinity(irq, &mask);
+               else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }
 
@@ -258,77 +273,86 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
-void do_IRQ(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-       unsigned int irq;
 #ifdef CONFIG_IRQSTACKS
+static inline void handle_one_irq(unsigned int irq)
+{
        struct thread_info *curtp, *irqtp;
-#endif
+       unsigned long saved_sp_limit;
+       struct irq_desc *desc;
 
-       irq_enter();
+       /* Switch to the irq stack to handle this */
+       curtp = current_thread_info();
+       irqtp = hardirq_ctx[smp_processor_id()];
+
+       if (curtp == irqtp) {
+               /* We're already on the irq stack, just handle it */
+               generic_handle_irq(irq);
+               return;
+       }
+
+       desc = irq_to_desc(irq);
+       saved_sp_limit = current->thread.ksp_limit;
+
+       irqtp->task = curtp->task;
+       irqtp->flags = 0;
+
+       /* Copy the softirq bits in preempt_count so that the
+        * softirq checks work in the hardirq context. */
+       irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
+                              (curtp->preempt_count & SOFTIRQ_MASK);
 
+       current->thread.ksp_limit = (unsigned long)irqtp +
+               _ALIGN_UP(sizeof(struct thread_info), 16);
+
+       call_handle_irq(irq, desc, irqtp, desc->handle_irq);
+       current->thread.ksp_limit = saved_sp_limit;
+       irqtp->task = NULL;
+
+       /* Set any flag that may have been set on the
+        * alternate stack
+        */
+       if (irqtp->flags)
+               set_bits(irqtp->flags, &curtp->flags);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+       generic_handle_irq(irq);
+}
+#endif
+
+static inline void check_stack_overflow(void)
+{
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
-       /* Debugging check for stack overflow: is there less than 2KB free? */
-       {
-               long sp;
+       long sp;
 
-               sp = __get_SP() & (THREAD_SIZE-1);
+       sp = __get_SP() & (THREAD_SIZE-1);
 
-               if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-                       printk("do_IRQ: stack overflow: %ld\n",
-                               sp - sizeof(struct thread_info));
-                       dump_stack();
-               }
+       /* check for stack overflow: is there less than 2KB free? */
+       if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
+               printk("do_IRQ: stack overflow: %ld\n",
+                       sp - sizeof(struct thread_info));
+               dump_stack();
        }
 #endif
+}
 
-       /*
-        * Every platform is required to implement ppc_md.get_irq.
-        * This function will either return an irq number or NO_IRQ to
-        * indicate there are no more pending.
-        * The value NO_IRQ_IGNORE is for buggy hardware and means that this
-        * IRQ has already been handled. -- Tom
-        */
-       irq = ppc_md.get_irq();
+void do_IRQ(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       unsigned int irq;
 
-       if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
-#ifdef CONFIG_IRQSTACKS
-               /* Switch to the irq stack to handle this */
-               curtp = current_thread_info();
-               irqtp = hardirq_ctx[smp_processor_id()];
-               if (curtp != irqtp) {
-                       struct irq_desc *desc = irq_desc + irq;
-                       void *handler = desc->handle_irq;
-                       unsigned long saved_sp_limit = current->thread.ksp_limit;
-                       if (handler == NULL)
-                               handler = &__do_IRQ;
-                       irqtp->task = curtp->task;
-                       irqtp->flags = 0;
-
-                       /* Copy the softirq bits in preempt_count so that the
-                        * softirq checks work in the hardirq context.
-                        */
-                       irqtp->preempt_count =
-                               (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-                               (curtp->preempt_count & SOFTIRQ_MASK);
+       trace_irq_entry(regs);
+
+       irq_enter();
 
-                       current->thread.ksp_limit = (unsigned long)irqtp +
-                               _ALIGN_UP(sizeof(struct thread_info), 16);
-                       call_handle_irq(irq, desc, irqtp, handler);
-                       current->thread.ksp_limit = saved_sp_limit;
-                       irqtp->task = NULL;
+       check_stack_overflow();
 
+       irq = ppc_md.get_irq();
 
-                       /* Set any flag that may have been set on the
-                        * alternate stack
-                        */
-                       if (irqtp->flags)
-                               set_bits(irqtp->flags, &curtp->flags);
-               } else
-#endif
-                       generic_handle_irq(irq);
-       } else if (irq != NO_IRQ_IGNORE)
+       if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
+               handle_one_irq(irq);
+       else if (irq != NO_IRQ_IGNORE)
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;
 
@@ -343,6 +367,8 @@ void do_IRQ(struct pt_regs *regs)
                timer_interrupt(regs);
        }
 #endif
+
+       trace_irq_exit(regs);
 }
 
 void __init init_IRQ(void)
@@ -530,7 +556,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
                        smp_wmb();
 
                        /* Clear norequest flags */
-                       get_irq_desc(i)->status &= ~IRQ_NOREQUEST;
+                       irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
 
                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
@@ -596,8 +622,16 @@ void irq_set_virq_count(unsigned int count)
 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                            irq_hw_number_t hwirq)
 {
+       struct irq_desc *desc;
+
+       desc = irq_to_desc_alloc_node(virq, 0);
+       if (!desc) {
+               pr_debug("irq: -> allocating desc failed\n");
+               goto error;
+       }
+
        /* Clear IRQ_NOREQUEST flag */
-       get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;
+       desc->status &= ~IRQ_NOREQUEST;
 
        /* map it */
        smp_wmb();
@@ -606,11 +640,14 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
 
        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
-               irq_free_virt(virq, 1);
-               return -1;
+               goto error;
        }
 
        return 0;
+
+error:
+       irq_free_virt(virq, 1);
+       return -1;
 }
 
 unsigned int irq_create_direct_mapping(struct irq_host *host)
@@ -682,17 +719,19 @@ unsigned int irq_create_mapping(struct irq_host *host,
                        return NO_IRQ;
                }
        }
-       pr_debug("irq: -> obtained virq %d\n", virq);
 
        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;
 
+       printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
+               hwirq, host->of_node ? host->of_node->full_name : "null", virq);
+
        return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_mapping);
 
 unsigned int irq_create_of_mapping(struct device_node *controller,
-                                  u32 *intspec, unsigned int intsize)
+                                  const u32 *intspec, unsigned int intsize)
 {
        struct irq_host *host;
        irq_hw_number_t hwirq;
@@ -725,7 +764,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 
        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
-           type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
+           type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
 }
@@ -797,7 +836,7 @@ void irq_dispose_mapping(unsigned int virq)
        irq_map[virq].hwirq = host->inval_irq;
 
        /* Set some flags */
-       get_irq_desc(virq)->status |= IRQ_NOREQUEST;
+       irq_to_desc(virq)->status |= IRQ_NOREQUEST;
 
        /* Free it */
        irq_free_virt(virq, 1);
@@ -989,12 +1028,24 @@ void irq_free_virt(unsigned int virq, unsigned int count)
        spin_unlock_irqrestore(&irq_big_lock, flags);
 }
 
-void irq_early_init(void)
+int arch_early_irq_init(void)
 {
-       unsigned int i;
+       struct irq_desc *desc;
+       int i;
+
+       for (i = 0; i < NR_IRQS; i++) {
+               desc = irq_to_desc(i);
+               if (desc)
+                       desc->status |= IRQ_NOREQUEST;
+       }
+
+       return 0;
+}
 
-       for (i = 0; i < NR_IRQS; i++)
-               get_irq_desc(i)->status |= IRQ_NOREQUEST;
+int arch_init_chip_data(struct irq_desc *desc, int node)
+{
+       desc->status |= IRQ_NOREQUEST;
+       return 0;
 }
 
 /* We need to create the radix trees late */
@@ -1056,16 +1107,19 @@ static int virq_debug_show(struct seq_file *m, void *private)
        seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
                      "chip name", "host name");
 
-       for (i = 1; i < NR_IRQS; i++) {
-               desc = get_irq_desc(i);
-               spin_lock_irqsave(&desc->lock, flags);
+       for (i = 1; i < nr_irqs; i++) {
+               desc = irq_to_desc(i);
+               if (!desc)
+                       continue;
+
+               raw_spin_lock_irqsave(&desc->lock, flags);
 
                if (desc->action && desc->action->handler) {
                        seq_printf(m, "%5d  ", i);
                        seq_printf(m, "0x%05lx  ", virq_to_hw(i));
 
-                       if (desc->chip && desc->chip->typename)
-                               p = desc->chip->typename;
+                       if (desc->chip && desc->chip->name)
+                               p = desc->chip->name;
                        else
                                p = none;
                        seq_printf(m, "%-15s  ", p);
@@ -1077,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
                        seq_printf(m, "%s\n", p);
                }
 
-               spin_unlock_irqrestore(&desc->lock, flags);
+               raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
 
        return 0;