include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c3d4e94..3b3d2c8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -27,8 +27,6 @@
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/smp.h>
 #include <linux/init.h>
@@ -39,6 +37,7 @@
 #include <linux/proc_fs.h>
 
 #include <asm/system.h>
+#include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
 /*
@@ -69,23 +68,24 @@ int show_interrupts(struct seq_file *p, void *v)
        }
 
        if (i < NR_IRQS) {
-               spin_lock_irqsave(&irq_desc[i].lock, flags);
+               raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto unlock;
 
                seq_printf(p, "%3d: ", i);
                for_each_present_cpu(cpu)
-                       seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+                       seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
+               seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
                seq_printf(p, "  %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
 
                seq_putc(p, '\n');
 unlock:
-               spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+               raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
-#ifdef CONFIG_ARCH_ACORN
+#ifdef CONFIG_FIQ
                show_fiq_list(p, v);
 #endif
 #ifdef CONFIG_SMP
@@ -97,41 +97,39 @@ unlock:
        return 0;
 }
 
-/* Handle bad interrupts */
-static struct irq_desc bad_irq_desc = {
-       .handle_irq = handle_bad_irq,
-       .lock = SPIN_LOCK_UNLOCKED
-};
-
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
  * own 'handler'
  */
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
-       struct irqdesc *desc = irq_desc + irq;
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       irq_enter();
 
        /*
         * Some hardware gives randomly wrong interrupts.  Rather
         * than crashing, do something sensible.
         */
-       if (irq >= NR_IRQS)
-               desc = &bad_irq_desc;
-
-       irq_enter();
-
-       desc_handle_irq(irq, desc, regs);
+       if (unlikely(irq >= NR_IRQS)) {
+               if (printk_ratelimit())
+                       printk(KERN_WARNING "Bad IRQ%u\n", irq);
+               ack_bad_irq(irq);
+       } else {
+               generic_handle_irq(irq);
+       }
 
        /* AT91 specific workaround */
        irq_finish(irq);
 
        irq_exit();
+       set_irq_regs(old_regs);
 }
 
 void set_irq_flags(unsigned int irq, unsigned int iflags)
 {
-       struct irqdesc *desc;
+       struct irq_desc *desc;
        unsigned long flags;
 
        if (irq >= NR_IRQS) {
@@ -140,7 +138,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
        }
 
        desc = irq_desc + irq;
-       spin_lock_irqsave(&desc->lock, flags);
+       raw_spin_lock_irqsave(&desc->lock, flags);
        desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
        if (iflags & IRQF_VALID)
                desc->status &= ~IRQ_NOREQUEST;
@@ -148,7 +146,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
                desc->status &= ~IRQ_NOPROBE;
        if (!(iflags & IRQF_NOAUTOEN))
                desc->status &= ~IRQ_NOAUTOEN;
-       spin_unlock_irqrestore(&desc->lock, flags);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void __init init_IRQ(void)
@@ -156,17 +154,22 @@ void __init init_IRQ(void)
        int irq;
 
        for (irq = 0; irq < NR_IRQS; irq++)
-               irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_DELAYED_DISABLE |
-                       IRQ_NOPROBE;
+               irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
-#ifdef CONFIG_SMP
-       bad_irq_desc.affinity = CPU_MASK_ALL;
-       bad_irq_desc.cpu = smp_processor_id();
-#endif
        init_arch_irq();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+
+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+{
+       pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
+
+       raw_spin_lock_irq(&desc->lock);
+       desc->chip->set_affinity(irq, cpumask_of(cpu));
+       raw_spin_unlock_irq(&desc->lock);
+}
+
 /*
  * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
  * the affinity settings do not allow other CPUs, force them onto any
@@ -177,18 +180,19 @@ void migrate_irqs(void)
        unsigned int i, cpu = smp_processor_id();
 
        for (i = 0; i < NR_IRQS; i++) {
-               struct irqdesc *desc = irq_desc + i;
-
-               if (desc->cpu == cpu) {
-                       unsigned int newcpu = any_online_cpu(desc->affinity);
+               struct irq_desc *desc = irq_desc + i;
 
-                       if (newcpu == NR_CPUS) {
+               if (desc->node == cpu) {
+                       unsigned int newcpu = cpumask_any_and(desc->affinity,
+                                                             cpu_online_mask);
+                       if (newcpu >= nr_cpu_ids) {
                                if (printk_ratelimit())
                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                                               i, cpu);
 
-                               cpus_setall(desc->affinity);
-                               newcpu = any_online_cpu(desc->affinity);
+                               cpumask_setall(desc->affinity);
+                               newcpu = cpumask_any_and(desc->affinity,
+                                                        cpu_online_mask);
                        }
 
                        route_irq(desc, i, newcpu);