Merge branch 'cpus4096' into irq/threaded
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 9bdd019..1c740f5 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -35,8 +35,8 @@
 
 #undef PARISC_IRQ_CR16_COUNTS
 
-extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
-extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
+extern irqreturn_t timer_interrupt(int, void *);
+extern irqreturn_t ipi_interrupt(int, void *);
 
 #define EIEM_MASK(irq)       (1UL<<(CPU_IRQ_MAX - irq))
 
@@ -46,14 +46,10 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
 static volatile unsigned long cpu_eiem = 0;
 
 /*
-** ack bitmap ... habitually set to 1, but reset to zero
+** local ACK bitmap ... habitually set to 1, but reset to zero
 ** between ->ack() and ->end() of the interrupt to prevent
 ** re-interruption of a processing interrupt.
 */
-static volatile unsigned long global_ack_eiem = ~0UL;
-/*
-** Local bitmap, same as above but for per-cpu interrupts
-*/
 static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
 
 static void cpu_disable_irq(unsigned int irq)
@@ -94,13 +90,11 @@ void cpu_ack_irq(unsigned int irq)
        int cpu = smp_processor_id();
 
        /* Clear in EIEM so we can no longer process */
-       if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
-               per_cpu(local_ack_eiem, cpu) &= ~mask;
-       else
-               global_ack_eiem &= ~mask;
+       per_cpu(local_ack_eiem, cpu) &= ~mask;
 
        /* disable the interrupt */
-       set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+       set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
+
        /* and now ack it */
        mtctl(mask, 23);
 }
@@ -111,17 +105,14 @@ void cpu_end_irq(unsigned int irq)
        int cpu = smp_processor_id();
 
        /* set it in the eiems---it's no longer in process */
-       if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
-               per_cpu(local_ack_eiem, cpu) |= mask;
-       else
-               global_ack_eiem |= mask;
+       per_cpu(local_ack_eiem, cpu) |= mask;
 
        /* enable the interrupt */
-       set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+       set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
 }
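
With the global ACK bitmap gone, the ->ack()/->end() pair touches only the
per-CPU word. A standalone sketch of that lifecycle, using a plain unsigned
long in place of the per-CPU variable, a stubbed set_eiem(), and an
illustrative CPU_IRQ_MAX; this models the masking discipline, not the real
per-CPU accessors:

    #include <assert.h>

    #define CPU_IRQ_MAX 31                      /* illustrative value */
    #define EIEM_MASK(irq) (1UL << (CPU_IRQ_MAX - (irq)))

    static unsigned long cpu_eiem = ~0UL;       /* demo: all IRQs enabled */
    static unsigned long local_ack_eiem = ~0UL; /* per-CPU in the kernel */
    static unsigned long hw_eiem;               /* what set_eiem() programs */

    static void set_eiem(unsigned long val) { hw_eiem = val; }

    static void cpu_ack_irq(unsigned int irq)
    {
            local_ack_eiem &= ~EIEM_MASK(irq);  /* block re-entry */
            set_eiem(cpu_eiem & local_ack_eiem);
    }

    static void cpu_end_irq(unsigned int irq)
    {
            local_ack_eiem |= EIEM_MASK(irq);   /* handler finished */
            set_eiem(cpu_eiem & local_ack_eiem);
    }

    int main(void)
    {
            cpu_ack_irq(5);
            assert(!(hw_eiem & EIEM_MASK(5)));  /* masked while in flight */
            cpu_end_irq(5);
            assert(hw_eiem & EIEM_MASK(5));     /* unmasked again */
            return 0;
    }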
 
 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 {
        int cpu_dest;
 
@@ -129,23 +120,25 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
        if (CHECK_IRQ_PER_CPU(irq)) {
                /* Bad linux design decision.  The mask has already
                 * been set; we must reset it */
-               irq_desc[irq].affinity = CPU_MASK_ALL;
+               cpumask_setall(&irq_desc[irq].affinity);
                return -EINVAL;
        }
 
        /* whatever mask they set, we just allow one CPU */
        cpu_dest = first_cpu(*dest);
-       *dest = cpumask_of_cpu(cpu_dest);
 
-       return 0;
+       return cpu_dest;
 }
 
-static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
-       if (cpu_check_affinity(irq, &dest))
+       int cpu_dest;
+
+       cpu_dest = cpu_check_affinity(irq, dest);
+       if (cpu_dest < 0)
                return;
 
-       irq_desc[irq].affinity = dest;
+       cpumask_copy(&irq_desc[irq].affinity, dest);
 }
 #endif
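
The signature changes above are the point of the cpus4096 merge: with
NR_CPUS raised to 4096, a by-value cpumask_t argument copies a 512-byte
bitmap onto the stack on every call, so the API moves to
const struct cpumask * plus accessor helpers (cpumask_setall,
cpumask_copy). A runnable model of the arithmetic, with the struct layout
kept illustrative:

    #include <stdio.h>
    #include <limits.h>

    #define NR_CPUS 4096
    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct cpumask { unsigned long bits[BITS_TO_LONGS(NR_CPUS)]; };

    int main(void)
    {
            /* 4096 bits => 512 bytes; too big to pass by value per call */
            printf("sizeof(struct cpumask) = %zu bytes\n",
                   sizeof(struct cpumask));
            return 0;
    }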
 
@@ -192,7 +185,7 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_printf(p, "%3d: ", i);
 #ifdef CONFIG_SMP
                for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+                       seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #else
                seq_printf(p, "%10u ", kstat_irqs(i));
 #endif
@@ -304,10 +297,10 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
-       return cpu_data[cpu].txn_addr;
+       return per_cpu(cpu_data, cpu).txn_addr;
 }
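
Both txn_ hunks also pick up the cpu_data conversion from a flat
NR_CPUS-sized array to a DEFINE_PER_CPU variable. A userspace model of what
the per_cpu(cpu_data, cpu) accesses resolve to; the two-field struct is an
illustrative stand-in for the real cpuinfo_parisc:

    #include <stdio.h>

    #define NR_CPUS 8                        /* illustrative */

    struct cpuinfo_parisc {                  /* stand-in; real struct is larger */
            unsigned long txn_addr;          /* IRQ transaction address */
            unsigned long hpa;               /* CPU's hard physical address */
    };

    /* In the kernel, DEFINE_PER_CPU places one copy in each CPU's per-CPU
     * area; a flat array only models the indexing, not the real layout. */
    static struct cpuinfo_parisc cpu_data[NR_CPUS];

    int main(void)
    {
            int cpu = 2;
            /* per_cpu(cpu_data, cpu).txn_addr behaves like: */
            printf("txn_addr: %#lx\n", cpu_data[cpu].txn_addr);
            return 0;
    }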
 
 
@@ -318,8 +311,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
        next_cpu++; /* assign to "next" CPU we want this bugger on */
 
        /* validate entry */
-       while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr || 
-               !cpu_online(next_cpu)))
+       while ((next_cpu < NR_CPUS) &&
+               (!per_cpu(cpu_data, next_cpu).txn_addr ||
+                !cpu_online(next_cpu)))
                next_cpu++;
 
        if (next_cpu >= NR_CPUS) 
@@ -336,34 +330,31 @@ unsigned int txn_alloc_data(unsigned int virt_irq)
 
 static inline int eirr_to_irq(unsigned long eirr)
 {
-#ifdef CONFIG_64BIT
-       int bit = fls64(eirr);
-#else
-       int bit = fls(eirr);
-#endif
+       int bit = fls_long(eirr);
        return (BITS_PER_LONG - bit) + TIMER_IRQ;
 }
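
fls_long() folds the old CONFIG_64BIT special case into one call: it returns
the 1-based index of the most significant set bit, so the highest EIRR bit
(the timer) maps to TIMER_IRQ and lower-priority bits count upward from
there. A runnable sketch of the mapping, assuming TIMER_IRQ is 0 purely for
the demo and using the GCC/Clang count-leading-zeros builtin for fls_long():

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG ((int)(CHAR_BIT * sizeof(unsigned long)))
    #define TIMER_IRQ 0                      /* demo value, not parisc's */

    /* 1-based index of the most significant set bit, 0 if no bit is set */
    static int fls_long(unsigned long x)
    {
            return x ? BITS_PER_LONG - __builtin_clzl(x) : 0;
    }

    static int eirr_to_irq(unsigned long eirr)
    {
            int bit = fls_long(eirr);
            return (BITS_PER_LONG - bit) + TIMER_IRQ;
    }

    int main(void)
    {
            /* MSB = highest-priority external interrupt = the timer */
            printf("%d\n", eirr_to_irq(1UL << (BITS_PER_LONG - 1))); /* 0 */
            printf("%d\n", eirr_to_irq(1UL << (BITS_PER_LONG - 2))); /* 1 */
            return 0;
    }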
 
 /* ONLY called from entry.S:intr_extint() */
 void do_cpu_irq_mask(struct pt_regs *regs)
 {
+       struct pt_regs *old_regs;
        unsigned long eirr_val;
        int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
        cpumask_t dest;
 #endif
 
+       old_regs = set_irq_regs(regs);
        local_irq_disable();
        irq_enter();
 
-       eirr_val = mfctl(23) & cpu_eiem & global_ack_eiem &
-               per_cpu(local_ack_eiem, cpu);
+       eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
        if (!eirr_val)
                goto set_out;
        irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-       dest = irq_desc[irq].affinity;
+       cpumask_copy(&dest, &irq_desc[irq].affinity);
        if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
            !cpu_isset(smp_processor_id(), dest)) {
                int cpu = first_cpu(dest);
@@ -371,25 +362,26 @@ void do_cpu_irq_mask(struct pt_regs *regs)
                printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
                       irq, smp_processor_id(), cpu);
                gsc_writel(irq + CPU_IRQ_BASE,
-                          cpu_data[cpu].hpa);
+                          per_cpu(cpu_data, cpu).hpa);
                goto set_out;
        }
 #endif
-       __do_IRQ(irq, regs);
+       __do_IRQ(irq);
 
  out:
        irq_exit();
+       set_irq_regs(old_regs);
        return;
 
  set_out:
-       set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+       set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
        goto out;
 }
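
The set_irq_regs() dance added above is the genirq replacement for passing
pt_regs to every handler: the entry path stashes the register pointer,
handlers retrieve it with get_irq_regs(), and the previous value is restored
on exit so nested interrupts unwind correctly. A minimal model of that
save/restore discipline; the real kernel keeps __irq_regs in per-CPU
storage rather than a single static:

    struct pt_regs;                          /* opaque here */

    static struct pt_regs *__irq_regs;       /* per-CPU in the kernel */

    static struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
    {
            struct pt_regs *old_regs = __irq_regs;

            __irq_regs = new_regs;
            return old_regs;                 /* caller restores on the way out */
    }

    static struct pt_regs *get_irq_regs(void)
    {
            return __irq_regs;               /* what handlers now call */
    }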
 
 static struct irqaction timer_action = {
        .handler = timer_interrupt,
        .name = "timer",
-       .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
+       .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
 };
 
 #ifdef CONFIG_SMP
@@ -408,7 +400,7 @@ static void claim_cpu_irqs(void)
        }
 
        irq_desc[TIMER_IRQ].action = &timer_action;
-       irq_desc[TIMER_IRQ].status |= IRQ_PER_CPU;
+       irq_desc[TIMER_IRQ].status = IRQ_PER_CPU;
 #ifdef CONFIG_SMP
        irq_desc[IPI_IRQ].action = &ipi_action;
        irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
@@ -432,5 +424,5 @@ void __init init_IRQ(void)
 
 void ack_bad_irq(unsigned int irq)
 {
-       printk("unexpected IRQ %d\n", irq);
+       printk(KERN_WARNING "unexpected IRQ %d\n", irq);
 }