Use kill_litter_super() in autofs4 ->kill_sb()
[safe/jmp/linux-2.6] / arch / parisc / kernel / irq.c
index 76ce5e3..efbcee5 100644 (file)
@@ -112,7 +112,7 @@ void cpu_end_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 {
        int cpu_dest;
 
@@ -120,28 +120,32 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
        if (CHECK_IRQ_PER_CPU(irq)) {
                /* Bad linux design decision.  The mask has already
                 * been set; we must reset it */
-               irq_desc[irq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[irq].affinity);
                return -EINVAL;
        }
 
        /* whatever mask they set, we just allow one CPU */
        cpu_dest = first_cpu(*dest);
-       *dest = cpumask_of_cpu(cpu_dest);
 
-       return 0;
+       return cpu_dest;
 }
 
-static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
-       if (cpu_check_affinity(irq, &dest))
-               return;
+       int cpu_dest;
+
+       cpu_dest = cpu_check_affinity(irq, dest);
+       if (cpu_dest < 0)
+               return -1;
+
+       cpumask_copy(irq_desc[irq].affinity, dest);
 
-       irq_desc[irq].affinity = dest;
+       return 0;
 }
 #endif
 
-static struct hw_interrupt_type cpu_interrupt_type = {
-       .typename       = "CPU",
+static struct irq_chip cpu_interrupt_type = {
+       .name           = "CPU",
        .startup        = cpu_startup_irq,
        .shutdown       = cpu_disable_irq,
        .enable         = cpu_enable_irq,
@@ -176,19 +180,19 @@ int show_interrupts(struct seq_file *p, void *v)
        if (i < NR_IRQS) {
                struct irqaction *action;
 
-               spin_lock_irqsave(&irq_desc[i].lock, flags);
+               raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
 #ifdef CONFIG_SMP
                for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+                       seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #else
                seq_printf(p, "%10u ", kstat_irqs(i));
 #endif
 
-               seq_printf(p, " %14s", irq_desc[i].chip->typename);
+               seq_printf(p, " %14s", irq_desc[i].chip->name);
 #ifndef PARISC_IRQ_CR16_COUNTS
                seq_printf(p, "  %s", action->name);
 
@@ -220,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
                seq_putc(p, '\n');
  skip:
-               spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+               raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
 
        return 0;
@@ -295,10 +299,10 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
-       return cpu_data[cpu].txn_addr;
+       return per_cpu(cpu_data, cpu).txn_addr;
 }
 
 
@@ -309,11 +313,12 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
        next_cpu++; /* assign to "next" CPU we want this bugger on */
 
        /* validate entry */
-       while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr || 
-               !cpu_online(next_cpu)))
+       while ((next_cpu < nr_cpu_ids) &&
+               (!per_cpu(cpu_data, next_cpu).txn_addr ||
+                !cpu_online(next_cpu)))
                next_cpu++;
 
-       if (next_cpu >= NR_CPUS)
+       if (next_cpu >= nr_cpu_ids)
                next_cpu = 0;   /* nothing else, assign monarch */
 
        return txn_affinity_addr(virt_irq, next_cpu);
@@ -351,7 +356,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
        irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-       dest = irq_desc[irq].affinity;
+       cpumask_copy(&dest, irq_desc[irq].affinity);
        if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
            !cpu_isset(smp_processor_id(), dest)) {
                int cpu = first_cpu(dest);
@@ -359,7 +364,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
                printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
                       irq, smp_processor_id(), cpu);
                gsc_writel(irq + CPU_IRQ_BASE,
-                          cpu_data[cpu].hpa);
+                          per_cpu(cpu_data, cpu).hpa);
                goto set_out;
        }
 #endif
@@ -397,7 +402,7 @@ static void claim_cpu_irqs(void)
        }
 
        irq_desc[TIMER_IRQ].action = &timer_action;
-       irq_desc[TIMER_IRQ].status |= IRQ_PER_CPU;
+       irq_desc[TIMER_IRQ].status = IRQ_PER_CPU;
 #ifdef CONFIG_SMP
        irq_desc[IPI_IRQ].action = &ipi_action;
        irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
@@ -418,8 +423,3 @@ void __init init_IRQ(void)
         set_eiem(cpu_eiem);    /* EIEM : enable all external intr */
 
 }
-
-void ack_bad_irq(unsigned int irq)
-{
-       printk("unexpected IRQ %d\n", irq);
-}