diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 3d2c6ba..830d70a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,8 +20,9 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/ftrace.h>
 #include <linux/irq.h>
+#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -45,6 +46,8 @@
 #include <asm/cacheflush.h>
 
 #include "entry.h"
+#include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS      (IMAP_INR + 1)
 
@@ -176,7 +179,7 @@ int show_interrupts(struct seq_file *p, void *v)
        }
 
        if (i < NR_IRQS) {
-               spin_lock_irqsave(&irq_desc[i].lock, flags);
+               raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
@@ -185,9 +188,9 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_printf(p, "%10u ", kstat_irqs(i));
 #else
                for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+                       seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-               seq_printf(p, " %9s", irq_desc[i].chip->typename);
+               seq_printf(p, " %9s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
@@ -195,7 +198,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
                seq_putc(p, '\n');
 skip:
-               spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+               raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
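
Note: this hunk tracks two genirq API changes of the period: irq_desc[].lock became a raw_spinlock_t (hence the raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair, mirrored in fixup_irqs() below), and per-CPU interrupt counts are read through the kstat_irqs_cpu() accessor instead of indexing kstat_cpu(j).irqs[] directly. The chip->typename read also becomes chip->name, matching the struct irq_chip hunks further down. A minimal sketch of the accessor pattern, assuming the era's API (total_irqs() is a hypothetical helper, not part of this patch):

    static unsigned int total_irqs(unsigned int irq)
    {
            unsigned int cpu, sum = 0;

            for_each_online_cpu(cpu)
                    sum += kstat_irqs_cpu(irq, cpu);
            return sum;
    }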
@@ -229,7 +232,7 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
-                                       IMAP_NID_SAFARI);;
+                                       IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
@@ -250,50 +253,26 @@ struct irq_handler_data {
 };
 
 #ifdef CONFIG_SMP
-static int irq_choose_cpu(unsigned int virt_irq)
+static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
 {
        cpumask_t mask;
        int cpuid;
 
-       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
-       if (cpus_equal(mask, CPU_MASK_ALL)) {
-               static int irq_rover;
-               static DEFINE_SPINLOCK(irq_rover_lock);
-               unsigned long flags;
-
-               /* Round-robin distribution... */
-       do_round_robin:
-               spin_lock_irqsave(&irq_rover_lock, flags);
-
-               while (!cpu_online(irq_rover)) {
-                       if (++irq_rover >= NR_CPUS)
-                               irq_rover = 0;
-               }
-               cpuid = irq_rover;
-               do {
-                       if (++irq_rover >= NR_CPUS)
-                               irq_rover = 0;
-               } while (!cpu_online(irq_rover));
-
-               spin_unlock_irqrestore(&irq_rover_lock, flags);
+       cpumask_copy(&mask, affinity);
+       if (cpus_equal(mask, cpu_online_map)) {
+               cpuid = map_to_cpu(virt_irq);
        } else {
                cpumask_t tmp;
 
                cpus_and(tmp, cpu_online_map, mask);
-
-               if (cpus_empty(tmp))
-                       goto do_round_robin;
-
-               cpuid = first_cpu(tmp);
+               cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
        }
 
        return cpuid;
 }
 #else
-static int irq_choose_cpu(unsigned int virt_irq)
-{
-       return real_hard_smp_processor_id();
-}
+#define irq_choose_cpu(virt_irq, affinity)     \
+       real_hard_smp_processor_id()
 #endif
 
 static void sun4u_irq_enable(unsigned int virt_irq)
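
Note: irq_choose_cpu() now receives the affinity mask explicitly instead of always reading irq_desc[].affinity, which lets the ->set_affinity() handlers below target the mask they are actually handed. The hand-rolled round-robin rover is replaced by map_to_cpu() from the new cpumap.h, which also serves as the fallback when the requested mask contains no online CPU. On UP the helper collapses to a macro that ignores both arguments; a static inline would keep type checking on the unused parameters (a hypothetical alternative, not what the patch does):

    static inline int irq_choose_cpu(unsigned int virt_irq,
                                     const struct cpumask *affinity)
    {
            return real_hard_smp_processor_id();
    }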
@@ -304,7 +283,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
                unsigned long cpuid, imap, val;
                unsigned int tid;
 
-               cpuid = irq_choose_cpu(virt_irq);
+               cpuid = irq_choose_cpu(virt_irq,
+                                      irq_desc[virt_irq].affinity);
                imap = data->imap;
 
                tid = sun4u_compute_tid(imap, cpuid);
@@ -318,23 +298,50 @@ static void sun4u_irq_enable(unsigned int virt_irq)
        }
 }
 
-static void sun4u_set_affinity(unsigned int virt_irq,
+static int sun4u_set_affinity(unsigned int virt_irq,
                               const struct cpumask *mask)
 {
-       sun4u_irq_enable(virt_irq);
-}
-
-static void sun4u_irq_disable(unsigned int virt_irq)
-{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
 
        if (likely(data)) {
-               unsigned long imap = data->imap;
-               unsigned long tmp = upa_readq(imap);
+               unsigned long cpuid, imap, val;
+               unsigned int tid;
 
-               tmp &= ~IMAP_VALID;
-               upa_writeq(tmp, imap);
+               cpuid = irq_choose_cpu(virt_irq, mask);
+               imap = data->imap;
+
+               tid = sun4u_compute_tid(imap, cpuid);
+
+               val = upa_readq(imap);
+               val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
+                        IMAP_AID_SAFARI | IMAP_NID_SAFARI);
+               val |= tid | IMAP_VALID;
+               upa_writeq(val, imap);
+               upa_writeq(ICLR_IDLE, data->iclr);
        }
+
+       return 0;
+}
+
+/* Don't do anything.  The desc->status check for IRQ_DISABLED in
+ * handler_irq() will skip the handler call and that will leave the
+ * interrupt in the sent state.  The next ->enable() call will hit the
+ * ICLR register to reset the state machine.
+ *
+ * This scheme is necessary, instead of clearing the Valid bit in the
+ * IMAP register, to handle the case of IMAP registers being shared by
+ * multiple INOs (and thus ICLR registers).  Since we use a different
+ * virtual IRQ for each shared IMAP instance, the generic code thinks
+ * there is only one user so it prematurely calls ->disable() on
+ * free_irq().
+ *
+ * We have to provide an explicit ->disable() method instead of using
+ * NULL to get the default.  The reason is that if the generic code
+ * sees that, it also hooks up a default ->shutdown method which
+ * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
+ */
+static void sun4u_irq_disable(unsigned int virt_irq)
+{
 }
 
 static void sun4u_irq_eoi(unsigned int virt_irq)
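
Note: sun4u_set_affinity() previously just re-ran sun4u_irq_enable(), which derives its target CPU from irq_desc[].affinity; it now programs the IMAP TID from the mask argument itself and, like every ->set_affinity() method in this file, returns int to follow the genirq change that made set_affinity report a status. A sketch of a caller using that status, assuming the period's genirq types (retarget_irq() is hypothetical, for illustration only):

    static void retarget_irq(unsigned int irq, int cpu)
    {
            struct irq_desc *desc = irq_desc + irq;

            if (desc->chip->set_affinity &&
                desc->chip->set_affinity(irq, cpumask_of(cpu)))
                    printk(KERN_WARNING "IRQ%u: retarget failed\n", irq);
    }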
@@ -352,7 +359,8 @@ static void sun4u_irq_eoi(unsigned int virt_irq)
 static void sun4v_irq_enable(unsigned int virt_irq)
 {
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
-       unsigned long cpuid = irq_choose_cpu(virt_irq);
+       unsigned long cpuid = irq_choose_cpu(virt_irq,
+                                            irq_desc[virt_irq].affinity);
        int err;
 
        err = sun4v_intr_settarget(ino, cpuid);
@@ -369,17 +377,19 @@ static void sun4v_irq_enable(unsigned int virt_irq)
                       ino, err);
 }
 
-static void sun4v_set_affinity(unsigned int virt_irq,
+static int sun4v_set_affinity(unsigned int virt_irq,
                               const struct cpumask *mask)
 {
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
-       unsigned long cpuid = irq_choose_cpu(virt_irq);
+       unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
        int err;
 
        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);
+
+       return 0;
 }
 
 static void sun4v_irq_disable(unsigned int virt_irq)
@@ -413,7 +423,7 @@ static void sun4v_virq_enable(unsigned int virt_irq)
        unsigned long cpuid, dev_handle, dev_ino;
        int err;
 
-       cpuid = irq_choose_cpu(virt_irq);
+       cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
 
        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;
@@ -437,13 +447,13 @@ static void sun4v_virq_enable(unsigned int virt_irq)
                       dev_handle, dev_ino, err);
 }
 
-static void sun4v_virt_set_affinity(unsigned int virt_irq,
+static int sun4v_virt_set_affinity(unsigned int virt_irq,
                                    const struct cpumask *mask)
 {
        unsigned long cpuid, dev_handle, dev_ino;
        int err;
 
-       cpuid = irq_choose_cpu(virt_irq);
+       cpuid = irq_choose_cpu(virt_irq, mask);
 
        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;
@@ -453,6 +463,8 @@ static void sun4v_virt_set_affinity(unsigned int virt_irq,
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);
+
+       return 0;
 }
 
 static void sun4v_virq_disable(unsigned int virt_irq)
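
Note: the two sun4v variants get the same treatment: the caller-supplied mask flows into irq_choose_cpu(), and both functions unconditionally return 0, with hypervisor failures from sun4v_intr_settarget()/sun4v_vintr_set_target() reported only via printk().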
@@ -492,7 +504,7 @@ static void sun4v_virq_eoi(unsigned int virt_irq)
 }
 
 static struct irq_chip sun4u_irq = {
-       .typename       = "sun4u",
+       .name           = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .eoi            = sun4u_irq_eoi,
@@ -500,7 +512,7 @@ static struct irq_chip sun4u_irq = {
 };
 
 static struct irq_chip sun4v_irq = {
-       .typename       = "sun4v",
+       .name           = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .eoi            = sun4v_irq_eoi,
@@ -508,7 +520,7 @@ static struct irq_chip sun4v_irq = {
 };
 
 static struct irq_chip sun4v_virq = {
-       .typename       = "vsun4v",
+       .name           = "vsun4v",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .eoi            = sun4v_virq_eoi,
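
Note: struct irq_chip's typename member was deprecated in favor of name, which show_interrupts() above now prints. An abridged sketch of the relevant fields, assuming the period's include/linux/irq.h (methods not used in this file are elided):

    struct irq_chip {
            const char      *name;          /* replaces deprecated "typename" */
            void            (*enable)(unsigned int irq);
            void            (*disable)(unsigned int irq);
            void            (*eoi)(unsigned int irq);
            int             (*set_affinity)(unsigned int irq,
                                            const struct cpumask *dest);
            /* ... */
    };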
@@ -638,6 +650,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;
+
+       /* The only reference we store to the IRQ bucket is
+        * by physical address, which kmemleak can't see, so
+        * tell it that this object is explicitly not a leak
+        * and should be scanned.
+        */
+       kmemleak_not_leak(bucket);
+
        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));
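
Note: the bucket is handed around by physical address (__pa()), which kmemleak's pointer scan cannot follow, so without the annotation every bucket allocated here would be falsely reported as leaked. A minimal sketch of the pattern, assuming CONFIG_DEBUG_KMEMLEAK (make_bucket() is hypothetical):

    static unsigned long make_bucket(void)
    {
            void *obj = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);

            if (!obj)
                    return 0UL;
            kmemleak_not_leak(obj);  /* reachable only via __pa(obj) below */
            return __pa(obj);        /* physical address: invisible to the scanner */
    }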
@@ -694,25 +714,7 @@ void ack_bad_irq(unsigned int virt_irq)
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
 
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-       void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-       __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-       if (orig_sp < sp ||
-           orig_sp > (sp + THREAD_SIZE)) {
-               sp += THREAD_SIZE - 192 - STACK_BIAS;
-               __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-       }
-
-       return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-       __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
@@ -747,7 +749,8 @@ void handler_irq(int irq, struct pt_regs *regs)
 
                desc = irq_desc + virt_irq;
 
-               desc->handle_irq(virt_irq, desc);
+               if (!(desc->status & IRQ_DISABLED))
+                       desc->handle_irq(virt_irq, desc);
 
                bucket_pa = next_pa;
        }
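
Note: three changes meet in this hunk. The set_hardirq_stack()/restore_hardirq_stack() helpers are deleted in favor of the new kstack.h include (presumably shared from there now). handler_irq() is tagged __irq_entry so the function-graph tracer can recognize it as a hard-IRQ entry point; the notrace added to register_one_mondo() further down likewise keeps tracing out of early per-CPU trap setup. And the IRQ_DISABLED guard implements the lazy-disable scheme described in the comment above sun4u_irq_disable(). A sketch of what __irq_entry expands to, assuming the era's include/linux/ftrace.h:

    #ifdef CONFIG_FUNCTION_GRAPH_TRACER
    # define __irq_entry  __attribute__((__section__(".irqentry.text")))
    #else
    # define __irq_entry
    #endif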
@@ -792,14 +795,14 @@ void fixup_irqs(void)
        for (irq = 0; irq < NR_IRQS; irq++) {
                unsigned long flags;
 
-               spin_lock_irqsave(&irq_desc[irq].lock, flags);
+               raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
                if (irq_desc[irq].action &&
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
                                        irq_desc[irq].affinity);
                }
-               spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+               raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
 
        tick_ops->disable_irq();
@@ -893,7 +896,7 @@ void notrace init_irqwork_curcpu(void)
  * Therefore you cannot make any OBP calls, not even prom_printf,
  * from these two routines.
  */
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;
@@ -920,25 +923,19 @@ void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
                           tb->nonresum_qmask);
 }
 
-static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
-{
-       unsigned long size = PAGE_ALIGN(qmask + 1);
-       void *p = __alloc_bootmem(size, size, 0);
-       if (!p) {
-               prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
-               prom_halt();
-       }
-
-       *pa_ptr = __pa(p);
-}
-
-static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
+/* Each queue region must be a power of 2 multiple of 64 bytes in
+ * size.  The base real address must be aligned to the size of the
+ * region.  Thus, an 8KB queue must be 8KB aligned, for example.
+ */
+static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 {
        unsigned long size = PAGE_ALIGN(qmask + 1);
-       void *p = __alloc_bootmem(size, size, 0);
+       unsigned long order = get_order(size);
+       unsigned long p;
 
+       p = __get_free_pages(GFP_KERNEL, order);
        if (!p) {
-               prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+               prom_printf("SUN4V: Error, cannot allocate queue.\n");
                prom_halt();
        }
 
@@ -948,11 +945,11 @@ static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
 static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
-       void *page;
+       unsigned long page;
 
        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
 
-       page = alloc_bootmem_pages(PAGE_SIZE);
+       page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
@@ -971,13 +968,13 @@ static void __init sun4v_init_mondo_queues(void)
        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];
 
-               alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
-               alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
-               alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
-               alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
-               alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
-               alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
-                              tb->nonresum_qmask);
+               alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+               alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+               alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+               alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+               alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+               alloc_one_queue(&tb->nonresum_kernel_buf_pa,
+                               tb->nonresum_qmask);
        }
 }
 
@@ -1005,7 +1002,7 @@ void __init init_IRQ(void)
        kill_prom_timer();
 
        size = sizeof(struct ino_bucket) * NUM_IVECS;
-       ivector_table = alloc_bootmem(size);
+       ivector_table = kzalloc(size, GFP_KERNEL);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();
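
Note: the closing hunks finish the move off bootmem: the cpu mondo page now comes from get_zeroed_page() (hence the void * to unsigned long change above), and ivector_table from kzalloc(). Both paths run from init_IRQ(), by which point the regular allocators are up, and both preserve the zero-filled semantics the bootmem allocator provided.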