diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index bd07505..830d70a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,8 +20,9 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/ftrace.h>
 #include <linux/irq.h>
+#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -46,6 +47,7 @@
 
 #include "entry.h"
 #include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS      (IMAP_INR + 1)
 
@@ -177,7 +179,7 @@ int show_interrupts(struct seq_file *p, void *v)
        }
 
        if (i < NR_IRQS) {
-               spin_lock_irqsave(&irq_desc[i].lock, flags);
+               raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
@@ -188,7 +190,7 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-               seq_printf(p, " %9s", irq_desc[i].chip->typename);
+               seq_printf(p, " %9s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
@@ -196,7 +198,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
                seq_putc(p, '\n');
 skip:
-               spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+               raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
@@ -230,7 +232,7 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
-                                       IMAP_NID_SAFARI);;
+                                       IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
@@ -251,12 +253,12 @@ struct irq_handler_data {
 };
 
 #ifdef CONFIG_SMP
-static int irq_choose_cpu(unsigned int virt_irq)
+static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
 {
        cpumask_t mask;
        int cpuid;
 
-       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
+       cpumask_copy(&mask, affinity);
        if (cpus_equal(mask, cpu_online_map)) {
                cpuid = map_to_cpu(virt_irq);
        } else {
@@ -269,10 +271,8 @@ static int irq_choose_cpu(unsigned int virt_irq)
        return cpuid;
 }
 #else
-static int irq_choose_cpu(unsigned int virt_irq)
-{
-       return real_hard_smp_processor_id();
-}
+#define irq_choose_cpu(virt_irq, affinity)     \
+       real_hard_smp_processor_id()
 #endif
 
 static void sun4u_irq_enable(unsigned int virt_irq)
@@ -283,7 +283,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
                unsigned long cpuid, imap, val;
                unsigned int tid;
 
-               cpuid = irq_choose_cpu(virt_irq);
+               cpuid = irq_choose_cpu(virt_irq,
+                                      irq_desc[virt_irq].affinity);
                imap = data->imap;
 
                tid = sun4u_compute_tid(imap, cpuid);
@@ -300,7 +301,24 @@ static void sun4u_irq_enable(unsigned int virt_irq)
 static int sun4u_set_affinity(unsigned int virt_irq,
                               const struct cpumask *mask)
 {
-       sun4u_irq_enable(virt_irq);
+       struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+       if (likely(data)) {
+               unsigned long cpuid, imap, val;
+               unsigned int tid;
+
+               cpuid = irq_choose_cpu(virt_irq, mask);
+               imap = data->imap;
+
+               tid = sun4u_compute_tid(imap, cpuid);
+
+               val = upa_readq(imap);
+               val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
+                        IMAP_AID_SAFARI | IMAP_NID_SAFARI);
+               val |= tid | IMAP_VALID;
+               upa_writeq(val, imap);
+               upa_writeq(ICLR_IDLE, data->iclr);
+       }
 
        return 0;
 }
@@ -341,7 +359,8 @@ static void sun4u_irq_eoi(unsigned int virt_irq)
 static void sun4v_irq_enable(unsigned int virt_irq)
 {
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
-       unsigned long cpuid = irq_choose_cpu(virt_irq);
+       unsigned long cpuid = irq_choose_cpu(virt_irq,
+                                            irq_desc[virt_irq].affinity);
        int err;
 
        err = sun4v_intr_settarget(ino, cpuid);
@@ -362,7 +381,7 @@ static int sun4v_set_affinity(unsigned int virt_irq,
                               const struct cpumask *mask)
 {
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
-       unsigned long cpuid = irq_choose_cpu(virt_irq);
+       unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
        int err;
 
        err = sun4v_intr_settarget(ino, cpuid);
@@ -404,7 +423,7 @@ static void sun4v_virq_enable(unsigned int virt_irq)
        unsigned long cpuid, dev_handle, dev_ino;
        int err;
 
-       cpuid = irq_choose_cpu(virt_irq);
+       cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
 
        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;
@@ -434,7 +453,7 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq,
        unsigned long cpuid, dev_handle, dev_ino;
        int err;
 
-       cpuid = irq_choose_cpu(virt_irq);
+       cpuid = irq_choose_cpu(virt_irq, mask);
 
        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;
@@ -485,7 +504,7 @@ static void sun4v_virq_eoi(unsigned int virt_irq)
 }
 
 static struct irq_chip sun4u_irq = {
-       .typename       = "sun4u",
+       .name           = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .eoi            = sun4u_irq_eoi,
@@ -493,7 +512,7 @@ static struct irq_chip sun4u_irq = {
 };
 
 static struct irq_chip sun4v_irq = {
-       .typename       = "sun4v",
+       .name           = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .eoi            = sun4v_irq_eoi,
@@ -501,7 +520,7 @@ static struct irq_chip sun4v_irq = {
 };
 
 static struct irq_chip sun4v_virq = {
-       .typename       = "vsun4v",
+       .name           = "vsun4v",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .eoi            = sun4v_virq_eoi,
@@ -631,6 +650,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;
+
+       /* The only reference we store to the IRQ bucket is
+        * by physical address, which kmemleak can't see; tell
+        * it that this object explicitly is not a leak and
+        * should be scanned.
+        */
+       kmemleak_not_leak(bucket);
+
        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));
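
For context, kmemleak discovers live objects by scanning memory for their
virtual addresses; an allocation whose only stored reference is a physical
address (as with the ino_bucket above, reached via __pa()) would otherwise be
reported as a leak.  A minimal sketch of the same pattern in a hypothetical
driver (example_alloc and example_buf_pa are made-up names, not part of this
patch):

#include <linux/slab.h>
#include <linux/mm.h>		/* __pa() */
#include <linux/kmemleak.h>

static unsigned long example_buf_pa;	/* only the physical address is kept */

static int example_alloc(void)
{
	void *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* No virtual pointer to buf is stored anywhere kmemleak can
	 * scan, so mark the object as intentionally not a leak.
	 */
	kmemleak_not_leak(buf);

	example_buf_pa = __pa(buf);	/* hardware sees only the physical address */
	return 0;
}
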
@@ -687,25 +714,7 @@ void ack_bad_irq(unsigned int virt_irq)
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
 
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-       void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-       __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-       if (orig_sp < sp ||
-           orig_sp > (sp + THREAD_SIZE)) {
-               sp += THREAD_SIZE - 192 - STACK_BIAS;
-               __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-       }
-
-       return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-       __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
@@ -786,14 +795,14 @@ void fixup_irqs(void)
        for (irq = 0; irq < NR_IRQS; irq++) {
                unsigned long flags;
 
-               spin_lock_irqsave(&irq_desc[irq].lock, flags);
+               raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
                if (irq_desc[irq].action &&
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
                                        irq_desc[irq].affinity);
                }
-               spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+               raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
 
        tick_ops->disable_irq();
@@ -887,7 +896,7 @@ void notrace init_irqwork_curcpu(void)
  * Therefore you cannot make any OBP calls, not even prom_printf,
  * from these two routines.
  */
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;
@@ -914,25 +923,19 @@ void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
                           tb->nonresum_qmask);
 }
 
-static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
-{
-       unsigned long size = PAGE_ALIGN(qmask + 1);
-       void *p = __alloc_bootmem(size, size, 0);
-       if (!p) {
-               prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
-               prom_halt();
-       }
-
-       *pa_ptr = __pa(p);
-}
-
-static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
+/* Each queue region must be a power of 2 multiple of 64 bytes in
+ * size.  The base real address must be aligned to the size of the
+ * region.  Thus, an 8KB queue must be 8KB aligned, for example.
+ */
+static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 {
        unsigned long size = PAGE_ALIGN(qmask + 1);
-       void *p = __alloc_bootmem(size, size, 0);
+       unsigned long order = get_order(size);
+       unsigned long p;
 
+       p = __get_free_pages(GFP_KERNEL, order);
        if (!p) {
-               prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+               prom_printf("SUN4V: Error, cannot allocate queue.\n");
                prom_halt();
        }
 
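
The alignment rule in the new comment is what makes the switch from bootmem to
the page allocator safe here: __get_free_pages() returns a buddy block that is
naturally aligned to its own power-of-two size, and the kernel's linear mapping
preserves that alignment in the physical address.  A hedged sketch of the
invariant (alloc_one_queue_checked is a hypothetical name; error handling
omitted; not part of the patch):

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* PAGE_ALIGN, get_order, __pa */

static void __init alloc_one_queue_checked(unsigned long *pa_ptr,
					   unsigned long qmask)
{
	unsigned long size  = PAGE_ALIGN(qmask + 1);	/* power of 2, >= PAGE_SIZE */
	unsigned long order = get_order(size);		/* 2^order pages cover size */
	unsigned long p     = __get_free_pages(GFP_KERNEL, order);

	/* A buddy block of this order is aligned to its own size, so the
	 * real address handed to the hypervisor meets the "base aligned
	 * to region size" requirement quoted above.
	 */
	BUG_ON(p & (size - 1));

	*pa_ptr = __pa(p);
}
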
@@ -942,11 +945,11 @@ static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
 static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
-       void *page;
+       unsigned long page;
 
        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
 
-       page = alloc_bootmem_pages(PAGE_SIZE);
+       page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
@@ -965,13 +968,13 @@ static void __init sun4v_init_mondo_queues(void)
        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];
 
-               alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
-               alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
-               alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
-               alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
-               alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
-               alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
-                              tb->nonresum_qmask);
+               alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+               alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+               alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+               alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+               alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+               alloc_one_queue(&tb->nonresum_kernel_buf_pa,
+                               tb->nonresum_qmask);
        }
 }
 
@@ -999,7 +1002,7 @@ void __init init_IRQ(void)
        kill_prom_timer();
 
        size = sizeof(struct ino_bucket) * NUM_IVECS;
-       ivector_table = alloc_bootmem(size);
+       ivector_table = kzalloc(size, GFP_KERNEL);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();