Merge branch 'tip/tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git...
[safe/jmp/linux-2.6] / arch/x86/kernel/irq_64.c
index 39ef7fe..acf8fbf 100644
 #include <linux/seq_file.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <asm/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <linux/smp.h>
 #include <asm/io_apic.h>
 #include <asm/idle.h>
-#include <asm/smp.h>
+#include <asm/apic.h>
 
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
-       printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
-       /*
-        * Currently unexpected vectors happen only on SMP and APIC.
-        * We _must_ ack these because every local APIC has only N
-        * irq slots per priority level, and a 'hanging, unacked' IRQ
-        * holds up an irq slot - in excessive cases (when multiple
-        * unexpected vectors occur) that might lock up the APIC
-        * completely.
-        * But don't ack when the APIC is disabled. -AK
-        */
-       if (!disable_apic)
-               ack_APIC_irq();
-}
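
The deleted ack_bad_irq() is unified rather than dropped: it moves to the
shared arch/x86/kernel/irq.c. A sketch of the unified version, reconstructed
from the 2.6.29-era tree (the CONFIG_X86_LOCAL_APIC guard and the
cpu_has_apic gate are assumptions):

    /*
     * 'what should we do if we get a hw irq event on an illegal vector'.
     * each architecture has to answer this themselves.
     */
    void ack_bad_irq(unsigned int irq)
    {
            printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
    #ifdef CONFIG_X86_LOCAL_APIC
            /*
             * Each local APIC has only N irq slots per priority level;
             * a hanging, unacked IRQ pins one of them, so ack even
             * unexpected vectors - but not when the APIC is disabled.
             */
            if (cpu_has_apic)               /* assumed gate */
                    ack_APIC_irq();
    #endif
    }
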
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+DEFINE_PER_CPU(struct pt_regs *, irq_regs);
+EXPORT_PER_CPU_SYMBOL(irq_regs);
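
These per-CPU variables replace state that 64-bit previously kept in the PDA;
irq_regs in particular backs the get_irq_regs()/set_irq_regs() helpers that
do_IRQ() uses to publish the interrupted context. A sketch of the accessors,
following the asm-generic/irq_regs.h pattern (__get_cpu_var() is assumed
here; the real x86 header may use different per-CPU primitives):

    DECLARE_PER_CPU(struct pt_regs *, irq_regs);

    static inline struct pt_regs *get_irq_regs(void)
    {
            /* regs of the interrupt currently handled on this CPU */
            return __get_cpu_var(irq_regs);
    }

    static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
    {
            struct pt_regs *old_regs = get_irq_regs();

            __get_cpu_var(irq_regs) = new_regs;
            return old_regs;        /* caller restores this on irq exit */
    }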
 
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
 /*
  * Probabilistic stack overflow check:
  *
@@ -48,132 +35,50 @@ void ack_bad_irq(unsigned int irq)
  */
 static inline void stack_overflow_check(struct pt_regs *regs)
 {
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
        u64 curbase = (u64)task_stack_page(current);
-       static unsigned long warned = -60*HZ;
 
-       if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
-           regs->sp <  curbase + sizeof(struct thread_info) + 128 &&
-           time_after(jiffies, warned + 60*HZ)) {
-               printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
-                      current->comm, curbase, regs->sp);
-               show_stack(NULL,NULL);
-               warned = jiffies;
-       }
-}
+       WARN_ONCE(regs->sp >= curbase &&
+                 regs->sp <= curbase + THREAD_SIZE &&
+                 regs->sp <  curbase + sizeof(struct thread_info) +
+                                       sizeof(struct pt_regs) + 128,
+
+                 "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
+                       current->comm, curbase, regs->sp);
 #endif
+}
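
The rewrite drops the hand-rolled once-a-minute jiffies ratelimit in favour
of WARN_ONCE(), which fires at most once per boot and dumps a backtrace
itself, so the explicit show_stack(NULL, NULL) goes away; the check also
gains sizeof(struct pt_regs) of extra headroom. Roughly what the generic
macro expands to (a sketch of include/asm-generic/bug.h, not the exact
definition):

    #define WARN_ONCE(condition, format...) ({              \
            static int __warned;                            \
            int __ret_warn_once = !!(condition);            \
                                                            \
            if (unlikely(__ret_warn_once) && !__warned) {   \
                    __warned = 1;                           \
                    WARN(1, format); /* message + backtrace */ \
            }                                               \
            unlikely(__ret_warn_once);                      \
    })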
 
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
+bool handle_irq(unsigned irq, struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
 
-       /* high bit used in ret_from_ code  */
-       unsigned vector = ~regs->orig_ax;
-       unsigned irq;
-
-       exit_idle();
-       irq_enter();
-       irq = __get_cpu_var(vector_irq)[vector];
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
        stack_overflow_check(regs);
-#endif
 
        desc = irq_to_desc(irq);
-       if (likely(desc))
-               generic_handle_irq_desc(irq, desc);
-       else {
-               if (!disable_apic)
-                       ack_APIC_irq();
-
-               if (printk_ratelimit())
-                       printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
-                               __func__, smp_processor_id(), vector);
-       }
-
-       irq_exit();
+       if (unlikely(!desc))
+               return false;
 
-       set_irq_regs(old_regs);
-       return 1;
+       generic_handle_irq_desc(irq, desc);
+       return true;
 }
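
handle_irq() keeps only the 64-bit-specific work (the stack check and the
descriptor dispatch); the bookkeeping removed above - set_irq_regs(),
exit_idle(), irq_enter()/irq_exit(), the vector-to-irq translation and the
spurious-vector ack - moves to a do_IRQ() shared with 32-bit in
arch/x86/kernel/irq.c, which calls handle_irq() and acks the vector when it
returns false. A sketch of that caller, reassembled from the removed lines
above (placement and exact printk wording are assumptions):

    asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
    {
            struct pt_regs *old_regs = set_irq_regs(regs);
            /* high bit used in ret_from_ code */
            unsigned vector = ~regs->orig_ax;
            unsigned irq;

            exit_idle();
            irq_enter();
            irq = __get_cpu_var(vector_irq)[vector];

            if (!handle_irq(irq, regs)) {
                    ack_APIC_irq(); /* old code gated this on !disable_apic */

                    if (printk_ratelimit())
                            printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
                                    __func__, smp_processor_id(), vector);
            }

            irq_exit();
            set_irq_regs(old_regs);
            return 1;
    }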
 
-#ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
-{
-       unsigned int irq;
-       static int warned;
-       struct irq_desc *desc;
-
-       for_each_irq_desc(irq, desc) {
-               cpumask_t mask;
-               int break_affinity = 0;
-               int set_affinity = 1;
-
-               if (irq == 2)
-                       continue;
-
-               /* interrupt's are disabled at this point */
-               spin_lock(&desc->lock);
-
-               if (!irq_has_action(irq) ||
-                   cpus_equal(desc->affinity, map)) {
-                       spin_unlock(&desc->lock);
-                       continue;
-               }
-
-               cpus_and(mask, desc->affinity, map);
-               if (cpus_empty(mask)) {
-                       break_affinity = 1;
-                       mask = map;
-               }
-
-               if (desc->chip->mask)
-                       desc->chip->mask(irq);
-
-               if (desc->chip->set_affinity)
-                       desc->chip->set_affinity(irq, mask);
-               else if (!(warned++))
-                       set_affinity = 0;
-
-               if (desc->chip->unmask)
-                       desc->chip->unmask(irq);
-
-               spin_unlock(&desc->lock);
-
-               if (break_affinity && set_affinity)
-                       printk("Broke affinity for irq %i\n", irq);
-               else if (!set_affinity)
-                       printk("Cannot set affinity for irq %i\n", irq);
-       }
-
-       /* That doesn't seem sufficient.  Give it 1ms. */
-       local_irq_enable();
-       mdelay(1);
-       local_irq_disable();
-}
-#endif
 
 extern void call_softirq(void);
 
 asmlinkage void do_softirq(void)
 {
        __u32 pending;
        unsigned long flags;
 
        if (in_interrupt())
                return;
 
        local_irq_save(flags);
        pending = local_softirq_pending();
        /* Switch to interrupt stack */
        if (pending) {
                call_softirq();
                WARN_ON_ONCE(softirq_count());
        }
        local_irq_restore(flags);
 }
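
This override of do_softirq() exists only to switch stacks: call_softirq()
is an assembly thunk in entry_64.S that moves %rsp onto the per-CPU
interrupt stack before invoking __do_softirq(), so softirqs raised outside
hardirq context cannot eat into a deep process stack. For contrast, the
generic fallback in kernel/softirq.c - used when an architecture does not
define __ARCH_HAS_DO_SOFTIRQ - runs them on whatever stack it was called
from (sketch):

    asmlinkage void do_softirq(void)
    {
            __u32 pending;
            unsigned long flags;

            if (in_interrupt())     /* pending softirqs run on irq exit */
                    return;

            local_irq_save(flags);
            pending = local_softirq_pending();
            if (pending)
                    __do_softirq(); /* no stack switch here */
            local_irq_restore(flags);
    }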