Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
kernel/irq/manage.c
index 1279e34..540f6c4 100644
@@ -1,26 +1,23 @@
 /*
  * linux/kernel/irq/manage.c
  *
- * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006 Thomas Gleixner
  *
  * This file contains driver APIs to the irq subsystem.
  */
 
-#include <linux/config.h>
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 
 #include "internals.h"
 
 #ifdef CONFIG_SMP
 
-cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-#endif
+cpumask_t irq_default_affinity = CPU_MASK_ALL;
 
 /**
  *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -34,17 +31,138 @@ cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
  */
 void synchronize_irq(unsigned int irq)
 {
-       struct irq_desc *desc = irq_desc + irq;
+       struct irq_desc *desc = irq_to_desc(irq);
+       unsigned int status;
 
-       if (irq >= NR_IRQS)
+       if (!desc)
                return;
 
-       while (desc->status & IRQ_INPROGRESS)
-               cpu_relax();
-}
+       do {
+               unsigned long flags;
+
+               /*
+                * Wait until we're out of the critical section.  This might
+                * give the wrong answer due to the lack of memory barriers.
+                */
+               while (desc->status & IRQ_INPROGRESS)
+                       cpu_relax();
 
+               /* Ok, that indicated we're done: double-check carefully. */
+               spin_lock_irqsave(&desc->lock, flags);
+               status = desc->status;
+               spin_unlock_irqrestore(&desc->lock, flags);
+
+               /* Oops, that failed? */
+       } while (status & IRQ_INPROGRESS);
+}
 EXPORT_SYMBOL(synchronize_irq);
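
/*
 * Usage sketch, not part of this patch: a teardown path that must make sure
 * no handler is still running on another CPU before it frees the data the
 * handler touches.  foo_dev and foo_release() are hypothetical names.
 */
static void foo_shutdown(struct foo_dev *foo)
{
	disable_irq_nosync(foo->irq);	/* mask the line ... */
	synchronize_irq(foo->irq);	/* ... and wait out in-flight handlers */
	foo_release(foo);		/* now safe to free handler data */
}
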
 
+/**
+ *     irq_can_set_affinity - Check if the affinity of a given irq can be set
+ *     @irq:           Interrupt to check
+ *
+ */
+int irq_can_set_affinity(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
+           !desc->chip->set_affinity)
+               return 0;
+
+       return 1;
+}
+
+/**
+ *     irq_set_affinity - Set the irq affinity of a given irq
+ *     @irq:           Interrupt to set affinity
+ *     @cpumask:       cpumask
+ *
+ */
+int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       unsigned long flags;
+
+       if (!desc->chip->set_affinity)
+               return -EINVAL;
+
+       spin_lock_irqsave(&desc->lock, flags);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
+               desc->affinity = cpumask;
+               desc->chip->set_affinity(irq, cpumask);
+       } else {
+               desc->status |= IRQ_MOVE_PENDING;
+               desc->pending_mask = cpumask;
+       }
+#else
+       desc->affinity = cpumask;
+       desc->chip->set_affinity(irq, cpumask);
+#endif
+       desc->status |= IRQ_AFFINITY_SET;
+       spin_unlock_irqrestore(&desc->lock, flags);
+       return 0;
+}
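
/*
 * Usage sketch, not part of this patch, assuming the pre-cpumask_var_t API
 * shown above (cpumask_t passed by value): pin an interrupt to CPU 0, but
 * only if the chip can actually move it; per-CPU interrupts and chips
 * without a ->set_affinity hook are rejected by irq_can_set_affinity().
 */
static void foo_pin_irq(unsigned int irq)
{
	if (irq_can_set_affinity(irq))
		irq_set_affinity(irq, cpumask_of_cpu(0));
}
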
+
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+/*
+ * Generic version of the affinity autoselector.
+ */
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+{
+       cpumask_t mask;
+
+       if (!irq_can_set_affinity(irq))
+               return 0;
+
+       cpus_and(mask, cpu_online_map, irq_default_affinity);
+
+       /*
+        * Preserve a userspace affinity setup, but make sure that
+        * one of the targets is online.
+        */
+       if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
+               if (cpus_intersects(desc->affinity, cpu_online_map))
+                       mask = desc->affinity;
+               else
+                       desc->status &= ~IRQ_AFFINITY_SET;
+       }
+
+       desc->affinity = mask;
+       desc->chip->set_affinity(irq, mask);
+
+       return 0;
+}
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+       return irq_select_affinity(irq);
+}
+#endif
+
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       ret = do_irq_select_affinity(irq, desc);
+       spin_unlock_irqrestore(&desc->lock, flags);
+
+       return ret;
+}
+
+#else
+static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+{
+       return 0;
+}
 #endif
 
 /**
@@ -60,20 +178,19 @@ EXPORT_SYMBOL(synchronize_irq);
  */
 void disable_irq_nosync(unsigned int irq)
 {
-       irq_desc_t *desc = irq_desc + irq;
+       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
 
-       if (irq >= NR_IRQS)
+       if (!desc)
                return;
 
        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
-               desc->handler->disable(irq);
+               desc->chip->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
 }
-
 EXPORT_SYMBOL(disable_irq_nosync);
 
 /**
@@ -90,18 +207,36 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-       irq_desc_t *desc = irq_desc + irq;
+       struct irq_desc *desc = irq_to_desc(irq);
 
-       if (irq >= NR_IRQS)
+       if (!desc)
                return;
 
        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
 }
-
 EXPORT_SYMBOL(disable_irq);
 
+static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+{
+       switch (desc->depth) {
+       case 0:
+               WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+               break;
+       case 1: {
+               unsigned int status = desc->status & ~IRQ_DISABLED;
+
+               /* Prevent probing on this irq: */
+               desc->status = status | IRQ_NOPROBE;
+               check_irq_resend(desc, irq);
+               /* fall-through */
+       }
+       default:
+               desc->depth--;
+       }
+}
+
 /**
  *     enable_irq - enable handling of an irq
  *     @irq: Interrupt to enable
@@ -114,35 +249,75 @@ EXPORT_SYMBOL(disable_irq);
  */
 void enable_irq(unsigned int irq)
 {
-       irq_desc_t *desc = irq_desc + irq;
+       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
 
-       if (irq >= NR_IRQS)
+       if (!desc)
                return;
 
        spin_lock_irqsave(&desc->lock, flags);
-       switch (desc->depth) {
-       case 0:
-               WARN_ON(1);
-               break;
-       case 1: {
-               unsigned int status = desc->status & ~IRQ_DISABLED;
+       __enable_irq(desc, irq);
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL(enable_irq);
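
/*
 * Usage sketch, not part of this patch: disable_irq()/enable_irq() nest via
 * desc->depth, so every disable needs exactly one matching enable; an extra
 * enable trips the WARN() in __enable_irq() above.  foo_quiesce() is a
 * hypothetical caller.
 */
static void foo_quiesce(unsigned int irq)
{
	disable_irq(irq);	/* depth 0 -> 1, line masked, handlers drained */
	disable_irq(irq);	/* depth 1 -> 2, still masked */
	/* ... touch state the handler also uses ... */
	enable_irq(irq);	/* depth 2 -> 1, still masked */
	enable_irq(irq);	/* depth 1 -> 0, line unmasked again */
}
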
 
-               desc->status = status;
-               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
-                       desc->status = status | IRQ_REPLAY;
-                       hw_resend_irq(desc->handler,irq);
+static int set_irq_wake_real(unsigned int irq, unsigned int on)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       int ret = -ENXIO;
+
+       if (desc->chip->set_wake)
+               ret = desc->chip->set_wake(irq, on);
+
+       return ret;
+}
+
+/**
+ *     set_irq_wake - control irq power management wakeup
+ *     @irq:   interrupt to control
+ *     @on:    enable/disable power management wakeup
+ *
+ *     Enable/disable power management wakeup mode, which is
+ *     disabled by default.  Enables and disables must match,
+ *     just as they match for non-wakeup mode support.
+ *
+ *     Wakeup mode lets this IRQ wake the system from sleep
+ *     states like "suspend to RAM".
+ */
+int set_irq_wake(unsigned int irq, unsigned int on)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       unsigned long flags;
+       int ret = 0;
+
+       /* wakeup-capable irqs can be shared between drivers that
+        * don't need to have the same sleep mode behaviors.
+        */
+       spin_lock_irqsave(&desc->lock, flags);
+       if (on) {
+               if (desc->wake_depth++ == 0) {
+                       ret = set_irq_wake_real(irq, on);
+                       if (ret)
+                               desc->wake_depth = 0;
+                       else
+                               desc->status |= IRQ_WAKEUP;
+               }
+       } else {
+               if (desc->wake_depth == 0) {
+                       WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
+               } else if (--desc->wake_depth == 0) {
+                       ret = set_irq_wake_real(irq, on);
+                       if (ret)
+                               desc->wake_depth = 1;
+                       else
+                               desc->status &= ~IRQ_WAKEUP;
                }
-               desc->handler->enable(irq);
-               /* fall-through */
-       }
-       default:
-               desc->depth--;
        }
+
        spin_unlock_irqrestore(&desc->lock, flags);
+       return ret;
 }
-
-EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(set_irq_wake);
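
/*
 * Usage sketch, not part of this patch: suspend/resume hooks keeping the
 * wake_depth reference count balanced, as the kerneldoc above requires.
 * The foo_* names are hypothetical.
 */
static int foo_suspend(unsigned int irq)
{
	return set_irq_wake(irq, 1);	/* one enable ... */
}

static int foo_resume(unsigned int irq)
{
	return set_irq_wake(irq, 0);	/* ... matched by one disable */
}
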
 
 /*
  * Internal function that tells the architecture code whether a
@@ -151,41 +326,91 @@ EXPORT_SYMBOL(enable_irq);
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
+       struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
 
-       if (irq >= NR_IRQS)
+       if (!desc)
+               return 0;
+
+       if (desc->status & IRQ_NOREQUEST)
                return 0;
 
-       action = irq_desc[irq].action;
+       action = desc->action;
        if (action)
-               if (irqflags & action->flags & SA_SHIRQ)
+               if (irqflags & action->flags & IRQF_SHARED)
                        action = NULL;
 
        return !action;
 }
 
+void compat_irq_chip_set_default_handler(struct irq_desc *desc)
+{
+       /*
+        * If the architecture still has not overridden
+        * the flow handler then zap the default. This
+        * should catch incorrect flow-type setting.
+        */
+       if (desc->handle_irq == &handle_bad_irq)
+               desc->handle_irq = NULL;
+}
+
+int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
+               unsigned long flags)
+{
+       int ret;
+       struct irq_chip *chip = desc->chip;
+
+       if (!chip || !chip->set_type) {
+               /*
+                * IRQF_TRIGGER_* but the PIC does not support multiple
+                * flow-types?
+                */
+               pr_debug("No set_type function for IRQ %d (%s)\n", irq,
+                               chip ? (chip->name ? : "unknown") : "unknown");
+               return 0;
+       }
+
+       /* caller masked out all except trigger mode flags */
+       ret = chip->set_type(irq, flags);
+
+       if (ret)
+               pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
+                               (int)flags, irq, chip->set_type);
+       else {
+               if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+                       flags |= IRQ_LEVEL;
+               /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
+               desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+               desc->status |= flags;
+       }
+
+       return ret;
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
  */
-int setup_irq(unsigned int irq, struct irqaction * new)
+static int
+__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 {
-       struct irq_desc *desc = irq_desc + irq;
        struct irqaction *old, **p;
+       const char *old_name = NULL;
        unsigned long flags;
        int shared = 0;
+       int ret;
 
-       if (irq >= NR_IRQS)
+       if (!desc)
                return -EINVAL;
 
-       if (desc->handler == &no_irq_type)
+       if (desc->chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
-       if (new->flags & SA_SAMPLE_RANDOM) {
+       if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
@@ -200,16 +425,26 @@ int setup_irq(unsigned int irq, struct irqaction * new)
        /*
         * The following block of code has to be executed atomically
         */
-       spin_lock_irqsave(&desc->lock,flags);
+       spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
-       if ((old = *p) != NULL) {
-               /* Can't share interrupts unless both agree to */
-               if (!(old->flags & new->flags & SA_SHIRQ))
+       old = *p;
+       if (old) {
+               /*
+                * Can't share interrupts unless both agree to and are
+                * the same type (level, edge, polarity). So both flag
+                * fields must have IRQF_SHARED set and the bits which
+                * set the trigger type must match.
+                */
+               if (!((old->flags & new->flags) & IRQF_SHARED) ||
+                   ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+                       old_name = old->name;
                        goto mismatch;
+               }
 
-#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+#if defined(CONFIG_IRQ_PER_CPU)
                /* All handlers must agree on per-cpuness */
-               if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
+               if ((old->flags & IRQF_PERCPU) !=
+                   (new->flags & IRQF_PERCPU))
                        goto mismatch;
 #endif
 
@@ -221,39 +456,104 @@ int setup_irq(unsigned int irq, struct irqaction * new)
                shared = 1;
        }
 
-       *p = new;
-#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
-       if (new->flags & SA_PERCPU_IRQ)
-               desc->status |= IRQ_PER_CPU;
-#endif
        if (!shared) {
-               desc->depth = 0;
-               desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
-                                 IRQ_WAITING | IRQ_INPROGRESS);
-               if (desc->handler->startup)
-                       desc->handler->startup(irq);
-               else
-                       desc->handler->enable(irq);
+               irq_chip_set_defaults(desc->chip);
+
+               /* Setup the type (level, edge polarity) if configured: */
+               if (new->flags & IRQF_TRIGGER_MASK) {
+                       ret = __irq_set_trigger(desc, irq,
+                                       new->flags & IRQF_TRIGGER_MASK);
+
+                       if (ret) {
+                               spin_unlock_irqrestore(&desc->lock, flags);
+                               return ret;
+                       }
+               } else
+                       compat_irq_chip_set_default_handler(desc);
+#if defined(CONFIG_IRQ_PER_CPU)
+               if (new->flags & IRQF_PERCPU)
+                       desc->status |= IRQ_PER_CPU;
+#endif
+
+               desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+                                 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+
+               if (!(desc->status & IRQ_NOAUTOEN)) {
+                       desc->depth = 0;
+                       desc->status &= ~IRQ_DISABLED;
+                       desc->chip->startup(irq);
+               } else
+                       /* Undo nested disables: */
+                       desc->depth = 1;
+
+               /* Exclude IRQ from balancing if requested */
+               if (new->flags & IRQF_NOBALANCING)
+                       desc->status |= IRQ_NO_BALANCING;
+
+               /* Set default affinity mask once everything is setup */
+               do_irq_select_affinity(irq, desc);
+
+       } else if ((new->flags & IRQF_TRIGGER_MASK)
+                       && (new->flags & IRQF_TRIGGER_MASK)
+                               != (desc->status & IRQ_TYPE_SENSE_MASK)) {
+               /* hope the handler works with the actual trigger mode... */
+               pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
+                               irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
+                               (int)(new->flags & IRQF_TRIGGER_MASK));
+       }
+
+       *p = new;
+
+       /* Reset broken irq detection when installing new handler */
+       desc->irq_count = 0;
+       desc->irqs_unhandled = 0;
+
+       /*
+        * Check whether we disabled the irq via the spurious handler
+        * before. Reenable it and give it another chance.
+        */
+       if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
+               desc->status &= ~IRQ_SPURIOUS_DISABLED;
+               __enable_irq(desc, irq);
        }
-       spin_unlock_irqrestore(&desc->lock,flags);
+
+       spin_unlock_irqrestore(&desc->lock, flags);
 
        new->irq = irq;
-       register_irq_proc(irq);
+       register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);
 
        return 0;
 
 mismatch:
-       spin_unlock_irqrestore(&desc->lock, flags);
-       if (!(new->flags & SA_PROBEIRQ)) {
-               printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
+#ifdef CONFIG_DEBUG_SHIRQ
+       if (!(new->flags & IRQF_PROBE_SHARED)) {
+               printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
+               if (old_name)
+                       printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
+#endif
+       spin_unlock_irqrestore(&desc->lock, flags);
        return -EBUSY;
 }
 
 /**
+ *     setup_irq - setup an interrupt
+ *     @irq: Interrupt line to setup
+ *     @act: irqaction for the interrupt
+ *
+ * Used to statically setup interrupts in the early boot process.
+ */
+int setup_irq(unsigned int irq, struct irqaction *act)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       return __setup_irq(irq, desc, act);
+}
+
+/**
  *     free_irq - free an interrupt
  *     @irq: Interrupt line to free
  *     @dev_id: Device identity to free
@@ -269,19 +569,19 @@ mismatch:
  */
 void free_irq(unsigned int irq, void *dev_id)
 {
-       struct irq_desc *desc;
+       struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction **p;
        unsigned long flags;
 
        WARN_ON(in_interrupt());
-       if (irq >= NR_IRQS)
+
+       if (!desc)
                return;
 
-       desc = irq_desc + irq;
-       spin_lock_irqsave(&desc->lock,flags);
+       spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        for (;;) {
-               struct irqaction * action = *p;
+               struct irqaction *action = *p;
 
                if (action) {
                        struct irqaction **pp = p;
@@ -295,31 +595,48 @@ void free_irq(unsigned int irq, void *dev_id)
 
                        /* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-                       if (desc->handler->release)
-                               desc->handler->release(irq, dev_id);
+                       if (desc->chip->release)
+                               desc->chip->release(irq, dev_id);
 #endif
 
                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
-                               if (desc->handler->shutdown)
-                                       desc->handler->shutdown(irq);
+                               if (desc->chip->shutdown)
+                                       desc->chip->shutdown(irq);
                                else
-                                       desc->handler->disable(irq);
+                                       desc->chip->disable(irq);
                        }
-                       spin_unlock_irqrestore(&desc->lock,flags);
+                       spin_unlock_irqrestore(&desc->lock, flags);
                        unregister_handler_proc(irq, action);
 
                        /* Make sure it's not being used on another CPU */
                        synchronize_irq(irq);
+#ifdef CONFIG_DEBUG_SHIRQ
+                       /*
+                        * It's a shared IRQ -- the driver ought to be
+                        * prepared for it to happen even now it's
+                        * being freed, so let's make sure....  We do
+                        * this after actually deregistering it, to
+                        * make sure that a 'real' IRQ doesn't run in
+                        * parallel with our fake
+                        */
+                       if (action->flags & IRQF_SHARED) {
+                               local_irq_save(flags);
+                               action->handler(irq, dev_id);
+                               local_irq_restore(flags);
+                       }
+#endif
                        kfree(action);
                        return;
                }
-               printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
-               spin_unlock_irqrestore(&desc->lock,flags);
+               printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
+#ifdef CONFIG_DEBUG_SHIRQ
+               dump_stack();
+#endif
+               spin_unlock_irqrestore(&desc->lock, flags);
                return;
        }
 }
-
 EXPORT_SYMBOL(free_irq);
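
/*
 * Usage sketch, not part of this patch: on a shared line the dev_id handed
 * to free_irq() must be the same pointer that was passed to request_irq(),
 * since it is the only key for picking the right irqaction off the chain.
 * free_irq() also calls synchronize_irq(), so the handler is guaranteed to
 * have finished when it returns.  foo_dev is a hypothetical type.
 */
static void foo_remove(struct foo_dev *foo)
{
	free_irq(foo->irq, foo);	/* foo was the dev_id at request time */
}
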
 
 /**
@@ -346,27 +663,51 @@ EXPORT_SYMBOL(free_irq);
  *
  *     Flags:
  *
- *     SA_SHIRQ                Interrupt is shared
- *     SA_INTERRUPT            Disable local interrupts while processing
- *     SA_SAMPLE_RANDOM        The interrupt can be used for entropy
+ *     IRQF_SHARED             Interrupt is shared
+ *     IRQF_DISABLED   Disable local interrupts while processing
+ *     IRQF_SAMPLE_RANDOM      The interrupt can be used for entropy
+ *     IRQF_TRIGGER_*          Specify active edge(s) or level
  *
  */
-int request_irq(unsigned int irq,
-               irqreturn_t (*handler)(int, void *, struct pt_regs *),
-               unsigned long irqflags, const char * devname, void *dev_id)
+int request_irq(unsigned int irq, irq_handler_t handler,
+               unsigned long irqflags, const char *devname, void *dev_id)
 {
-       struct irqaction * action;
+       struct irqaction *action;
+       struct irq_desc *desc;
        int retval;
 
        /*
+        * handle_IRQ_event() always ignores IRQF_DISABLED except for
+        * the _first_ irqaction (sigh).  That can cause oopsing, but
+        * the behavior is classified as "will not fix" so we need to
+        * start nudging drivers away from using that idiom.
+        */
+       if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+                       == (IRQF_SHARED|IRQF_DISABLED))
+               pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+                               "guaranteed on shared IRQs\n",
+                               irq, devname);
+
+#ifdef CONFIG_LOCKDEP
+       /*
+        * Lockdep wants atomic interrupt handlers:
+        */
+       irqflags |= IRQF_DISABLED;
+#endif
+       /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
-       if ((irqflags & SA_SHIRQ) && !dev_id)
+       if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;
-       if (irq >= NR_IRQS)
+
+       desc = irq_to_desc(irq);
+       if (!desc)
+               return -EINVAL;
+
+       if (desc->status & IRQ_NOREQUEST)
                return -EINVAL;
        if (!handler)
                return -EINVAL;
@@ -382,14 +723,29 @@ int request_irq(unsigned int irq,
        action->next = NULL;
        action->dev_id = dev_id;
 
-       select_smp_affinity(irq);
-
-       retval = setup_irq(irq, action);
+       retval = __setup_irq(irq, desc, action);
        if (retval)
                kfree(action);
 
+#ifdef CONFIG_DEBUG_SHIRQ
+       if (irqflags & IRQF_SHARED) {
+               /*
+                * It's a shared IRQ -- the driver ought to be prepared for it
+                * to happen immediately, so let's make sure....
+                * We disable the irq to make sure that a 'real' IRQ doesn't
+                * run in parallel with our fake.
+                */
+               unsigned long flags;
+
+               disable_irq(irq);
+               local_irq_save(flags);
+
+               handler(irq, dev_id);
+
+               local_irq_restore(flags);
+               enable_irq(irq);
+       }
+#endif
        return retval;
 }
-
 EXPORT_SYMBOL(request_irq);
-
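
/*
 * Usage sketch, not part of this patch: requesting a shared, falling-edge
 * interrupt with the irq_handler_t prototype (no pt_regs argument).  The
 * foo_* names and foo_irq_pending() are hypothetical.
 */
#include <linux/interrupt.h>	/* request_irq(), irq_handler_t, IRQF_* */

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!foo_irq_pending(foo))
		return IRQ_NONE;	/* not ours on this shared line */

	/* ... service the device ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct foo_dev *foo)
{
	return request_irq(foo->irq, foo_interrupt,
			   IRQF_SHARED | IRQF_TRIGGER_FALLING,
			   "foo", foo);
}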