Merge branches 'x86/acpi', 'x86/asm', 'x86/cpudetect', 'x86/crashdump', 'x86/debug...
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index a12d00e..e05ad9b 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,23 +1,11 @@
 
 #include <linux/irq.h>
 
-void set_pending_irq(unsigned int irq, cpumask_t mask)
+void move_masked_irq(int irq)
 {
-       irq_desc_t *desc = irq_desc + irq;
-       unsigned long flags;
+       struct irq_desc *desc = irq_to_desc(irq);
 
-       spin_lock_irqsave(&desc->lock, flags);
-       desc->move_irq = 1;
-       pending_irq_cpumask[irq] = mask;
-       spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-void move_native_irq(int irq)
-{
-       cpumask_t tmp;
-       irq_desc_t *desc = irq_descp(irq);
-
-       if (likely(!desc->move_irq))
+       if (likely(!(desc->status & IRQ_MOVE_PENDING)))
                return;
 
        /*
@@ -28,18 +16,16 @@ void move_native_irq(int irq)
                return;
        }
 
-       desc->move_irq = 0;
+       desc->status &= ~IRQ_MOVE_PENDING;
 
-       if (unlikely(cpus_empty(pending_irq_cpumask[irq])))
+       if (unlikely(cpumask_empty(desc->pending_mask)))
                return;
 
-       if (!desc->handler->set_affinity)
+       if (!desc->chip->set_affinity)
                return;
 
        assert_spin_locked(&desc->lock);
 
-       cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
-
        /*
         * If there was a valid mask to work with, please
         * do the disable, re-program, enable sequence.
@@ -48,15 +34,31 @@ void move_native_irq(int irq)
         * when an active trigger is comming in. This could
         * cause some ioapics to mal-function.
         * Being paranoid i guess!
+        *
+        * For correct operation this depends on the caller
+        * masking the irqs.
         */
-       if (likely(!cpus_empty(tmp))) {
-               if (likely(!(desc->status & IRQ_DISABLED)))
-                       desc->handler->disable(irq);
+       if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
+                  < nr_cpu_ids)) {
+               cpumask_and(desc->affinity,
+                           desc->pending_mask, cpu_online_mask);
+               desc->chip->set_affinity(irq, desc->affinity);
+       }
+       cpumask_clear(desc->pending_mask);
+}
 
-               desc->handler->set_affinity(irq,tmp);
+void move_native_irq(int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
 
-               if (likely(!(desc->status & IRQ_DISABLED)))
-                       desc->handler->enable(irq);
-       }
-       cpus_clear(pending_irq_cpumask[irq]);
+       if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+               return;
+
+       if (unlikely(desc->status & IRQ_DISABLED))
+               return;
+
+       desc->chip->mask(irq);
+       move_masked_irq(irq);
+       desc->chip->unmask(irq);
 }
+