x86/irq: Fix move_irq_desc() for nodes without ram
[safe/jmp/linux-2.6] / kernel / irq / manage.c
index 2734eca..61c679d 100644 (file)
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
        return 1;
 }
 
-static void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ *     irq_set_thread_affinity - Notify irq threads to adjust affinity
+ *     @desc:          irq descriptor which has affinity changed
+ *
+ *     We just set IRQTF_AFFINITY and delegate the affinity setting
+ *     to the interrupt thread itself. We can not call
+ *     set_cpus_allowed_ptr() here as we hold desc->lock and this
+ *     code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
        struct irqaction *action = desc->action;
 
        while (action) {
                if (action->thread)
-                       set_cpus_allowed_ptr(action->thread, cpumask);
+                       set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
 }
@@ -109,17 +117,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
        spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-       if (desc->status & IRQ_MOVE_PCNTXT)
-               desc->chip->set_affinity(irq, cpumask);
+       if (desc->status & IRQ_MOVE_PCNTXT) {
+               if (!desc->chip->set_affinity(irq, cpumask)) {
+                       cpumask_copy(desc->affinity, cpumask);
+                       irq_set_thread_affinity(desc);
+               }
+       }
        else {
                desc->status |= IRQ_MOVE_PENDING;
                cpumask_copy(desc->pending_mask, cpumask);
        }
 #else
-       cpumask_copy(desc->affinity, cpumask);
-       desc->chip->set_affinity(irq, cpumask);
+       if (!desc->chip->set_affinity(irq, cpumask)) {
+               cpumask_copy(desc->affinity, cpumask);
+               irq_set_thread_affinity(desc);
+       }
 #endif
-       irq_set_thread_affinity(desc, cpumask);
        desc->status |= IRQ_AFFINITY_SET;
        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
@@ -171,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
        spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc);
        if (!ret)
-               irq_set_thread_affinity(desc, desc->affinity);
+               irq_set_thread_affinity(desc);
        spin_unlock_irqrestore(&desc->lock, flags);
 
        return ret;
@@ -438,6 +451,39 @@ static int irq_wait_for_interrupt(struct irqaction *action)
        return -1;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+       cpumask_var_t mask;
+
+       if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+               return;
+
+       /*
+        * In case we are out of memory we set IRQTF_AFFINITY again and
+        * try again next time
+        */
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               set_bit(IRQTF_AFFINITY, &action->thread_flags);
+               return;
+       }
+
+       spin_lock_irq(&desc->lock);
+       cpumask_copy(mask, desc->affinity);
+       spin_unlock_irq(&desc->lock);
+
+       set_cpus_allowed_ptr(current, mask);
+       free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
 /*
  * Interrupt handler thread
  */
@@ -453,6 +499,8 @@ static int irq_thread(void *data)
 
        while (!irq_wait_for_interrupt(action)) {
 
+               irq_thread_check_affinity(desc, action);
+
                atomic_inc(&desc->threads_active);
 
                spin_lock_irq(&desc->lock);
@@ -851,7 +899,7 @@ EXPORT_SYMBOL(free_irq);
  *     still called in hard interrupt context and has to check
  *     whether the interrupt originates from the device. If yes it
  *     needs to disable the interrupt on the device and return
- *     IRQ_THREAD_WAKE which will wake up the handler thread and run
+ *     IRQ_WAKE_THREAD which will wake up the handler thread and run
  *     @thread_fn. This split handler design is necessary to support
  *     shared interrupts.
  *