x86/irq: Fix move_irq_desc() for nodes without ram
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a4c1ab8..61c679d 100644
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
        return 1;
 }
 
-static void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ *     irq_set_thread_affinity - Notify irq threads to adjust affinity
+ *     @desc:          irq descriptor which has affinity changed
+ *
+ *     We just set IRQTF_AFFINITY and delegate the affinity setting
+ *     to the interrupt thread itself. We can not call
+ *     set_cpus_allowed_ptr() here as we hold desc->lock and this
+ *     code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
        struct irqaction *action = desc->action;
 
        while (action) {
                if (action->thread)
-                       set_cpus_allowed_ptr(action->thread, cpumask);
+                       set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
 }
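
The hunk above is the producer half of a flag handshake. Because
irq_set_thread_affinity() runs under desc->lock, possibly from hard interrupt
context, all it may do is set a bit; the consumer half,
irq_thread_check_affinity() further down in this patch, runs in the irq thread,
where sleeping and migrating are legal. Condensed, the two halves look like
this (simplified from the hunks below; the real consumer copies desc->affinity
into a temporary cpumask under desc->lock before calling set_cpus_allowed_ptr()):

	/* Producer: hard irq context, under desc->lock -- non-blocking only. */
	set_bit(IRQTF_AFFINITY, &action->thread_flags);

	/* Consumer: irq thread, process context -- may sleep and migrate. */
	if (test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		set_cpus_allowed_ptr(current, mask);	/* mask copied under desc->lock */
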
@@ -109,18 +117,21 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
        spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-       if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               cpumask_copy(desc->affinity, cpumask);
-               desc->chip->set_affinity(irq, cpumask);
-       } else {
+       if (desc->status & IRQ_MOVE_PCNTXT) {
+               if (!desc->chip->set_affinity(irq, cpumask)) {
+                       cpumask_copy(desc->affinity, cpumask);
+                       irq_set_thread_affinity(desc);
+               }
+       } else {
                desc->status |= IRQ_MOVE_PENDING;
                cpumask_copy(desc->pending_mask, cpumask);
        }
 #else
-       cpumask_copy(desc->affinity, cpumask);
-       desc->chip->set_affinity(irq, cpumask);
+       if (!desc->chip->set_affinity(irq, cpumask)) {
+               cpumask_copy(desc->affinity, cpumask);
+               irq_set_thread_affinity(desc);
+       }
 #endif
-       irq_set_thread_affinity(desc, cpumask);
        desc->status |= IRQ_AFFINITY_SET;
        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
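
irq_set_affinity() now trusts the return value of chip->set_affinity():
desc->affinity is updated, and the irq threads are notified, only when the chip
actually accepted the new mask. A minimal sketch of a callback honouring that
contract (the foo_* names are hypothetical, not from this patch; the signature
of this era is int (*set_affinity)(unsigned int, const struct cpumask *)):

	static int foo_set_affinity(unsigned int irq, const struct cpumask *mask)
	{
		unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

		if (cpu >= nr_cpu_ids)
			return -EINVAL;	/* rejected: core keeps the old desc->affinity */

		foo_route_irq_to_cpu(irq, cpu);	/* hypothetical hw programming */
		return 0;	/* accepted: core copies the mask, notifies threads */
	}
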
@@ -172,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
        spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc);
        if (!ret)
-               irq_set_thread_affinity(desc, desc->affinity);
+               irq_set_thread_affinity(desc);
        spin_unlock_irqrestore(&desc->lock, flags);
 
        return ret;
@@ -185,6 +197,20 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 }
 #endif
 
+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+       if (suspend) {
+               if (!desc->action || (desc->action->flags & IRQF_TIMER))
+                       return;
+               desc->status |= IRQ_SUSPENDED;
+       }
+
+       if (!desc->depth++) {
+               desc->status |= IRQ_DISABLED;
+               desc->chip->disable(irq);
+       }
+}
+
 /**
  *     disable_irq_nosync - disable an irq without waiting
  *     @irq: Interrupt to disable
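
__disable_irq() is made non-static because the suspend path needs it:
suspend_device_irqs() in kernel/irq/pm.c (added by the same series) walks every
descriptor and disables it with suspend == true, which additionally marks it
IRQ_SUSPENDED so a stray enable_irq() cannot unmask it before resume. Note the
IRQF_TIMER exemption above: timer interrupts must keep firing during suspend.
A sketch of that caller, for context:

	void suspend_device_irqs(void)
	{
		struct irq_desc *desc;
		int irq;

		for_each_irq_desc(irq, desc) {
			unsigned long flags;

			spin_lock_irqsave(&desc->lock, flags);
			__disable_irq(desc, irq, true);
			spin_unlock_irqrestore(&desc->lock, flags);
		}

		for_each_irq_desc(irq, desc)
			if (desc->status & IRQ_SUSPENDED)
				synchronize_irq(irq);
	}
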
@@ -205,10 +231,7 @@ void disable_irq_nosync(unsigned int irq)
                return;
 
        spin_lock_irqsave(&desc->lock, flags);
-       if (!desc->depth++) {
-               desc->status |= IRQ_DISABLED;
-               desc->chip->disable(irq);
-       }
+       __disable_irq(desc, irq, false);
        spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
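
disable_irq_nosync() keeps its reference-counted semantics through the new
helper: only the 0 -> 1 depth transition actually masks the line, and only the
matching 1 -> 0 transition in __enable_irq() unmasks it again. For example:

	disable_irq(irq);	/* depth 0 -> 1: IRQ_DISABLED set, chip->disable() called */
	disable_irq(irq);	/* depth 1 -> 2: already masked, no chip call */
	enable_irq(irq);	/* depth 2 -> 1: still masked */
	enable_irq(irq);	/* depth 1 -> 0: checked for resend, line unmasked */
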
@@ -238,15 +261,21 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
+       if (resume)
+               desc->status &= ~IRQ_SUSPENDED;
+
        switch (desc->depth) {
        case 0:
+ err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;
 
+               if (desc->status & IRQ_SUSPENDED)
+                       goto err_out;
                /* Prevent probing on this irq: */
                desc->status = status | IRQ_NOPROBE;
                check_irq_resend(desc, irq);
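
The resume == true path clears IRQ_SUSPENDED before the depth switch, while a
plain enable_irq() that still finds IRQ_SUSPENDED set at depth 1 jumps to
err_out and warns instead of unmasking a line the PM core has not released yet.
The resume-side caller, again from kernel/irq/pm.c, sketched:

	void resume_device_irqs(void)
	{
		struct irq_desc *desc;
		int irq;

		for_each_irq_desc(irq, desc) {
			unsigned long flags;

			if (!(desc->status & IRQ_SUSPENDED))
				continue;

			spin_lock_irqsave(&desc->lock, flags);
			__enable_irq(desc, irq, true);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
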
@@ -276,7 +305,7 @@ void enable_irq(unsigned int irq)
                return;
 
        spin_lock_irqsave(&desc->lock, flags);
-       __enable_irq(desc, irq);
+       __enable_irq(desc, irq, false);
        spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -407,24 +436,54 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
        return ret;
 }
 
-static inline int irq_thread_should_run(struct irqaction *action)
-{
-       return test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags);
-}
-
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
-               if (irq_thread_should_run(action)) {
+
+               if (test_and_clear_bit(IRQTF_RUNTHREAD,
+                                      &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
-               } else
-                       schedule();
+               }
+               schedule();
        }
        return -1;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+       cpumask_var_t mask;
+
+       if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+               return;
+
+       /*
+        * In case we are out of memory we set IRQTF_AFFINITY again and
+        * try again next time
+        */
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               set_bit(IRQTF_AFFINITY, &action->thread_flags);
+               return;
+       }
+
+       spin_lock_irq(&desc->lock);
+       cpumask_copy(mask, desc->affinity);
+       spin_unlock_irq(&desc->lock);
+
+       set_cpus_allowed_ptr(current, mask);
+       free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
 /*
  * Interrupt handler thread
  */
@@ -440,6 +499,8 @@ static int irq_thread(void *data)
 
        while (!irq_wait_for_interrupt(action)) {
 
+               irq_thread_check_affinity(desc, action);
+
                atomic_inc(&desc->threads_active);
 
                spin_lock_irq(&desc->lock);
@@ -641,7 +702,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         */
        if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
                desc->status &= ~IRQ_SPURIOUS_DISABLED;
-               __enable_irq(desc, irq);
+               __enable_irq(desc, irq, false);
        }
 
        spin_unlock_irqrestore(&desc->lock, flags);
@@ -820,8 +881,8 @@ EXPORT_SYMBOL(free_irq);
  *     @irq: Interrupt line to allocate
  *     @handler: Function to be called when the IRQ occurs.
  *               Primary handler for threaded interrupts
- *      @thread_fn: Function called from the irq handler thread
- *                  If NULL, no irq thread is created
+ *     @thread_fn: Function called from the irq handler thread
+ *                 If NULL, no irq thread is created
  *     @irqflags: Interrupt type flags
  *     @devname: An ascii name for the claiming device
  *     @dev_id: A cookie passed back to the handler function
@@ -838,7 +899,7 @@ EXPORT_SYMBOL(free_irq);
  *     still called in hard interrupt context and has to check
  *     whether the interrupt originates from the device. If yes it
  *     needs to disable the interrupt on the device and return
- *     IRQ_THREAD_WAKE which will wake up the handler thread and run
+ *     IRQ_WAKE_THREAD which will wake up the handler thread and run
  *     @thread_fn. This split handler design is necessary to support
  *     shared interrupts.
  *
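
A hedged sketch of the split-handler pattern this comment describes, for a
hypothetical "foo" device on a shared line (all foo_* types and helpers are
invented for illustration):

	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo_dev *foo = dev_id;

		if (!foo_irq_pending(foo))	/* shared line: not our device */
			return IRQ_NONE;

		foo_mask_device_irq(foo);	/* quiesce the device in hard irq context */
		return IRQ_WAKE_THREAD;		/* defer the real work to the thread */
	}

	static irqreturn_t foo_threadfn(int irq, void *dev_id)
	{
		struct foo_dev *foo = dev_id;

		foo_process_events(foo);	/* process context: may sleep */
		foo_unmask_device_irq(foo);
		return IRQ_HANDLED;
	}

	err = request_threaded_irq(foo->irq, foo_hardirq, foo_threadfn,
				   IRQF_SHARED, "foo", foo);
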