[safe/jmp/linux-2.6] kernel/irq/numa_migrate.c
index 089c374..26bac9d 100644
 
 static void init_copy_kstat_irqs(struct irq_desc *old_desc,
                                 struct irq_desc *desc,
-                                int cpu, int nr)
+                                int node, int nr)
 {
-       unsigned long bytes;
+       init_kstat_irqs(desc, node, nr);
 
-       init_kstat_irqs(desc, cpu, nr);
-
-       if (desc->kstat_irqs != old_desc->kstat_irqs) {
-               /* Compute how many bytes we need per irq and allocate them */
-               bytes = nr * sizeof(unsigned int);
-
-               memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
-       }
+       if (desc->kstat_irqs != old_desc->kstat_irqs)
+               memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
+                        nr * sizeof(*desc->kstat_irqs));
 }
 
 static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -38,84 +33,86 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
        old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
-                struct irq_desc *desc, int cpu)
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+                struct irq_desc *desc, int node)
 {
        memcpy(desc, old_desc, sizeof(struct irq_desc));
-       desc->cpu = cpu;
+       if (!alloc_desc_masks(desc, node, false)) {
+               printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+                               "for migration.\n", irq);
+               return false;
+       }
+       raw_spin_lock_init(&desc->lock);
+       desc->node = node;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-       init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
-       arch_init_copy_chip_data(old_desc, desc, cpu);
+       init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
+       init_copy_desc_masks(old_desc, desc);
+       arch_init_copy_chip_data(old_desc, desc, node);
+       return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
 {
        free_kstat_irqs(old_desc, desc);
+       free_desc_masks(old_desc, desc);
        arch_free_chip_data(old_desc, desc);
 }
 
 static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
-                                               int cpu)
+                                               int node)
 {
        struct irq_desc *desc;
        unsigned int irq;
        unsigned long flags;
-       int node;
 
        irq = old_desc->irq;
 
-       spin_lock_irqsave(&sparse_irq_lock, flags);
+       raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];
 
        if (desc && old_desc != desc)
-                       goto out_unlock;
+               goto out_unlock;
 
-       node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-       printk(KERN_DEBUG "  move irq_desc for %d to cpu %d node %d\n",
-                irq, cpu, node);
        if (!desc) {
-               printk(KERN_ERR "can not get new irq_desc for moving\n");
+               printk(KERN_ERR "irq %d: can not get new irq_desc "
+                               "for migration.\n", irq);
                /* still use old one */
                desc = old_desc;
                goto out_unlock;
        }
-       init_copy_one_irq_desc(irq, old_desc, desc, cpu);
+       if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
+               /* still use old one */
+               kfree(desc);
+               desc = old_desc;
+               goto out_unlock;
+       }
 
        irq_desc_ptrs[irq] = desc;
+       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
        /* free the old one */
        free_one_irq_desc(old_desc, desc);
        kfree(old_desc);
 
+       return desc;
+
 out_unlock:
-       spin_unlock_irqrestore(&sparse_irq_lock, flags);
+       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
        return desc;
 }
 
-struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
+struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 {
-       int old_cpu;
-       int node, old_node;
-
-       /* those all static, do move them */
-       if (desc->irq < NR_IRQS_LEGACY)
+       /* those static or target node is -1, do not move them */
+       if (desc->irq < NR_IRQS_LEGACY || node == -1)
                return desc;
 
-       old_cpu = desc->cpu;
-       printk(KERN_DEBUG
-                "try to move irq_desc from cpu %d to %d\n", old_cpu, cpu);
-       if (old_cpu != cpu) {
-               node = cpu_to_node(cpu);
-               old_node = cpu_to_node(old_cpu);
-               if (old_node != node)
-                       desc = __real_move_irq_desc(desc, cpu);
-               else
-                       desc->cpu = cpu;
-       }
+       if (desc->node != node)
+               desc = __real_move_irq_desc(desc, node);
 
        return desc;
 }
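
For context, here is a minimal caller sketch. It is not taken from this tree; the helper name migrate_desc_for_mask() and the way the target node is derived are illustrative assumptions. It only shows how a caller that knows a new affinity mask could feed move_irq_desc() the node argument introduced by this patch.

/* Hypothetical caller sketch -- not the kernel's actual call site.
 * Derives the target node from the first CPU in a new affinity mask
 * and lets move_irq_desc() decide whether migration is needed.
 */
#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static struct irq_desc *migrate_desc_for_mask(struct irq_desc *desc,
					      const struct cpumask *mask)
{
	int cpu = cpumask_first(mask);
	int node = (cpu < nr_cpu_ids) ? cpu_to_node(cpu) : -1;

	/*
	 * move_irq_desc() returns the old descriptor unchanged for
	 * legacy IRQs, for node == -1, or when allocating the new
	 * descriptor or its cpumasks fails; otherwise it returns the
	 * copy placed on the requested node.
	 */
	return move_irq_desc(desc, node);
}

Taking a node rather than a CPU is the point of the interface change above: the descriptor only needs to be re-allocated when it crosses a NUMA node, so callers no longer track old_cpu/old_node themselves; move_irq_desc() simply compares desc->node against the requested node before calling __real_move_irq_desc().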