diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 344712a..597b330 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
                if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                        continue;
 
-               if (lowest_mask)
+               if (lowest_mask) {
                        cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+                       /*
+                        * We have to ensure that we have at least one bit
+                        * still set in the array, since the map could have
+                        * been concurrently emptied between the first and
+                        * second reads of vec->mask.  If we hit this
+                        * condition, simply act as though we never hit this
+                        * priority level and continue on.
+                        */
+                       if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+                               continue;
+               }
+
                return 1;
        }
 
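The re-check added in this hunk guards against vec->mask being emptied by a concurrent
update between the cpumask_any_and() test and the cpumask_and() copy: without it,
cpupri_find() could return 1 with an empty lowest_mask. Below is a minimal user-space
sketch of the same check/copy/re-check pattern, using a plain atomic bitmask instead of
a cpumask; the names (vec_mask, find_lowest) are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long vec_mask = 0xF0UL;   /* shared: cpus at this priority */

static int find_lowest(unsigned long allowed, unsigned long *lowest)
{
        /* First read: is there any overlap at all? */
        if ((atomic_load(&vec_mask) & allowed) == 0)
                return 0;                 /* no candidate cpu, skip this level */

        /* Second read: compute the actual intersection.  A concurrent
         * writer may have emptied the mask in between, so the result
         * can legitimately be zero and must be re-checked. */
        *lowest = atomic_load(&vec_mask) & allowed;
        if (*lowest == 0)
                return 0;                 /* raced with an update, skip */

        return 1;
}

int main(void)
{
        unsigned long lowest;

        if (find_lowest(0x30UL, &lowest))
                printf("candidates: %#lx\n", lowest);
        return 0;
}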
@@ -114,32 +127,34 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 
        /*
         * If the cpu was currently mapped to a different value, we
-        * first need to unmap the old value
+        * need to map it to the new value then remove the old value.
+        * Note, we must add the new value first, otherwise we risk the
+        * cpu being cleared from pri_active, and this cpu could be
+        * missed for a push or pull.
         */
-       if (likely(oldpri != CPUPRI_INVALID)) {
-               struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];
-
-               spin_lock_irqsave(&vec->lock, flags);
-
-               vec->count--;
-               if (!vec->count)
-                       clear_bit(oldpri, cp->pri_active);
-               cpumask_clear_cpu(cpu, vec->mask);
-
-               spin_unlock_irqrestore(&vec->lock, flags);
-       }
-
        if (likely(newpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
 
-               spin_lock_irqsave(&vec->lock, flags);
+               raw_spin_lock_irqsave(&vec->lock, flags);
 
                cpumask_set_cpu(cpu, vec->mask);
                vec->count++;
                if (vec->count == 1)
                        set_bit(newpri, cp->pri_active);
 
-               spin_unlock_irqrestore(&vec->lock, flags);
+               raw_spin_unlock_irqrestore(&vec->lock, flags);
+       }
+       if (likely(oldpri != CPUPRI_INVALID)) {
+               struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];
+
+               raw_spin_lock_irqsave(&vec->lock, flags);
+
+               vec->count--;
+               if (!vec->count)
+                       clear_bit(oldpri, cp->pri_active);
+               cpumask_clear_cpu(cpu, vec->mask);
+
+               raw_spin_unlock_irqrestore(&vec->lock, flags);
        }
 
        *currpri = newpri;
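The reordering in this hunk makes cpupri_set() publish the cpu in the new priority
vector before clearing it from the old one, so a concurrent push/pull scan of
pri_active never sees the cpu absent from both vectors. A user-space sketch of that
set-then-clear ordering follows; it assumes nothing about the kernel types, and the
names (prio_mask, move_cpu) are illustrative only.

#include <stdatomic.h>

#define NR_PRIO 4

static _Atomic unsigned long prio_mask[NR_PRIO];   /* per-priority cpu masks */

static void move_cpu(int cpu, int oldpri, int newpri)
{
        unsigned long bit = 1UL << cpu;

        /* Publish the new mapping first ... */
        atomic_fetch_or(&prio_mask[newpri], bit);

        /* ... and only then retire the old one, so the cpu stays
         * visible in at least one vector at every instant. */
        atomic_fetch_and(&prio_mask[oldpri], ~bit);
}

int main(void)
{
        atomic_fetch_or(&prio_mask[1], 1UL << 3);   /* cpu 3 starts at prio 1 */
        move_cpu(3, 1, 2);                          /* move cpu 3 to prio 2 */
        return 0;
}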
@@ -152,20 +167,22 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  *
  * Returns: -ENOMEM if memory fails.
  */
-int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
+int cpupri_init(struct cpupri *cp, bool bootmem)
 {
+       gfp_t gfp = GFP_KERNEL;
        int i;
 
+       if (bootmem)
+               gfp = GFP_NOWAIT;
+
        memset(cp, 0, sizeof(*cp));
 
        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[i];
 
-               spin_lock_init(&vec->lock);
+               raw_spin_lock_init(&vec->lock);
                vec->count = 0;
-               if (bootmem)
-                       alloc_bootmem_cpumask_var(&vec->mask);
-               else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
+               if (!zalloc_cpumask_var(&vec->mask, gfp))
                        goto cleanup;
        }