sched: fix cpupri_set() update ordering and convert cpupri to cpumask_var_t
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 52154fe..597b330 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -55,7 +55,7 @@ static int convert_prio(int prio)
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
- * @lowest_mask: A mask to fill in with selected CPUs
+ * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
  * current invocation.  By the time the call returns, the CPUs may have in
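
The "(or NULL)" convention added above lets a caller probe whether any
lower-priority CPU exists at all without paying for a cpumask copy. A
minimal caller sketch (has_lower_prio_cpu() is a hypothetical helper for
illustration, not part of this patch):

	/* Ask only whether some CPU runs below p's priority. */
	static int has_lower_prio_cpu(struct cpupri *cp, struct task_struct *p)
	{
		return cpupri_find(cp, p, NULL);	/* NULL: skip the mask fill */
	}
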
@@ -67,24 +67,35 @@ static int convert_prio(int prio)
  * Returns: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
-               cpumask_t *lowest_mask)
+               struct cpumask *lowest_mask)
 {
        int                  idx      = 0;
        int                  task_pri = convert_prio(p->prio);
 
        for_each_cpupri_active(cp->pri_active, idx) {
                struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
-               cpumask_t mask;
 
                if (idx >= task_pri)
                        break;
 
-               cpus_and(mask, p->cpus_allowed, vec->mask);
-
-               if (cpus_empty(mask))
+               if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                        continue;
 
-               *lowest_mask = mask;
+               if (lowest_mask) {
+                       cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+                       /*
+                        * We have to ensure that we have at least one bit
+                        * still set in the array, since the map could have
+                        * been concurrently emptied between the first and
+                        * second reads of vec->mask.  If we hit this
+                        * condition, simply act as though we never hit this
+                        * priority level and continue on.
+                        */
+                       if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+                               continue;
+               }
+
                return 1;
        }
 
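
The comment in the new code is worth spelling out: vec->mask is read twice
without vec->lock held, once by cpumask_any_and() and once by cpumask_and(),
so a writer can empty the vector in between. A sketch of the interleaving
the re-check guards against (illustration only, not code from this patch):

	/*
	 * reader: cpupri_find()                 writer: cpupri_set()
	 * ---------------------                 --------------------
	 * cpumask_any_and(&p->cpus_allowed,
	 *                 vec->mask)  -> hit
	 *                                       cpumask_clear_cpu(cpu, vec->mask)
	 * cpumask_and(lowest_mask,
	 *             &p->cpus_allowed,
	 *             vec->mask)  -> empty
	 *
	 * cpumask_any(lowest_mask) >= nr_cpu_ids then detects the empty
	 * result and the loop continues at the next priority level.
	 */
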
@@ -116,32 +127,34 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 
        /*
         * If the cpu was currently mapped to a different value, we
-        * first need to unmap the old value
+        * need to map it to the new value then remove the old value.
+        * Note, we must add the new value first, otherwise we risk the
+        * cpu being cleared from pri_active, and this cpu could be
+        * missed for a push or pull.
         */
-       if (likely(oldpri != CPUPRI_INVALID)) {
-               struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];
-
-               spin_lock_irqsave(&vec->lock, flags);
-
-               vec->count--;
-               if (!vec->count)
-                       clear_bit(oldpri, cp->pri_active);
-               cpu_clear(cpu, vec->mask);
-
-               spin_unlock_irqrestore(&vec->lock, flags);
-       }
-
        if (likely(newpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
 
-               spin_lock_irqsave(&vec->lock, flags);
+               raw_spin_lock_irqsave(&vec->lock, flags);
 
-               cpu_set(cpu, vec->mask);
+               cpumask_set_cpu(cpu, vec->mask);
                vec->count++;
                if (vec->count == 1)
                        set_bit(newpri, cp->pri_active);
 
-               spin_unlock_irqrestore(&vec->lock, flags);
+               raw_spin_unlock_irqrestore(&vec->lock, flags);
+       }
+       if (likely(oldpri != CPUPRI_INVALID)) {
+               struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];
+
+               raw_spin_lock_irqsave(&vec->lock, flags);
+
+               vec->count--;
+               if (!vec->count)
+                       clear_bit(oldpri, cp->pri_active);
+               cpumask_clear_cpu(cpu, vec->mask);
+
+               raw_spin_unlock_irqrestore(&vec->lock, flags);
        }
 
        *currpri = newpri;
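
The reordering above is the heart of the fix: the cpu must never be absent
from every vector while it is still online, or a concurrent cpupri_find()
can fail to see it. A sketch of the window the old remove-then-add order
left open (illustration only, not code from this patch):

	/*
	 * old order: cpupri_set()               concurrent cpupri_find()
	 * -----------------------               ------------------------
	 * clear cpu from pri_to_cpu[oldpri]
	 *                                       cpu visible at no priority:
	 *                                       a push/pull target is missed
	 * set cpu in pri_to_cpu[newpri]
	 *
	 * Adding to the new vector first means the cpu may transiently be
	 * visible at both priorities instead, which is harmless because
	 * cpupri_find() re-validates whatever it reads (see above).
	 */
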
@@ -150,25 +163,47 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 /**
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
+ * @bootmem: true if called during early boot, when allocations must not sleep
  *
- * Returns: (void)
+ * Returns: 0 on success, -ENOMEM if a cpumask allocation fails.
  */
-void cpupri_init(struct cpupri *cp)
+int cpupri_init(struct cpupri *cp, bool bootmem)
 {
+       gfp_t gfp = GFP_KERNEL;
        int i;
 
+       if (bootmem)
+               gfp = GFP_NOWAIT;
+
        memset(cp, 0, sizeof(*cp));
 
        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[i];
 
-               spin_lock_init(&vec->lock);
+               raw_spin_lock_init(&vec->lock);
                vec->count = 0;
-               cpus_clear(vec->mask);
+               if (!zalloc_cpumask_var(&vec->mask, gfp))
+                       goto cleanup;
        }
 
        for_each_possible_cpu(i)
                cp->cpu_to_pri[i] = CPUPRI_INVALID;
+       return 0;
+
+cleanup:
+       for (i--; i >= 0; i--)
+               free_cpumask_var(cp->pri_to_cpu[i].mask);
+       return -ENOMEM;
 }
 
+/**
+ * cpupri_cleanup - clean up the cpupri structure
+ * @cp: The cpupri context
+ */
+void cpupri_cleanup(struct cpupri *cp)
+{
+       int i;
 
+       for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
+               free_cpumask_var(cp->pri_to_cpu[i].mask);
+}
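
cpupri_init() unwinds its own partial allocations on failure, so a caller
only needs cpupri_cleanup() after a successful init. A minimal caller
sketch (example_setup() is hypothetical, not part of this patch):

	static int example_setup(struct cpupri *cp, bool early)
	{
		int ret = cpupri_init(cp, early);	/* GFP_NOWAIT when early */

		if (ret)
			return ret;	/* -ENOMEM, masks already freed */

		/* ... cpupri_set() / cpupri_find() users run here ... */

		cpupri_cleanup(cp);	/* frees all CPUPRI_NR_PRIORITIES masks */
		return 0;
	}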