sched: Separate out build of NUMA sched domain from __build_sched_domains
author     Andreas Herrmann <andreas.herrmann3@amd.com>
           Tue, 18 Aug 2009 10:54:06 +0000 (12:54 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Tue, 18 Aug 2009 16:35:40 +0000 (18:35 +0200)
... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105406.GD29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index c5d1fee..dd95a47 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8482,6 +8482,37 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
        return sa_rootdomain;
 }
 
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+       const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+       struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+       struct sched_domain *parent;
+
+       d->sd_allnodes = 0;
+       if (cpumask_weight(cpu_map) >
+           SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+               sd = &per_cpu(allnodes_domains, i).sd;
+               SD_INIT(sd, ALLNODES);
+               set_domain_attribute(sd, attr);
+               cpumask_copy(sched_domain_span(sd), cpu_map);
+               cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+               d->sd_allnodes = 1;
+       }
+       parent = sd;
+
+       sd = &per_cpu(node_domains, i).sd;
+       SD_INIT(sd, NODE);
+       set_domain_attribute(sd, attr);
+       sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+       sd->parent = parent;
+       if (parent)
+               parent->child = sd;
+       cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+#endif
+       return sd;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -8510,31 +8541,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
                            cpu_map);
 
-#ifdef CONFIG_NUMA
-               if (cpumask_weight(cpu_map) >
-                               SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
-                       sd = &per_cpu(allnodes_domains, i).sd;
-                       SD_INIT(sd, ALLNODES);
-                       set_domain_attribute(sd, attr);
-                       cpumask_copy(sched_domain_span(sd), cpu_map);
-                       cpu_to_allnodes_group(i, cpu_map, &sd->groups,
-                                             d.tmpmask);
-                       p = sd;
-                       d.sd_allnodes = 1;
-               } else
-                       p = NULL;
-
-               sd = &per_cpu(node_domains, i).sd;
-               SD_INIT(sd, NODE);
-               set_domain_attribute(sd, attr);
-               sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
-               sd->parent = p;
-               if (p)
-                       p->child = sd;
-               cpumask_and(sched_domain_span(sd),
-                           sched_domain_span(sd), cpu_map);
-#endif
-
+               sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
                p = sd;
                sd = &per_cpu(phys_domains, i).sd;
                SD_INIT(sd, CPU);
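
For reference, a minimal standalone sketch of the parent/child chaining
that the extracted helper performs (this is NOT kernel code: toy_domain,
build_numa_levels and the numeric parameters are simplified stand-ins
invented for illustration). When the CPU map is small, the ALLNODES level
is skipped, parent stays NULL and the NODE domain ends up as the top of
the hierarchy; otherwise the NODE domain is linked beneath ALLNODES:

	#include <stdio.h>
	#include <stddef.h>

	/* Stand-in for struct sched_domain: only the linkage matters. */
	struct toy_domain {
		struct toy_domain *parent;
		struct toy_domain *child;
		const char *name;
	};

	static struct toy_domain allnodes_dom = { .name = "ALLNODES" };
	static struct toy_domain node_dom     = { .name = "NODE" };

	/*
	 * Mirrors the control flow of __build_numa_sched_domains(): the
	 * optional ALLNODES level is created only when the CPU map spans
	 * "enough" nodes; either way the NODE level is built and linked
	 * under whatever (if anything) came before it.
	 */
	static struct toy_domain *build_numa_levels(int total_cpus,
			int cpus_per_node, int nodes_per_domain)
	{
		struct toy_domain *sd = NULL;
		struct toy_domain *parent;

		if (total_cpus > nodes_per_domain * cpus_per_node)
			sd = &allnodes_dom;	/* optional top level */
		parent = sd;			/* NULL if level skipped */

		sd = &node_dom;
		sd->parent = parent;
		if (parent)
			parent->child = sd;
		return sd;			/* lowest NUMA level built */
	}

	int main(void)
	{
		/* E.g. 128 CPUs, 4 per node, threshold of 16 nodes per
		 * domain (playing the role of SD_NODES_PER_DOMAIN):
		 * 128 > 16 * 4, so the ALLNODES level is created. */
		struct toy_domain *sd = build_numa_levels(128, 4, 16);

		for (; sd; sd = sd->parent)
			printf("%s\n", sd->name);  /* NODE, then ALLNODES */
		return 0;
	}

As in the hunk above, the domain returned by the helper becomes the parent
(p = sd) of the CPU-level domain that __build_sched_domains() constructs
next, so the chain keeps growing downward toward the per-CPU levels.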