cpuset: speed up sched domain partition
author Lai Jiangshan <laijs@cn.fujitsu.com>
Wed, 30 Jul 2008 05:33:22 +0000 (22:33 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 30 Jul 2008 16:41:44 +0000 (09:41 -0700)
All child cpusets contain a subset of the parent's cpus, so we can skip
them when partitioning sched domains. This greatly decreases the size of
'csa' for cpusets with a multi-level hierarchy.
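
A minimal user-space sketch of the pruned scan (illustrative only, not kernel
code; the helper name 'collect' and the fixed-size tree are hypothetical):
once a cpuset with load balancing enabled is reached, its whole subtree is
covered by one sched domain, so the walk records that cpuset in the candidate
array and does not descend into its children.

	/* Standalone model of the pruned csa collection; builds with any C99 compiler. */
	#include <stdio.h>

	struct cs {
		const char *name;
		int load_balance;		/* models is_sched_load_balance() */
		struct cs *children[4];		/* NULL-terminated child list */
	};

	/* Walk the hierarchy; stop descending once a load-balanced cpuset is found. */
	static int collect(struct cs *c, struct cs **csa, int csn)
	{
		if (c->load_balance) {
			csa[csn++] = c;		/* subtree covered; skip children */
			return csn;
		}
		for (int i = 0; c->children[i]; i++)
			csn = collect(c->children[i], csa, csn);
		return csn;
	}

	int main(void)
	{
		struct cs leaf1 = { "a/1", 1, { 0 } };
		struct cs leaf2 = { "a/2", 1, { 0 } };
		struct cs a = { "a", 1, { &leaf1, &leaf2, 0 } };	/* balanced: children skipped */
		struct cs b = { "b", 0, { 0 } };
		struct cs root = { "/", 0, { &a, &b, 0 } };

		struct cs *csa[8];
		int csn = collect(&root, csa, 0);

		printf("%d csa entries:\n", csn);	/* 1, only "a" */
		for (int i = 0; i < csn; i++)
			printf("  %s\n", csa[i]->name);
		return 0;
	}

Without the pruning, every load-balanced cpuset in the subtree ("a", "a/1",
"a/2") would land in csa; with it, only the topmost one does.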

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: Cedric Le Goater <clg@fr.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
kernel/cpuset.c

index 3624dc0..c818c36 100644
@@ -486,13 +486,38 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 static void
 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 {
-       if (!dattr)
-               return;
        if (dattr->relax_domain_level < c->relax_domain_level)
                dattr->relax_domain_level = c->relax_domain_level;
        return;
 }
 
+static void
+update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+       LIST_HEAD(q);
+
+       list_add(&c->stack_list, &q);
+       while (!list_empty(&q)) {
+               struct cpuset *cp;
+               struct cgroup *cont;
+               struct cpuset *child;
+
+               cp = list_first_entry(&q, struct cpuset, stack_list);
+               list_del(q.next);
+
+               if (cpus_empty(cp->cpus_allowed))
+                       continue;
+
+               if (is_sched_load_balance(cp))
+                       update_domain_attr(dattr, cp);
+
+               list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+                       child = cgroup_cs(cont);
+                       list_add_tail(&child->stack_list, &q);
+               }
+       }
+}
+
 /*
  * rebuild_sched_domains()
  *
@@ -614,8 +639,16 @@ void rebuild_sched_domains(void)
                if (cpus_empty(cp->cpus_allowed))
                        continue;
 
-               if (is_sched_load_balance(cp))
+               /*
+                * All child cpusets contain a subset of the parent's cpus, so
+                * just skip them, and then we call update_domain_attr_tree()
+                * to calc relax_domain_level of the corresponding sched
+                * domain.
+                */
+               if (is_sched_load_balance(cp)) {
                        csa[csn++] = cp;
+                       continue;
+               }
 
                list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
                        child = cgroup_cs(cont);
@@ -686,7 +719,7 @@ restart:
                                        cpus_or(*dp, *dp, b->cpus_allowed);
                                        b->pn = -1;
                                        if (dattr)
-                                               update_domain_attr(dattr
+                                               update_domain_attr_tree(dattr
                                                                   + nslot, b);
                                }
                        }
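
Because the skipped descendants no longer appear in 'csa', their
relax_domain_level must still be folded into the attribute of the covering
sched domain; that is what update_domain_attr_tree() does by walking the
subtree and keeping the maximum. A simplified user-space sketch of that
aggregation (illustrative names, and it ignores the empty-cpus and
load-balance checks the kernel performs):

	#include <stdio.h>

	struct cs {
		int relax_domain_level;
		int nr_children;
		struct cs **children;
	};

	/* Fold the whole subtree into one level, using the same max rule as
	 * update_domain_attr(). */
	static void update_attr_tree(int *level, const struct cs *c)
	{
		if (*level < c->relax_domain_level)
			*level = c->relax_domain_level;
		for (int i = 0; i < c->nr_children; i++)
			update_attr_tree(level, c->children[i]);
	}

	int main(void)
	{
		struct cs leaf = { .relax_domain_level = 3 };
		struct cs *kids[] = { &leaf };
		struct cs top = { .relax_domain_level = 1, .nr_children = 1, .children = kids };

		int level = -1;				/* default level */
		update_attr_tree(&level, &top);
		printf("relax_domain_level = %d\n", level);	/* 3, from the skipped child */
		return 0;
	}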