memcgroup: use vmalloc for mem_cgroup allocation
author KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tue, 29 Apr 2008 08:00:24 +0000 (01:00 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 29 Apr 2008 15:06:11 +0000 (08:06 -0700)
On ia64, the kmalloc() of struct mem_cgroup requires order-4 pages.  But the
allocation does not need to be physically contiguous.  For a big mem_cgroup,
vmalloc is better; for small ones, kmalloc is still used.
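
The idea is the generic "small from slab, big from vmalloc" pattern.  A
minimal sketch of that pattern (illustrative names only, not part of this
patch):

	static void *alloc_small_or_vmalloc(size_t size)
	{
		void *p;

		if (size < PAGE_SIZE)
			p = kmalloc(size, GFP_KERNEL);
		else
			p = vmalloc(size);	/* virtually contiguous is enough */
		if (p)
			memset(p, 0, size);
		return p;
	}

	static void free_small_or_vmalloc(void *p, size_t size)
	{
		if (size < PAGE_SIZE)
			kfree(p);
		else
			vfree(p);
	}

The caller has to know which allocator was used when it frees the object,
which is why mem_cgroup gets dedicated mem_cgroup_alloc()/mem_cgroup_free()
helpers below.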

[akpm@linux-foundation.org: simplification]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c

index c5285af..15aa34b 100644
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
@@ -983,6 +984,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
        kfree(mem->info.nodeinfo[node]);
 }
 
+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+       struct mem_cgroup *mem;
+
+       if (sizeof(*mem) < PAGE_SIZE)
+               mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+       else
+               mem = vmalloc(sizeof(*mem));
+
+       if (mem)
+               memset(mem, 0, sizeof(*mem));
+       return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+       if (sizeof(*mem) < PAGE_SIZE)
+               kfree(mem);
+       else
+               vfree(mem);
+}
+
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -993,12 +1017,11 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                mem = &init_mem_cgroup;
                page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
        } else {
-               mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
+               mem = mem_cgroup_alloc();
+               if (!mem)
+                       return ERR_PTR(-ENOMEM);
        }
 
-       if (mem == NULL)
-               return ERR_PTR(-ENOMEM);
-
        res_counter_init(&mem->res);
 
        memset(&mem->info, 0, sizeof(mem->info));
@@ -1012,7 +1035,7 @@ free_out:
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
        if (cont->parent != NULL)
-               kfree(mem);
+               mem_cgroup_free(mem);
        return ERR_PTR(-ENOMEM);
 }
 
@@ -1032,7 +1055,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
 
-       kfree(mem_cgroup_from_cont(cont));
+       mem_cgroup_free(mem_cgroup_from_cont(cont));
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,