memcg: fix page_cgroup fatal error in FLATMEM
author    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
          Fri, 12 Jun 2009 07:33:53 +0000 (10:33 +0300)
committer Pekka Enberg <penberg@cs.helsinki.fi>
          Fri, 12 Jun 2009 08:00:54 +0000 (11:00 +0300)
SLAB is now set up at a very early stage and can be used from init
routines.

However, replacing alloc_bootmem() with page allocator calls in the
FLATMEM/DISCONTIGMEM page_cgroup initialization broke that allocation:
the per-node page_cgroup table spans the whole node, so it is bigger
than the page allocator's MAX_ORDER limit. (The SPARSEMEM case works
fine: it supports MEMORY_HOTPLUG, and its per-section page_cgroup
tables have a reasonable size, i.e. less than 1 << MAX_ORDER pages.)

This patch revives FLATMEM + memory cgroup by going back to
alloc_bootmem() for that configuration.

In the future we may either stop supporting FLATMEM (if it has no
users) or rewrite the FLATMEM code path completely, but right now
that would only add messier code and more overhead.
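
To illustrate the size problem, here is a standalone back-of-the-envelope
sketch (all numbers are assumptions for illustration: 4 KiB pages, a
4 GiB FLATMEM node, a 40-byte struct page_cgroup, MAX_ORDER = 11; the
real values depend on architecture and config):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long page_size  = 4096;       /* assumed 4 KiB pages */
		unsigned long long node_mem   = 4ULL << 30; /* assumed 4 GiB node */
		unsigned long long pc_size    = 40;         /* assumed sizeof(struct page_cgroup) */
		unsigned long long table_size = (node_mem / page_size) * pc_size;
		unsigned long long buddy_max  = page_size << 10; /* MAX_ORDER = 11 -> order-10 block */

		printf("page_cgroup table: %llu MiB, largest buddy block: %llu MiB\n",
		       table_size >> 20, buddy_max >> 20);
		return 0;
	}

With these numbers the table is ~40 MiB against a 4 MiB buddy limit,
which is why alloc_pages_node() cannot back the FLATMEM table and
bootmem has to.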

Reported-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
include/linux/page_cgroup.h
init/main.c
mm/page_cgroup.c

diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 7339c7b..13f126c 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -18,7 +18,19 @@ struct page_cgroup {
 };
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
-void __init page_cgroup_init(void);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+extern void __init page_cgroup_init(void);
+#else
+void __init page_cgroup_init_flatmem(void);
+static inline void __init page_cgroup_init(void)
+{
+}
+#endif
+
 struct page_cgroup *lookup_page_cgroup(struct page *page);
 
 enum {
@@ -87,6 +99,10 @@ static inline void page_cgroup_init(void)
 {
 }
 
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+
 #endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
diff --git a/init/main.c b/init/main.c
index 5616661..b3e8f14 100644
--- a/init/main.c
+++ b/init/main.c
@@ -539,6 +539,11 @@ void __init __weak thread_info_cache_init(void)
  */
 static void __init mm_init(void)
 {
+       /*
+        * page_cgroup requires contiguous pages as memmap
+        * and it's bigger than MAX_ORDER unless SPARSEMEM.
+        */
+       page_cgroup_init_flatmem();
        mem_init();
        kmem_cache_init();
        vmalloc_init();
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd4a90..11a8a10 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        unsigned long start_pfn, nr_pages, index;
-       struct page *page;
-       unsigned int order;
 
        start_pfn = NODE_DATA(nid)->node_start_pfn;
        nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
                return 0;
 
        table_size = sizeof(struct page_cgroup) * nr_pages;
-       order = get_order(table_size);
-       page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-       if (!page)
-               page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-       if (!page)
+
+       base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+                       table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       if (!base)
                return -ENOMEM;
-       base = page_address(page);
        for (index = 0; index < nr_pages; index++) {
                pc = base + index;
                __init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
        return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
        int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
        if (!section->page_cgroup) {
                nid = page_to_nid(pfn_to_page(pfn));
                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-               if (slab_is_available()) {
-                       base = kmalloc_node(table_size,
-                                       GFP_KERNEL | __GFP_NOWARN, nid);
-                       if (!base)
-                               base = vmalloc_node(table_size, nid);
-               } else {
-                       base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-                               table_size,
-                               PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-               }
+               VM_BUG_ON(!slab_is_available());
+               base = kmalloc_node(table_size,
+                               GFP_KERNEL | __GFP_NOWARN, nid);
+               if (!base)
+                       base = vmalloc_node(table_size, nid);
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but
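
For reference, the boot ordering that results from this patch, as a
simplified sketch of the 2.6.30-era flow (exact call sites in
start_kernel() vary between kernel versions):

	/*
	 * start_kernel()
	 *   mm_init()
	 *     page_cgroup_init_flatmem()  FLATMEM/DISCONTIGMEM: bootmem,
	 *                                 runs before slab exists
	 *     mem_init()
	 *     kmem_cache_init()           slab becomes usable here
	 *     vmalloc_init()
	 *   ...
	 *   page_cgroup_init()            SPARSEMEM: per-section kmalloc or
	 *                                 vmalloc; slab is guaranteed up,
	 *                                 hence the new VM_BUG_ON()
	 */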