vmscan: count the number of times zone_reclaim() scans and fails
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index ebf8107..11a8a10 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -69,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
        return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
        int nid, fail;
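
The rename above splits the FLATMEM setup out of the shared page_cgroup_init() name so it can be invoked explicitly while the boot-time allocator is still usable, leaving page_cgroup_init() to the SPARSEMEM path that now requires slab. A minimal sketch of the resulting call order in init/main.c's mm_init() follows; the exact placement shown here is an assumption for illustration, not part of this hunk:

    static void __init mm_init(void)
    {
        /*
         * On FLATMEM the per-node page_cgroup table is one large
         * contiguous allocation, so it is set up from the early
         * allocator before mem_init() releases boot memory to the
         * buddy allocator. (Call placement assumed, see lead-in.)
         */
        page_cgroup_init_flatmem();
        mem_init();
        kmem_cache_init();
        vmalloc_init();
    }

The SPARSEMEM page_cgroup_init() keeps running later in boot, after slab is initialized, which is what the VM_BUG_ON() added in the next hunk asserts.
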
@@ -113,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
        if (!section->page_cgroup) {
                nid = page_to_nid(pfn_to_page(pfn));
                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-               if (slab_is_available()) {
-                       base = kmalloc_node(table_size,
-                                       GFP_KERNEL | __GFP_NOWARN, nid);
-                       if (!base)
-                               base = vmalloc_node(table_size, nid);
-               } else {
-                       base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-                               table_size,
-                               PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-               }
+               VM_BUG_ON(!slab_is_available());
+               base = kmalloc_node(table_size,
+                               GFP_KERNEL | __GFP_NOWARN, nid);
+               if (!base)
+                       base = vmalloc_node(table_size, nid);
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but
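
For readability, this is roughly how the allocation path of init_section_page_cgroup() reads once the hunk above is applied (surrounding declarations and the later initialization loop omitted):

    nid = page_to_nid(pfn_to_page(pfn));
    table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;

    /* Sections are only initialized after the slab allocator is up. */
    VM_BUG_ON(!slab_is_available());

    /* Prefer a node-local slab allocation, silenced with __GFP_NOWARN ... */
    base = kmalloc_node(table_size, GFP_KERNEL | __GFP_NOWARN, nid);
    /* ... and fall back to vmalloc_node() if the request is too large or
       physically contiguous memory is not available. */
    if (!base)
        base = vmalloc_node(table_size, nid);

The __GFP_NOWARN matters because a section-sized kmalloc can legitimately fail; the vmalloc fallback is the expected recovery, so there is no point warning about it.
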
@@ -426,13 +421,6 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
        }
        mutex_unlock(&swap_cgroup_mutex);
 
-       printk(KERN_INFO
-               "swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
-               " and %ld bytes to hold mem_cgroup information per swap ents\n",
-               array_size, length * PAGE_SIZE);
-       printk(KERN_INFO
-       "swap_cgroup can be disabled by noswapaccount boot option.\n");
-
        return 0;
 nomem:
        printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");