[PATCH] mm: show bounce pages in oom killer output
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a49f96b..f12052d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -989,8 +989,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
 {
        /* free_pages may go negative - that's OK */
-       unsigned long min = mark;
-       long free_pages = z->free_pages - (1 << order) + 1;
+       long min = mark, free_pages = z->free_pages - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
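
Why the type change in this hunk matters: free_pages can legitimately go negative (see the comment above), and it is later compared against min. If min stayed unsigned long, the usual arithmetic conversions would turn a negative free_pages into a huge unsigned value and the watermark test could wrongly pass. A standalone userspace sketch of that pitfall, with invented values (not kernel code):

#include <stdio.h>

/*
 * Standalone sketch of the signed/unsigned pitfall - not kernel code,
 * values invented for illustration.
 */
int main(void)
{
	long free_pages = -3;		/* "free_pages may go negative - that's OK" */
	unsigned long min_u = 32;	/* old type of 'min' */
	long min_s = 32;		/* new type of 'min' */

	/*
	 * With an unsigned 'min', free_pages is converted to unsigned long,
	 * -3 becomes a huge value, and the low-watermark test is wrongly
	 * reported as passing.
	 */
	printf("unsigned min: below watermark? %s\n",
	       free_pages <= min_u ? "yes" : "no (wrong)");

	/* With both operands signed, the comparison behaves as intended. */
	printf("signed   min: below watermark? %s\n",
	       free_pages <= min_s ? "yes" : "no");
	return 0;
}
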
@@ -1580,8 +1579,8 @@ void show_free_areas(void)
 
        get_zone_counts(&active, &inactive, &free);
 
-       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
-               "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
+       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+               " free:%u slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                active,
                inactive,
                global_page_state(NR_FILE_DIRTY),
@@ -1591,7 +1590,8 @@ void show_free_areas(void)
                global_page_state(NR_SLAB_RECLAIMABLE) +
                        global_page_state(NR_SLAB_UNRECLAIMABLE),
                global_page_state(NR_FILE_MAPPED),
-               global_page_state(NR_PAGETABLE));
+               global_page_state(NR_PAGETABLE),
+               global_page_state(NR_BOUNCE));
 
        for_each_zone(zone) {
                int i;
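
The summary printed by show_free_areas() (and therefore by the OOM killer) gains a bounce: field fed by global_page_state(NR_BOUNCE). Conceptually, global_page_state() folds a per-zone counter into one global number; the sketch below only illustrates that idea with invented names, it is not the kernel's vmstat implementation:

/*
 * Conceptual sketch only - invented names, not the kernel's vmstat code.
 * It assumes each zone keeps an array of counters, one slot of which plays
 * the role of NR_BOUNCE, and shows how a global value like the one printed
 * above could be formed by summing across zones.
 */
enum sketch_stat_item { SKETCH_NR_BOUNCE, SKETCH_NR_ITEMS };

struct sketch_zone {
	unsigned long vm_stat[SKETCH_NR_ITEMS];
};

static unsigned long sketch_global_page_state(struct sketch_zone *zones,
					      int nr_zones,
					      enum sketch_stat_item item)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < nr_zones; i++)
		sum += zones[i].vm_stat[item];
	return sum;
}
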
@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn)
+               unsigned long start_pfn, enum memmap_context context)
 {
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-               if (!early_pfn_valid(pfn))
-                       continue;
-               if (!early_pfn_in_nid(pfn, nid))
-                       continue;
+               /*
+                * There can be holes in boot-time mem_map[]s
+                * handed to this function.  They do not
+                * exist on hotplugged memory.
+                */
+               if (context == MEMMAP_EARLY) {
+                       if (!early_pfn_valid(pfn))
+                               continue;
+                       if (!early_pfn_in_nid(pfn, nid))
+                               continue;
+               }
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
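
The new context argument tells memmap_init_zone() whether it is initializing a boot-time range, whose mem_map may contain holes that early_pfn_valid()/early_pfn_in_nid() must skip, or a hot-added range, which cannot. Assuming the enum is declared along these lines (include/linux/mmzone.h in kernels of this vintage):

/*
 * Assumed declaration of the context enum: MEMMAP_EARLY keeps the hole
 * checks above, MEMMAP_HOTPLUG skips them.
 */
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
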
@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-       memmap_init_zone((size), (nid), (zone), (start_pfn))
+       memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
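
Architectures that define __HAVE_ARCH_MEMMAP_INIT supply their own memmap_init() and are responsible for forwarding a context themselves. A hypothetical override might now look like this (sketch only; arch_memmap_init() is an invented name):

/*
 * Hypothetical arch override - sketch only.  An architecture defining
 * __HAVE_ARCH_MEMMAP_INIT now has to pass a context through to
 * memmap_init_zone() itself.
 */
static void __meminit arch_memmap_init(unsigned long size, int nid,
				       unsigned long zone,
				       unsigned long start_pfn)
{
	/* arch-specific handling of sparse/virtual mem_map would go here */
	memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
}

#define __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	arch_memmap_init((size), (nid), (zone), (start_pfn))
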
@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
                                        unsigned long zone_start_pfn,
-                                       unsigned long size)
+                                       unsigned long size,
+                                       enum memmap_context context)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret;
@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                if (!size)
                        continue;
 
-               ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+               ret = init_currently_empty_zone(zone, zone_start_pfn,
+                                               size, MEMMAP_EARLY);
                BUG_ON(ret);
                zone_start_pfn += size;
        }
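
The boot path above always passes MEMMAP_EARLY; the memory-hotplug path is the one expected to pass MEMMAP_HOTPLUG. A rough sketch of such a caller, not the exact mm/memory_hotplug.c code:

/*
 * Rough sketch of the hotplug side - sketch_add_zone() is an invented
 * name.  A hot-added range has no boot-time mem_map holes, so
 * MEMMAP_HOTPLUG is passed and the early_pfn_valid()/early_pfn_in_nid()
 * checks are skipped.
 */
static int __meminit sketch_add_zone(struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long nr_pages)
{
	int ret;

	ret = init_currently_empty_zone(zone, start_pfn, nr_pages,
					MEMMAP_HOTPLUG);
	if (ret)
		return ret;

	memmap_init_zone(nr_pages, zone_to_nid(zone), zone_idx(zone),
			 start_pfn, MEMMAP_HOTPLUG);
	return 0;
}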