mm: remove __invalidate_mapping_pages variant
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5dac5d8..c5fb017 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -178,6 +178,8 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
                                        PB_migrate, PB_migrate_end);
 }
 
+bool oom_killer_disabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -300,23 +302,6 @@ void prep_compound_page(struct page *page, unsigned long order)
        }
 }
 
-#ifdef CONFIG_HUGETLBFS
-void prep_compound_gigantic_page(struct page *page, unsigned long order)
-{
-       int i;
-       int nr_pages = 1 << order;
-       struct page *p = page + 1;
-
-       set_compound_page_dtor(page, free_compound_page);
-       set_compound_order(page, order);
-       __SetPageHead(page);
-       for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-               __SetPageTail(p);
-               p->first_page = page;
-       }
-}
-#endif
-
 static int destroy_compound_page(struct page *page, unsigned long order)
 {
        int i;
@@ -1145,6 +1130,19 @@ again:
                list_del(&page->lru);
                pcp->count--;
        } else {
+               if (unlikely(gfp_flags & __GFP_NOFAIL)) {
+                       /*
+                        * __GFP_NOFAIL is not to be used in new code.
+                        *
+                        * All __GFP_NOFAIL callers should be fixed so that they
+                        * properly detect and handle allocation failures.
+                        *
+                        * We most definitely don't want callers attempting to
+                        * allocate greater than single-page units with
+                        * __GFP_NOFAIL.
+                        */
+                       WARN_ON_ONCE(order > 0);
+               }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
                __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
@@ -1446,9 +1444,6 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
 
-       if (WARN_ON_ONCE(order >= MAX_ORDER))
-               return NULL;
-
        classzone_idx = zone_idx(preferred_zone);
 zonelist_scan:
        /*
@@ -1566,7 +1561,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                goto out;
 
        /* The OOM killer will not help higher order allocs */
-       if (order > PAGE_ALLOC_COSTLY_ORDER)
+       if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
                goto out;
 
        /* Exhausted what can be done so it's blamo time */
@@ -1707,6 +1702,15 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct task_struct *p = current;
 
        /*
+        * In the slowpath, we sanity check order to avoid ever trying to
+        * reclaim >= MAX_ORDER areas which will never succeed. Callers may
+        * be using allocators in order of preference for an area that is
+        * too large.
+        */
+       if (WARN_ON_ONCE(order >= MAX_ORDER))
+               return NULL;
+
+       /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
         * __GFP_NOWARN set) should not cause reclaim since the subsystem
         * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
@@ -1767,6 +1771,8 @@ rebalance:
         */
        if (!did_some_progress) {
                if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+                       if (oom_killer_disabled)
+                               goto nopage;
                        page = __alloc_pages_may_oom(gfp_mask, order,
                                        zonelist, high_zoneidx,
                                        nodemask, preferred_zone,
@@ -1775,11 +1781,13 @@ rebalance:
                                goto got_pg;
 
                        /*
-                        * The OOM killer does not trigger for high-order allocations
-                        * but if no progress is being made, there are no other
-                        * options and retrying is unlikely to help
+                        * The OOM killer does not trigger for high-order
+                        * ~__GFP_NOFAIL allocations so if no progress is being
+                        * made, there are no other options and retrying is
+                        * unlikely to help.
                         */
-                       if (order > PAGE_ALLOC_COSTLY_ORDER)
+                       if (order > PAGE_ALLOC_COSTLY_ORDER &&
+                                               !(gfp_mask & __GFP_NOFAIL))
                                goto nopage;
 
                        goto restart;
@@ -2071,19 +2079,14 @@ void show_free_areas(void)
 
        printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
                " inactive_file:%lu"
-//TODO:  check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
                " unevictable:%lu"
-#endif
                " dirty:%lu writeback:%lu unstable:%lu\n"
                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
                global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
                global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
                global_page_state(NR_UNEVICTABLE),
-#endif
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
@@ -2107,9 +2110,7 @@ void show_free_areas(void)
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
                        " unevictable:%lukB"
-#endif
                        " present:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
@@ -2123,9 +2124,7 @@ void show_free_areas(void)
                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                        K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
                        K(zone->present_pages),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
@@ -3668,7 +3667,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone_pcp_init(zone);
                for_each_lru(l) {
                        INIT_LIST_HEAD(&zone->lru[l].list);
-                       zone->lru[l].nr_scan = 0;
+                       zone->lru[l].nr_saved_scan = 0;
                }
                zone->reclaim_stat.recent_rotated[0] = 0;
                zone->reclaim_stat.recent_rotated[1] = 0;
@@ -4390,12 +4389,13 @@ static void setup_per_zone_lowmem_reserve(void)
 }
 
 /**
- * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
  *
- * Ensures that the pages_{min,low,high} values for each zone are set correctly
- * with respect to min_free_kbytes.
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
  */
-void setup_per_zone_pages_min(void)
+void setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
@@ -4451,8 +4451,6 @@ void setup_per_zone_pages_min(void)
 }
 
 /**
- * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
- *
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
  * to be referenced again before it is swapped out.
@@ -4473,21 +4471,26 @@ void setup_per_zone_pages_min(void)
  *    1TB     101        10GB
  *   10TB     320        32GB
  */
-static void setup_per_zone_inactive_ratio(void)
+void calculate_zone_inactive_ratio(struct zone *zone)
 {
-       struct zone *zone;
-
-       for_each_zone(zone) {
-               unsigned int gb, ratio;
+       unsigned int gb, ratio;
 
-               /* Zone size in gigabytes */
-               gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       /* Zone size in gigabytes */
+       gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       if (gb)
                ratio = int_sqrt(10 * gb);
-               if (!ratio)
-                       ratio = 1;
+       else
+               ratio = 1;
 
-               zone->inactive_ratio = ratio;
-       }
+       zone->inactive_ratio = ratio;
+}
+
+static void __init setup_per_zone_inactive_ratio(void)
+{
+       struct zone *zone;
+
+       for_each_zone(zone)
+               calculate_zone_inactive_ratio(zone);
 }
 
 /*
@@ -4514,7 +4517,7 @@ static void setup_per_zone_inactive_ratio(void)
  * 8192MB:     11584k
  * 16384MB:    16384k
  */
-static int __init init_per_zone_pages_min(void)
+static int __init init_per_zone_wmark_min(void)
 {
        unsigned long lowmem_kbytes;
 
@@ -4525,12 +4528,12 @@ static int __init init_per_zone_pages_min(void)
                min_free_kbytes = 128;
        if (min_free_kbytes > 65536)
                min_free_kbytes = 65536;
-       setup_per_zone_pages_min();
+       setup_per_zone_wmarks();
        setup_per_zone_lowmem_reserve();
        setup_per_zone_inactive_ratio();
        return 0;
 }
-module_init(init_per_zone_pages_min)
+module_init(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
@@ -4542,7 +4545,7 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 {
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (write)
-               setup_per_zone_pages_min();
+               setup_per_zone_wmarks();
        return 0;
 }
 
@@ -4693,26 +4696,13 @@ void *__init alloc_large_system_hash(const char *tablename,
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
-                       unsigned long order = get_order(size);
-
-                       if (order < MAX_ORDER)
-                               table = (void *)__get_free_pages(GFP_ATOMIC,
-                                                               order);
                        /*
                         * If bucketsize is not a power-of-two, we may free
-                        * some pages at the end of hash table.
+                        * some pages at the end of hash table which
+                        * alloc_pages_exact() automatically does
                         */
-                       if (table) {
-                               unsigned long alloc_end = (unsigned long)table +
-                                               (PAGE_SIZE << order);
-                               unsigned long used = (unsigned long)table +
-                                               PAGE_ALIGN(size);
-                               split_page(virt_to_page(table), order);
-                               while (used < alloc_end) {
-                                       free_page(used);
-                                       used += PAGE_SIZE;
-                               }
-                       }
+                       if (get_order(size) < MAX_ORDER)
+                               table = alloc_pages_exact(size, GFP_ATOMIC);
                }
        } while (!table && size > PAGE_SIZE && --log2qty);