mm: CONFIG_MMU for PG_mlocked
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0e7f5e4..cb69f71 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -358,7 +358,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
         * stalls if we need to run get_block().  We could test
         * PagePrivate for that.
         *
-        * If this process is currently in generic_file_write() against
+        * If this process is currently in __generic_file_aio_write() against
         * this page's queue, we can perform writeback even if that
         * will block.
         *
@@ -366,7 +366,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
         * block, for some throttling. This happens by accident, because
         * swap_backing_dev_info is bust: it doesn't reflect the
         * congestion state of the swapdevs.  Easy to fix, if needed.
-        * See swapfile.c:page_queue_congested().
         */
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
@@ -545,6 +544,16 @@ redo:
                 */
                lru = LRU_UNEVICTABLE;
                add_page_to_unevictable_list(page);
+               /*
+                * When racing with an mlock clearing (page is
+                * unlocked), make sure that if the other thread does
+                * not observe our setting of PG_lru and fails
+                * isolation, we see PG_mlocked cleared below and move
+                * the page back to the evictable list.
+                *
+                * The other side is TestClearPageMlocked().
+                */
+               smp_mb();
        }
 
        /*
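The barrier pairing above is the classic store-buffering pattern: the putback side stores "on the LRU" and then reads "mlocked", while the munlock side clears "mlocked" (an atomic RMW that implies a full barrier in the kernel) and then tries isolation, i.e. reads "on the LRU". A minimal userspace C11 analogue, with purely illustrative names, shows why a full barrier is needed on both sides so that at least one thread observes the other:

/* Illustrative userspace analogue only; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int lru;          /* stands in for PG_lru */
static atomic_int mlocked = 1;  /* stands in for PG_mlocked */
static int saw_clear, isolated;

static void *putback_side(void *arg)
{
	atomic_store_explicit(&lru, 1, memory_order_relaxed);	/* add to list */
	atomic_thread_fence(memory_order_seq_cst);		/* the smp_mb() */
	if (!atomic_load_explicit(&mlocked, memory_order_relaxed))
		saw_clear = 1;	/* would move the page back to the evictable list */
	return NULL;
}

static void *munlock_side(void *arg)
{
	atomic_exchange_explicit(&mlocked, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* implied by TestClearPageMlocked() */
	if (atomic_load_explicit(&lru, memory_order_relaxed))
		isolated = 1;	/* isolation succeeds, page gets rescued */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, putback_side, NULL);
	pthread_create(&b, NULL, munlock_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With both fences, saw_clear == 0 && isolated == 0 cannot happen. */
	printf("saw_clear=%d isolated=%d\n", saw_clear, isolated);
	return 0;
}

Without the fence on the putback side, both threads could read stale values and the page could be stranded on the unevictable list with PG_mlocked already cleared.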
@@ -664,7 +673,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * processes. Try to unmap it here.
                 */
                if (page_mapped(page) && mapping) {
-                       switch (try_to_unmap(page, 0)) {
+                       switch (try_to_unmap(page, TTU_UNMAP)) {
                        case SWAP_FAIL:
                                goto activate_locked;
                        case SWAP_AGAIN:
@@ -1089,7 +1098,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
        int lumpy_reclaim = 0;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
-               congestion_wait(WRITE, HZ/10);
+               congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                /* We are about to die and free our memory. Return now. */
                if (fatal_signal_pending(current))
@@ -1357,7 +1366,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                         * IO, plus JVM can create lots of anon VM_EXEC pages,
                         * so we ignore them here.
                         */
-                       if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
+                       if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
@@ -1710,10 +1719,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
  *
  * If the caller is !__GFP_FS then the probability of a failure is reasonably
  * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about.  We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written.  But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about.  We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written.  But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
  *
  * returns:    0, if no pages reclaimed
  *             else, the number of pages reclaimed
@@ -1837,11 +1846,45 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
+unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+                                               gfp_t gfp_mask, bool noswap,
+                                               unsigned int swappiness,
+                                               struct zone *zone, int nid)
+{
+       struct scan_control sc = {
+               .may_writepage = !laptop_mode,
+               .may_unmap = 1,
+               .may_swap = !noswap,
+               .swap_cluster_max = SWAP_CLUSTER_MAX,
+               .swappiness = swappiness,
+               .order = 0,
+               .mem_cgroup = mem,
+               .isolate_pages = mem_cgroup_isolate_pages,
+       };
+       nodemask_t nm  = nodemask_of_node(nid);
+
+       sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+       sc.nodemask = &nm;
+       sc.nr_reclaimed = 0;
+       sc.nr_scanned = 0;
+       /*
+        * NOTE: Although we can get the priority field, using it
+        * here is not a good idea, since it limits the pages we can scan.
+        * If we don't reclaim here, the shrink_zone from balance_pgdat
+        * will pick up pages from other mem cgroups as well. We hack
+        * the priority and make it zero.
+        */
+       shrink_zone(0, zone, &sc);
+       return sc.nr_reclaimed;
+}
+
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                           gfp_t gfp_mask,
                                           bool noswap,
                                           unsigned int swappiness)
 {
+       struct zonelist *zonelist;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
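A hypothetical caller for the new mem_cgroup_shrink_node_zone() above, to show how the node/zone parameters are meant to be used; soft_limit_reclaim_node() and the swappiness value are illustrative, not part of this patch or of the kernel API:

static unsigned long soft_limit_reclaim_node(struct mem_cgroup *mem,
					     gfp_t gfp_mask, int nid)
{
	unsigned long reclaimed = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &NODE_DATA(nid)->node_zones[zid];

		if (!populated_zone(zone))
			continue;
		/* noswap=false, swappiness=60 picked only for illustration */
		reclaimed += mem_cgroup_shrink_node_zone(mem, gfp_mask, false,
							 60, zone, nid);
	}
	return reclaimed;
}

The zone passed in is expected to belong to the node identified by nid, which is why the function builds its nodemask from nodemask_of_node(nid).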
@@ -1853,7 +1896,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
                .nodemask = NULL, /* we don't care about the placement */
        };
-       struct zonelist *zonelist;
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -1862,6 +1904,30 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 }
 #endif
 
+/* is kswapd sleeping prematurely? */
+static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+{
+       int i;
+
+       /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
+       if (remaining)
+               return 1;
+
+       /* If after HZ/10, a zone is below the high mark, it's premature */
+       for (i = 0; i < pgdat->nr_zones; i++) {
+               struct zone *zone = pgdat->node_zones + i;
+
+               if (!populated_zone(zone))
+                       continue;
+
+               if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
+                                                               0, 0))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at high_wmark_pages(zone).
@@ -1919,6 +1985,7 @@ loop_again:
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;
+               int has_under_min_watermark_zone = 0;
 
                /* The swap token gets in the way of swapout... */
                if (!priority)
@@ -1975,6 +2042,7 @@ loop_again:
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
+                       int nid, zid;
 
                        if (!populated_zone(zone))
                                continue;
@@ -1989,6 +2057,15 @@ loop_again:
                        temp_priority[i] = priority;
                        sc.nr_scanned = 0;
                        note_zone_scanning_priority(zone, priority);
+
+                       nid = pgdat->node_id;
+                       zid = zone_idx(zone);
+                       /*
+                        * Call soft limit reclaim before calling shrink_zone.
+                        * For now we ignore the return value
+                        */
+                       mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
+                                                       nid, zid);
                        /*
                         * We put equal pressure on every zone, unless one
                         * zone has way too many pages free already.
@@ -2015,6 +2092,15 @@ loop_again:
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
+
+                       /*
+                        * We are still under the min watermark. That means we have
+                        * a risk of GFP_ATOMIC allocation failure. Hurry up!
+                        */
+                       if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
+                                             end_zone, 0))
+                               has_under_min_watermark_zone = 1;
+
                }
                if (all_zones_ok)
                        break;          /* kswapd: all done */
@@ -2022,8 +2108,12 @@ loop_again:
                 * OK, kswapd is getting into trouble.  Take a nap, then take
                 * another pass across the zones.
                 */
-               if (total_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(BLK_RW_ASYNC, HZ/10);
+               if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+                       if (has_under_min_watermark_zone)
+                               count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
+                       else
+                               congestion_wait(BLK_RW_ASYNC, HZ/10);
+               }
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
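The effect of these heuristics is visible through the vmstat counters this series introduces (KSWAPD_SKIP_CONGESTION_WAIT here, plus the two wmark-hit counters in the kswapd sleep path below). A quick way to watch them, assuming they are exported lowercased in /proc/vmstat like other VM events:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "kswapd"))	/* e.g. kswapd_skip_congestion_wait */
			fputs(line, stdout);
	fclose(f);
	return 0;
}

If the counters are absent, the running kernel simply does not carry this series.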
@@ -2121,6 +2211,7 @@ static int kswapd(void *p)
        order = 0;
        for ( ; ; ) {
                unsigned long new_order;
+               int ret;
 
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                new_order = pgdat->kswapd_max_order;
@@ -2132,19 +2223,45 @@ static int kswapd(void *p)
                         */
                        order = new_order;
                } else {
-                       if (!freezing(current))
-                               schedule();
+                       if (!freezing(current) && !kthread_should_stop()) {
+                               long remaining = 0;
+
+                               /* Try to sleep for a short interval */
+                               if (!sleeping_prematurely(pgdat, order, remaining)) {
+                                       remaining = schedule_timeout(HZ/10);
+                                       finish_wait(&pgdat->kswapd_wait, &wait);
+                                       prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+                               }
+
+                               /*
+                                * After a short sleep, check if it was a
+                                * premature sleep. If not, then go fully
+                                * to sleep until explicitly woken up
+                                */
+                               if (!sleeping_prematurely(pgdat, order, remaining))
+                                       schedule();
+                               else {
+                                       if (remaining)
+                                               count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+                                       else
+                                               count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+                               }
+                       }
 
                        order = pgdat->kswapd_max_order;
                }
                finish_wait(&pgdat->kswapd_wait, &wait);
 
-               if (!try_to_freeze()) {
-                       /* We can speed up thawing tasks if we don't call
-                        * balance_pgdat after returning from the refrigerator
-                        */
+               ret = try_to_freeze();
+               if (kthread_should_stop())
+                       break;
+
+               /*
+                * We can speed up thawing tasks if we don't call balance_pgdat
+                * after returning from the refrigerator
+                */
+               if (!ret)
                        balance_pgdat(pgdat, order);
-               }
        }
        return 0;
 }
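The sleep logic above follows a two-phase pattern: nap for a short interval (HZ/10), re-check whether sleeping would be premature, and only then commit to an indefinite sleep. A self-contained userspace sketch of the same control flow (all names illustrative; a pthread condition variable stands in for kswapd_wait):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static bool woken;		/* analogue of a wakeup on kswapd_wait */
static bool premature;		/* analogue of sleeping_prematurely() */

static void two_phase_sleep(void)
{
	pthread_mutex_lock(&lock);

	if (!premature) {
		/* phase 1: a short nap, roughly the patch's HZ/10 */
		struct timespec t;

		clock_gettime(CLOCK_REALTIME, &t);
		t.tv_nsec += 100 * 1000 * 1000;
		if (t.tv_nsec >= 1000000000L) {
			t.tv_sec++;
			t.tv_nsec -= 1000000000L;
		}
		while (!woken &&
		       pthread_cond_timedwait(&kick, &lock, &t) != ETIMEDOUT)
			;
	}

	if (!premature) {
		/* phase 2: the nap was not premature, sleep for real */
		while (!woken)
			pthread_cond_wait(&kick, &lock);
	} else {
		/* premature: go straight back to work (and count the event) */
		fprintf(stderr, "premature sleep, back to reclaim\n");
	}

	woken = false;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	premature = true;	/* demonstrate only the early-return path */
	two_phase_sleep();
	return 0;
}

The real code additionally records whether the nap was cut short (remaining != 0) to decide which counter to bump; the sketch omits that bookkeeping.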
@@ -2399,6 +2516,17 @@ int kswapd_run(int nid)
        return ret;
 }
 
+/*
+ * Called by memory hotplug when all memory in a node is offlined.
+ */
+void kswapd_stop(int nid)
+{
+       struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+       if (kswapd)
+               kthread_stop(kswapd);
+}
+
 static int __init kswapd_init(void)
 {
        int nid;
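The new kswapd_stop() relies on the kthread_should_stop()/kthread_stop() pairing that the kswapd() loop above now honours: the thread must re-check kthread_should_stop() after every wakeup so kthread_stop() cannot block forever. A minimal toy module (illustrative only, not kswapd itself) showing that pairing:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	set_freezable();
	while (!kthread_should_stop()) {
		/* do one unit of work, then nap briefly (cf. HZ/10 above) */
		msleep_interruptible(100);
		try_to_freeze();
	}
	return 0;	/* kthread_stop() collects this return value */
}

static int __init demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "demo_worker");
	return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);	/* wakes the thread and waits for it to exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

kswapd_stop() above performs the demo_exit() half of this pairing, which is why the break on kthread_should_stop() had to be added to kswapd()'s main loop.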
@@ -2802,10 +2930,10 @@ static void scan_all_zones_unevictable_pages(void)
 unsigned long scan_unevictable_pages;
 
 int scan_unevictable_handler(struct ctl_table *table, int write,
-                          struct file *file, void __user *buffer,
+                          void __user *buffer,
                           size_t *length, loff_t *ppos)
 {
-       proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+       proc_doulongvec_minmax(table, write, buffer, length, ppos);
 
        if (write && *(unsigned long *)table->data)
                scan_all_zones_unevictable_pages();