vmalloc(): adjust gfp mask passed on nested vmalloc() invocation
[safe/jmp/linux-2.6] mm/vmscan.c
index f444b74..d0a631a 100644
@@ -544,6 +544,16 @@ redo:
                 */
                lru = LRU_UNEVICTABLE;
                add_page_to_unevictable_list(page);
+               /*
+                * When racing with an mlock clearing (page is
+                * unlocked), make sure that if the other thread does
+                * not observe our setting of PG_lru and fails
+                * isolation, we see PG_mlocked cleared below and move
+                * the page back to the evictable list.
+                *
+                * The other side is TestClearPageMlocked().
+                */
+               smp_mb();
        }
 
        /*
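
Note: the barrier pairing above is the classic store-buffering guarantee — each side stores its flag, executes a full barrier, then loads the other side's flag, so at least one side must observe the other's store. A minimal userspace analogue, assuming C11 atomics (the two flags are modeled as separate atomics purely for illustration; in the kernel both bits live in page->flags, and this is not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool lru;                 /* stands in for PG_lru     */
	static atomic_bool mlocked = true;      /* stands in for PG_mlocked */

	/* putback side: publish PG_lru, full fence, then re-check PG_mlocked */
	static bool putback_misses_clear(void)
	{
		atomic_store_explicit(&lru, true, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
		return atomic_load_explicit(&mlocked, memory_order_relaxed);
	}

	/* munlock side: clear PG_mlocked, then look for PG_lru.  In the
	 * kernel the fence here is implied by TestClearPageMlocked(),
	 * an atomic read-modify-write. */
	static bool munlock_misses_lru(void)
	{
		atomic_store_explicit(&mlocked, false, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		return !atomic_load_explicit(&lru, memory_order_relaxed);
	}

The fences forbid the outcome where both functions return true: if the munlock side misses PG_lru and fails isolation, the putback side is guaranteed to see PG_mlocked cleared and moves the page back to the evictable list.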
@@ -663,7 +673,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * processes. Try to unmap it here.
                 */
                if (page_mapped(page) && mapping) {
-                       switch (try_to_unmap(page, 0)) {
+                       switch (try_to_unmap(page, TTU_UNMAP)) {
                        case SWAP_FAIL:
                                goto activate_locked;
                        case SWAP_AGAIN:
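
Note: TTU_UNMAP is the named spelling of the previously bare 0.  For reference, enum ttu_flags in rmap.h of this vintage looks roughly like the following (abridged; exact values illustrative):

	enum ttu_flags {
		TTU_UNMAP = 0,			/* unmap mode */
		TTU_MIGRATION = 1,		/* migration mode */
		TTU_MUNLOCK = 2,		/* munlock mode */
		TTU_ACTION_MASK = 0xff,

		TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
		TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	};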
@@ -1088,7 +1098,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
        int lumpy_reclaim = 0;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
-               congestion_wait(WRITE, HZ/10);
+               congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                /* We are about to die and free our memory. Return now. */
                if (fatal_signal_pending(current))
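
Note: congestion_wait() no longer takes a READ/WRITE data direction; the first argument now selects the sync or async congestion counter.  Its shape in this era, roughly:

	/* include/linux/backing-dev.h (abridged) */
	enum {
		BLK_RW_ASYNC	= 0,
		BLK_RW_SYNC	= 1,
	};

	long congestion_wait(int sync, long timeout);

	/* back off up to 100ms waiting for async (writeback) congestion to clear */
	congestion_wait(BLK_RW_ASYNC, HZ/10);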
@@ -1356,7 +1366,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                         * IO, plus JVM can create lots of anon VM_EXEC pages,
                         * so we ignore them here.
                         */
-                       if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
+                       if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
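
Note: !PageAnon() wrongly extended the VM_EXEC protection to swap-backed pages such as shmem/tmpfs; page_is_file_cache() keys off PG_swapbacked instead.  At this point the helper is essentially (abridged from mm_inline.h):

	static inline int page_is_file_cache(struct page *page)
	{
		return !PageSwapBacked(page);
	}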
@@ -1709,10 +1719,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
  *
  * If the caller is !__GFP_FS then the probability of a failure is reasonably
  * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about.  We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written.  But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about.  We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written.  But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
  *
  * returns:    0, if no pages reclaimed
  *             else, the number of pages reclaimed
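
Note: the "kick" referred to is the per-bdi flusher wakeup that replaced wakeup_pdflush(); in this function it amounts to something like (sketch, field names per kernels of this vintage):

	if (total_scanned > sc->swap_cluster_max + sc->swap_cluster_max / 2) {
		wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
		sc->may_writepage = 1;
	}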
@@ -2163,6 +2173,7 @@ static int kswapd(void *p)
        order = 0;
        for ( ; ; ) {
                unsigned long new_order;
+               int ret;
 
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                new_order = pgdat->kswapd_max_order;
@@ -2174,19 +2185,23 @@ static int kswapd(void *p)
                         */
                        order = new_order;
                } else {
-                       if (!freezing(current))
+                       if (!freezing(current) && !kthread_should_stop())
                                schedule();
 
                        order = pgdat->kswapd_max_order;
                }
                finish_wait(&pgdat->kswapd_wait, &wait);
 
-               if (!try_to_freeze()) {
-                       /* We can speed up thawing tasks if we don't call
-                        * balance_pgdat after returning from the refrigerator
-                        */
+               ret = try_to_freeze();
+               if (kthread_should_stop())
+                       break;
+
+               /*
+                * We can speed up thawing tasks if we don't call balance_pgdat
+                * after returning from the refrigerator
+                */
+               if (!ret)
                        balance_pgdat(pgdat, order);
-               }
        }
        return 0;
 }
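
Note: the rework above follows the standard kthread shutdown handshake: kthread_stop() sets a stop flag and wakes the thread, which must re-check kthread_should_stop() after every sleep and after leaving the freezer before doing more work.  The generic skeleton (illustrative, not the kswapd body):

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/sched.h>

	static int worker(void *unused)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);

			try_to_freeze();
			/* ... one unit of work ... */
		}
		return 0;
	}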
@@ -2441,6 +2456,17 @@ int kswapd_run(int nid)
        return ret;
 }
 
+/*
+ * Called by memory hotplug when all memory in a node is offlined.
+ */
+void kswapd_stop(int nid)
+{
+       struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+       if (kswapd)
+               kthread_stop(kswapd);
+}
+
 static int __init kswapd_init(void)
 {
        int nid;
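
Note: kswapd_stop() is the teardown half of kswapd_run(), which starts the per-node thread; kthread_stop() both wakes kswapd (so the new kthread_should_stop() checks fire) and blocks until the thread function returns.  The pairing, roughly:

	/* kswapd_run(), abridged: one thread per node */
	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);

	/* memory hotplug, after the last of the node's memory goes offline: */
	kswapd_stop(nid);	/* waits for kswapd() to return */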