vmscan: kill hibernation specific reclaim logic and unify it
1 /*
2  *  linux/mm/vmscan.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  *
6  *  Swap reorganised 29.12.95, Stephen Tweedie.
7  *  kswapd added: 7.1.96  sct
8  *  Removed kswapd_ctl limits, and swap out as many pages as needed
9  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11  *  Multiqueue VM started 5.8.00, Rik van Riel.
12  */
13
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/init.h>
21 #include <linux/highmem.h>
22 #include <linux/vmstat.h>
23 #include <linux/file.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/buffer_head.h>  /* for try_to_release_page(),
27                                         buffer_heads_over_limit */
28 #include <linux/mm_inline.h>
29 #include <linux/pagevec.h>
30 #include <linux/backing-dev.h>
31 #include <linux/rmap.h>
32 #include <linux/topology.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/notifier.h>
36 #include <linux/rwsem.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/freezer.h>
40 #include <linux/memcontrol.h>
41 #include <linux/delayacct.h>
42 #include <linux/sysctl.h>
43
44 #include <asm/tlbflush.h>
45 #include <asm/div64.h>
46
47 #include <linux/swapops.h>
48
49 #include "internal.h"
50
51 struct scan_control {
52         /* Incremented by the number of inactive pages that were scanned */
53         unsigned long nr_scanned;
54
55         /* Number of pages freed so far during a call to shrink_zones() */
56         unsigned long nr_reclaimed;
57
58         /* How many pages shrink_list() should reclaim */
59         unsigned long nr_to_reclaim;
60
 61         unsigned long hibernation_mode; /* set when reclaiming memory for hibernation */
62
63         /* This context's GFP mask */
64         gfp_t gfp_mask;
65
66         int may_writepage;
67
68         /* Can mapped pages be reclaimed? */
69         int may_unmap;
70
71         /* Can pages be swapped as part of reclaim? */
72         int may_swap;
73
74         /* This context's SWAP_CLUSTER_MAX. If freeing memory for
75          * suspend, we effectively ignore SWAP_CLUSTER_MAX.
76          * In this context, it doesn't matter that we scan the
77          * whole list at once. */
78         int swap_cluster_max;
79
80         int swappiness;
81
82         int all_unreclaimable;
83
84         int order;
85
86         /* Which cgroup do we reclaim from */
87         struct mem_cgroup *mem_cgroup;
88
89         /*
90          * Nodemask of nodes allowed by the caller. If NULL, all nodes
91          * are scanned.
92          */
93         nodemask_t      *nodemask;
94
95         /* Pluggable isolate pages callback */
96         unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
97                         unsigned long *scanned, int order, int mode,
98                         struct zone *z, struct mem_cgroup *mem_cont,
99                         int active, int file);
100 };
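For orientation, a hedged sketch (not a verbatim copy of any caller in this file) of how a global direct-reclaim path typically fills in a scan_control on the stack; the values mirror try_to_free_pages()-style initialization in this kernel generation, with the pluggable isolate callback wired to isolate_pages_global(), which is defined later in this file:

	struct scan_control sc = {
		.gfp_mask		= GFP_KERNEL,
		.may_writepage		= !laptop_mode,
		.may_unmap		= 1,
		.may_swap		= 1,
		.nr_to_reclaim		= SWAP_CLUSTER_MAX,
		.swap_cluster_max	= SWAP_CLUSTER_MAX,
		.swappiness		= vm_swappiness,
		.order			= 0,		/* single-page allocation */
		.mem_cgroup		= NULL,		/* global LRU, not a memcg */
		.nodemask		= NULL,		/* scan all allowed nodes */
		.isolate_pages		= isolate_pages_global,
	};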
101
102 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
103
104 #ifdef ARCH_HAS_PREFETCH
105 #define prefetch_prev_lru_page(_page, _base, _field)                    \
106         do {                                                            \
107                 if ((_page)->lru.prev != _base) {                       \
108                         struct page *prev;                              \
109                                                                         \
110                         prev = lru_to_page(&(_page->lru));              \
111                         prefetch(&prev->_field);                        \
112                 }                                                       \
113         } while (0)
114 #else
115 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
116 #endif
117
118 #ifdef ARCH_HAS_PREFETCHW
119 #define prefetchw_prev_lru_page(_page, _base, _field)                   \
120         do {                                                            \
121                 if ((_page)->lru.prev != _base) {                       \
122                         struct page *prev;                              \
123                                                                         \
124                         prev = lru_to_page(&(_page->lru));              \
125                         prefetchw(&prev->_field);                       \
126                 }                                                       \
127         } while (0)
128 #else
129 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
130 #endif
131
132 /*
133  * From 0 .. 100.  Higher means more swappy.
134  */
135 int vm_swappiness = 60;
136 long vm_total_pages;    /* The total number of pages which the VM controls */
137
138 static LIST_HEAD(shrinker_list);
139 static DECLARE_RWSEM(shrinker_rwsem);
140
141 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
142 #define scanning_global_lru(sc) (!(sc)->mem_cgroup)
143 #else
144 #define scanning_global_lru(sc) (1)
145 #endif
146
147 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
148                                                   struct scan_control *sc)
149 {
150         if (!scanning_global_lru(sc))
151                 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
152
153         return &zone->reclaim_stat;
154 }
155
156 static unsigned long zone_nr_lru_pages(struct zone *zone,
157                                 struct scan_control *sc, enum lru_list lru)
158 {
159         if (!scanning_global_lru(sc))
160                 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
161
162         return zone_page_state(zone, NR_LRU_BASE + lru);
163 }
164
165
166 /*
167  * Add a shrinker callback to be called from the vm
168  */
169 void register_shrinker(struct shrinker *shrinker)
170 {
171         shrinker->nr = 0;
172         down_write(&shrinker_rwsem);
173         list_add_tail(&shrinker->list, &shrinker_list);
174         up_write(&shrinker_rwsem);
175 }
176 EXPORT_SYMBOL(register_shrinker);
177
178 /*
179  * Remove one
180  */
181 void unregister_shrinker(struct shrinker *shrinker)
182 {
183         down_write(&shrinker_rwsem);
184         list_del(&shrinker->list);
185         up_write(&shrinker_rwsem);
186 }
187 EXPORT_SYMBOL(unregister_shrinker);
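As context for the register/unregister API above, a hedged sketch of a minimal consumer follows. The cache and the my_cache_* helpers are hypothetical; what is taken from this kernel generation is the two-argument shrink callback, the -1 return meaning "cannot make progress now", and DEFAULT_SEEKS as the usual seek cost:

	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan) {
			if (!(gfp_mask & __GFP_FS))
				return -1;		/* shrink_slab() will back off */
			my_cache_evict(nr_to_scan);	/* hypothetical helper */
		}
		return my_cache_count();		/* hypothetical: objects left */
	}

	static struct shrinker my_cache_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* at module init */
	register_shrinker(&my_cache_shrinker);
	/* at module exit */
	unregister_shrinker(&my_cache_shrinker);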
188
189 #define SHRINK_BATCH 128
190 /*
191  * Call the shrink functions to age shrinkable caches
192  *
193  * Here we assume it costs one seek to replace a lru page and that it also
194  * takes a seek to recreate a cache object.  With this in mind we age equal
195  * percentages of the lru and ageable caches.  This should balance the seeks
196  * generated by these structures.
197  *
198  *  If the vm encountered mapped pages on the LRU it increases the pressure on
199  * slab to avoid swapping.
200  *
201  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
202  *
203  * `lru_pages' represents the number of on-LRU pages in all the zones which
204  * are eligible for the caller's allocation attempt.  It is used for balancing
205  * slab reclaim versus page reclaim.
206  *
207  * Returns the number of slab objects which we shrunk.
208  */
209 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
210                         unsigned long lru_pages)
211 {
212         struct shrinker *shrinker;
213         unsigned long ret = 0;
214
215         if (scanned == 0)
216                 scanned = SWAP_CLUSTER_MAX;
217
218         if (!down_read_trylock(&shrinker_rwsem))
219                 return 1;       /* Assume we'll be able to shrink next time */
220
221         list_for_each_entry(shrinker, &shrinker_list, list) {
222                 unsigned long long delta;
223                 unsigned long total_scan;
224                 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
225
226                 delta = (4 * scanned) / shrinker->seeks;
227                 delta *= max_pass;
228                 do_div(delta, lru_pages + 1);
229                 shrinker->nr += delta;
230                 if (shrinker->nr < 0) {
231                         printk(KERN_ERR "shrink_slab: %pF negative objects to "
232                                "delete nr=%ld\n",
233                                shrinker->shrink, shrinker->nr);
234                         shrinker->nr = max_pass;
235                 }
236
237                 /*
238                  * Avoid the risk of looping forever due to a too large nr value:
239                  * never try to free more than twice the estimated number of
240                  * freeable entries.
241                  */
242                 if (shrinker->nr > max_pass * 2)
243                         shrinker->nr = max_pass * 2;
244
245                 total_scan = shrinker->nr;
246                 shrinker->nr = 0;
247
248                 while (total_scan >= SHRINK_BATCH) {
249                         long this_scan = SHRINK_BATCH;
250                         int shrink_ret;
251                         int nr_before;
252
253                         nr_before = (*shrinker->shrink)(0, gfp_mask);
254                         shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
255                         if (shrink_ret == -1)
256                                 break;
257                         if (shrink_ret < nr_before)
258                                 ret += nr_before - shrink_ret;
259                         count_vm_events(SLABS_SCANNED, this_scan);
260                         total_scan -= this_scan;
261
262                         cond_resched();
263                 }
264
265                 shrinker->nr += total_scan;
266         }
267         up_read(&shrinker_rwsem);
268         return ret;
269 }
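To make the proportional aging concrete, a worked example with illustrative numbers: if this reclaim pass scanned 1024 LRU pages out of lru_pages = 100000 eligible pages, and a shrinker with seeks = 2 (DEFAULT_SEEKS) reports max_pass = 20000 freeable objects, then delta = (4 * 1024 / 2) * 20000 / 100001 = 409 objects are added to shrinker->nr. The while loop then issues three SHRINK_BATCH (128) requests, 384 objects in total, and the remaining 25 are carried over in shrinker->nr for the next call.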
270
271 /* Called without lock on whether page is mapped, so answer is unstable */
272 static inline int page_mapping_inuse(struct page *page)
273 {
274         struct address_space *mapping;
275
276         /* Page is in somebody's page tables. */
277         if (page_mapped(page))
278                 return 1;
279
280         /* Be more reluctant to reclaim swapcache than pagecache */
281         if (PageSwapCache(page))
282                 return 1;
283
284         mapping = page_mapping(page);
285         if (!mapping)
286                 return 0;
287
288         /* File is mmap'd by somebody? */
289         return mapping_mapped(mapping);
290 }
291
292 static inline int is_page_cache_freeable(struct page *page)
293 {
294         /*
295          * A freeable page cache page is referenced only by the caller
296          * that isolated the page, the page cache radix tree and
297          * optional buffer heads at page->private.
298          */
299         return page_count(page) - page_has_private(page) == 2;
300 }
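Concretely (illustrative only): a clean pagecache page that reclaim has isolated is pinned by the isolating caller and by the page cache radix tree, so page_count() == 2 with page_has_private() == 0, and the test passes. If the page also carries buffer heads, attaching them took a third reference and page_has_private() == 1, so 3 - 1 == 2 still passes; any further reference (a racing find_get_page(), an O_DIRECT pin, and so on) makes the page non-freeable.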
301
302 static int may_write_to_queue(struct backing_dev_info *bdi)
303 {
304         if (current->flags & PF_SWAPWRITE)
305                 return 1;
306         if (!bdi_write_congested(bdi))
307                 return 1;
308         if (bdi == current->backing_dev_info)
309                 return 1;
310         return 0;
311 }
312
313 /*
314  * We detected a synchronous write error writing a page out.  Probably
315  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
316  * fsync(), msync() or close().
317  *
318  * The tricky part is that after writepage we cannot touch the mapping: nothing
319  * prevents it from being freed up.  But we have a ref on the page and once
320  * that page is locked, the mapping is pinned.
321  *
322  * We're allowed to run sleeping lock_page() here because we know the caller has
323  * __GFP_FS.
324  */
325 static void handle_write_error(struct address_space *mapping,
326                                 struct page *page, int error)
327 {
328         lock_page(page);
329         if (page_mapping(page) == mapping)
330                 mapping_set_error(mapping, error);
331         unlock_page(page);
332 }
333
334 /* Request for sync pageout. */
335 enum pageout_io {
336         PAGEOUT_IO_ASYNC,
337         PAGEOUT_IO_SYNC,
338 };
339
340 /* possible outcome of pageout() */
341 typedef enum {
342         /* failed to write page out, page is locked */
343         PAGE_KEEP,
344         /* move page to the active list, page is locked */
345         PAGE_ACTIVATE,
346         /* page has been sent to the disk successfully, page is unlocked */
347         PAGE_SUCCESS,
348         /* page is clean and locked */
349         PAGE_CLEAN,
350 } pageout_t;
351
352 /*
353  * pageout is called by shrink_page_list() for each dirty page.
354  * Calls ->writepage().
355  */
356 static pageout_t pageout(struct page *page, struct address_space *mapping,
357                                                 enum pageout_io sync_writeback)
358 {
359         /*
360          * If the page is dirty, only perform writeback if that write
361          * will be non-blocking, so that this allocation is not stalled
362          * by pagecache activity.  But note that there may be
363          * stalls if we need to run get_block().  We could test
364          * PagePrivate for that.
365          *
366          * If this process is currently in __generic_file_aio_write() against
367          * this page's queue, we can perform writeback even if that
368          * will block.
369          *
370          * If the page is swapcache, write it back even if that would
371          * block, for some throttling. This happens by accident, because
372          * swap_backing_dev_info is bust: it doesn't reflect the
373          * congestion state of the swapdevs.  Easy to fix, if needed.
374          */
375         if (!is_page_cache_freeable(page))
376                 return PAGE_KEEP;
377         if (!mapping) {
378                 /*
379                  * Some data journaling orphaned pages can have
380                  * page->mapping == NULL while being dirty with clean buffers.
381                  */
382                 if (page_has_private(page)) {
383                         if (try_to_free_buffers(page)) {
384                                 ClearPageDirty(page);
385                                 printk("%s: orphaned page\n", __func__);
386                                 return PAGE_CLEAN;
387                         }
388                 }
389                 return PAGE_KEEP;
390         }
391         if (mapping->a_ops->writepage == NULL)
392                 return PAGE_ACTIVATE;
393         if (!may_write_to_queue(mapping->backing_dev_info))
394                 return PAGE_KEEP;
395
396         if (clear_page_dirty_for_io(page)) {
397                 int res;
398                 struct writeback_control wbc = {
399                         .sync_mode = WB_SYNC_NONE,
400                         .nr_to_write = SWAP_CLUSTER_MAX,
401                         .range_start = 0,
402                         .range_end = LLONG_MAX,
403                         .nonblocking = 1,
404                         .for_reclaim = 1,
405                 };
406
407                 SetPageReclaim(page);
408                 res = mapping->a_ops->writepage(page, &wbc);
409                 if (res < 0)
410                         handle_write_error(mapping, page, res);
411                 if (res == AOP_WRITEPAGE_ACTIVATE) {
412                         ClearPageReclaim(page);
413                         return PAGE_ACTIVATE;
414                 }
415
416                 /*
417                  * Wait on writeback if requested to. This happens when
418                  * direct reclaiming a large contiguous area and the
419                  * first attempt to free a range of pages fails.
420                  */
421                 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
422                         wait_on_page_writeback(page);
423
424                 if (!PageWriteback(page)) {
425                         /* synchronous write or broken a_ops? */
426                         ClearPageReclaim(page);
427                 }
428                 inc_zone_page_state(page, NR_VMSCAN_WRITE);
429                 return PAGE_SUCCESS;
430         }
431
432         return PAGE_CLEAN;
433 }
434
435 /*
436  * Same as remove_mapping, but if the page is removed from the mapping, it
437  * gets returned with a refcount of 0.
438  */
439 static int __remove_mapping(struct address_space *mapping, struct page *page)
440 {
441         BUG_ON(!PageLocked(page));
442         BUG_ON(mapping != page_mapping(page));
443
444         spin_lock_irq(&mapping->tree_lock);
445         /*
446          * The non-racy check for a busy page.
447          *
448          * Must be careful with the order of the tests. When someone has
449          * a ref to the page, it may be possible that they dirty it then
450          * drop the reference. So if PageDirty is tested before page_count
451          * here, then the following race may occur:
452          *
453          * get_user_pages(&page);
454          * [user mapping goes away]
455          * write_to(page);
456          *                              !PageDirty(page)    [good]
457          * SetPageDirty(page);
458          * put_page(page);
459          *                              !page_count(page)   [good, discard it]
460          *
461          * [oops, our write_to data is lost]
462          *
463          * Reversing the order of the tests ensures such a situation cannot
464          * escape unnoticed. The smp_rmb is needed to ensure the page->flags
465          * load is not satisfied before that of page->_count.
466          *
467          * Note that if SetPageDirty is always performed via set_page_dirty,
468          * and thus under tree_lock, then this ordering is not required.
469          */
470         if (!page_freeze_refs(page, 2))
471                 goto cannot_free;
472         /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
473         if (unlikely(PageDirty(page))) {
474                 page_unfreeze_refs(page, 2);
475                 goto cannot_free;
476         }
477
478         if (PageSwapCache(page)) {
479                 swp_entry_t swap = { .val = page_private(page) };
480                 __delete_from_swap_cache(page);
481                 spin_unlock_irq(&mapping->tree_lock);
482                 swapcache_free(swap, page);
483         } else {
484                 __remove_from_page_cache(page);
485                 spin_unlock_irq(&mapping->tree_lock);
486                 mem_cgroup_uncharge_cache_page(page);
487         }
488
489         return 1;
490
491 cannot_free:
492         spin_unlock_irq(&mapping->tree_lock);
493         return 0;
494 }
495
496 /*
497  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
498  * someone else has a ref on the page, abort and return 0.  If it was
499  * successfully detached, return 1.  Assumes the caller has a single ref on
500  * this page.
501  */
502 int remove_mapping(struct address_space *mapping, struct page *page)
503 {
504         if (__remove_mapping(mapping, page)) {
505                 /*
506                  * Unfreezing the refcount with 1 rather than 2 effectively
507                  * drops the pagecache ref for us without requiring another
508                  * atomic operation.
509                  */
510                 page_unfreeze_refs(page, 1);
511                 return 1;
512         }
513         return 0;
514 }
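As a usage illustration, a hedged sketch modelled on invalidate_complete_page() in mm/truncate.c (the helper name here is hypothetical): the caller holds the page lock, strips buffers first, and only then tries to detach the page:

	/* Sketch only; @page is locked and the caller holds a reference. */
	static int drop_clean_page(struct address_space *mapping, struct page *page)
	{
		if (page->mapping != mapping)
			return 0;		/* truncated or migrated under us */

		if (page_has_private(page) && !try_to_release_page(page, 0))
			return 0;		/* buffers still busy */

		return remove_mapping(mapping, page);	/* 1 on success */
	}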
515
516 /**
517  * putback_lru_page - put previously isolated page onto appropriate LRU list
518  * @page: page to be put back to appropriate lru list
519  *
520  * Add previously isolated @page to appropriate LRU list.
521  * Page may still be unevictable for other reasons.
522  *
523  * lru_lock must not be held, interrupts must be enabled.
524  */
525 void putback_lru_page(struct page *page)
526 {
527         int lru;
528         int active = !!TestClearPageActive(page);
529         int was_unevictable = PageUnevictable(page);
530
531         VM_BUG_ON(PageLRU(page));
532
533 redo:
534         ClearPageUnevictable(page);
535
536         if (page_evictable(page, NULL)) {
537                 /*
538                  * For evictable pages, we can use the cache.
539                  * In event of a race, worst case is we end up with an
540                  * unevictable page on [in]active list.
541                  * We know how to handle that.
542                  */
543                 lru = active + page_lru_base_type(page);
544                 lru_cache_add_lru(page, lru);
545         } else {
546                 /*
547                  * Put unevictable pages directly on zone's unevictable
548                  * list.
549                  */
550                 lru = LRU_UNEVICTABLE;
551                 add_page_to_unevictable_list(page);
552                 /*
553                  * When racing with an mlock clearing (page is
554                  * unlocked), make sure that if the other thread does
555                  * not observe our setting of PG_lru and fails
556                  * isolation, we see PG_mlocked cleared below and move
557                  * the page back to the evictable list.
558                  *
559                  * The other side is TestClearPageMlocked().
560                  */
561                 smp_mb();
562         }
563
564         /*
565          * The page's status can change while we move it among LRU lists. If an
566          * evictable page ends up on the unevictable list, it will never be freed.
567          * To avoid that, check again after we have added it to the list.
568          */
569         if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
570                 if (!isolate_lru_page(page)) {
571                         put_page(page);
572                         goto redo;
573                 }
574                 /* This means someone else dropped this page from the LRU,
575                  * so it will be freed or put back on the LRU again. There is
576                  * nothing to do here.
577                  */
578         }
579
580         if (was_unevictable && lru != LRU_UNEVICTABLE)
581                 count_vm_event(UNEVICTABLE_PGRESCUED);
582         else if (!was_unevictable && lru == LRU_UNEVICTABLE)
583                 count_vm_event(UNEVICTABLE_PGCULLED);
584
585         put_page(page);         /* drop ref from isolate */
586 }
587
588 /*
589  * shrink_page_list() returns the number of reclaimed pages
590  */
591 static unsigned long shrink_page_list(struct list_head *page_list,
592                                         struct scan_control *sc,
593                                         enum pageout_io sync_writeback)
594 {
595         LIST_HEAD(ret_pages);
596         struct pagevec freed_pvec;
597         int pgactivate = 0;
598         unsigned long nr_reclaimed = 0;
599         unsigned long vm_flags;
600
601         cond_resched();
602
603         pagevec_init(&freed_pvec, 1);
604         while (!list_empty(page_list)) {
605                 struct address_space *mapping;
606                 struct page *page;
607                 int may_enter_fs;
608                 int referenced;
609
610                 cond_resched();
611
612                 page = lru_to_page(page_list);
613                 list_del(&page->lru);
614
615                 if (!trylock_page(page))
616                         goto keep;
617
618                 VM_BUG_ON(PageActive(page));
619
620                 sc->nr_scanned++;
621
622                 if (unlikely(!page_evictable(page, NULL)))
623                         goto cull_mlocked;
624
625                 if (!sc->may_unmap && page_mapped(page))
626                         goto keep_locked;
627
628                 /* Double the slab pressure for mapped and swapcache pages */
629                 if (page_mapped(page) || PageSwapCache(page))
630                         sc->nr_scanned++;
631
632                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
633                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
634
635                 if (PageWriteback(page)) {
636                         /*
637                          * Synchronous reclaim is performed in two passes,
638                          * first an asynchronous pass over the list to
639                          * start parallel writeback, and a second synchronous
640                          * pass to wait for the IO to complete.  Wait here
641                          * for any page for which writeback has already
642                          * started.
643                          */
644                         if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
645                                 wait_on_page_writeback(page);
646                         else
647                                 goto keep_locked;
648                 }
649
650                 referenced = page_referenced(page, 1,
651                                                 sc->mem_cgroup, &vm_flags);
652                 /*
653                  * In active use or really unfreeable?  Activate it.
654                  * If a page with PG_mlocked lost the isolation race,
655                  * try_to_unmap() moves it to the unevictable list.
656                  */
657                 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
658                                         referenced && page_mapping_inuse(page)
659                                         && !(vm_flags & VM_LOCKED))
660                         goto activate_locked;
661
662                 /*
663                  * Anonymous process memory has backing store?
664                  * Try to allocate it some swap space here.
665                  */
666                 if (PageAnon(page) && !PageSwapCache(page)) {
667                         if (!(sc->gfp_mask & __GFP_IO))
668                                 goto keep_locked;
669                         if (!add_to_swap(page))
670                                 goto activate_locked;
671                         may_enter_fs = 1;
672                 }
673
674                 mapping = page_mapping(page);
675
676                 /*
677                  * The page is mapped into the page tables of one or more
678                  * processes. Try to unmap it here.
679                  */
680                 if (page_mapped(page) && mapping) {
681                         switch (try_to_unmap(page, TTU_UNMAP)) {
682                         case SWAP_FAIL:
683                                 goto activate_locked;
684                         case SWAP_AGAIN:
685                                 goto keep_locked;
686                         case SWAP_MLOCK:
687                                 goto cull_mlocked;
688                         case SWAP_SUCCESS:
689                                 ; /* try to free the page below */
690                         }
691                 }
692
693                 if (PageDirty(page)) {
694                         if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
695                                 goto keep_locked;
696                         if (!may_enter_fs)
697                                 goto keep_locked;
698                         if (!sc->may_writepage)
699                                 goto keep_locked;
700
701                         /* Page is dirty, try to write it out here */
702                         switch (pageout(page, mapping, sync_writeback)) {
703                         case PAGE_KEEP:
704                                 goto keep_locked;
705                         case PAGE_ACTIVATE:
706                                 goto activate_locked;
707                         case PAGE_SUCCESS:
708                                 if (PageWriteback(page) || PageDirty(page))
709                                         goto keep;
710                                 /*
711                                  * A synchronous write - probably a ramdisk.  Go
712                                  * ahead and try to reclaim the page.
713                                  */
714                                 if (!trylock_page(page))
715                                         goto keep;
716                                 if (PageDirty(page) || PageWriteback(page))
717                                         goto keep_locked;
718                                 mapping = page_mapping(page);
719                         case PAGE_CLEAN:
720                                 ; /* try to free the page below */
721                         }
722                 }
723
724                 /*
725                  * If the page has buffers, try to free the buffer mappings
726                  * associated with this page. If we succeed we try to free
727                  * the page as well.
728                  *
729                  * We do this even if the page is PageDirty().
730                  * try_to_release_page() does not perform I/O, but it is
731                  * possible for a page to have PageDirty set, but it is actually
732                  * clean (all its buffers are clean).  This happens if the
733                  * buffers were written out directly, with submit_bh(). ext3
734                  * will do this, as well as the blockdev mapping.
735                  * try_to_release_page() will discover that cleanness and will
736                  * drop the buffers and mark the page clean - it can be freed.
737                  *
738                  * Rarely, pages can have buffers and no ->mapping.  These are
739                  * the pages which were not successfully invalidated in
740                  * truncate_complete_page().  We try to drop those buffers here
741                  * and if that worked, and the page is no longer mapped into
742                  * process address space (page_count == 1) it can be freed.
743                  * Otherwise, leave the page on the LRU so it is swappable.
744                  */
745                 if (page_has_private(page)) {
746                         if (!try_to_release_page(page, sc->gfp_mask))
747                                 goto activate_locked;
748                         if (!mapping && page_count(page) == 1) {
749                                 unlock_page(page);
750                                 if (put_page_testzero(page))
751                                         goto free_it;
752                                 else {
753                                         /*
754                                          * rare race with speculative reference.
755                                          * the speculative reference will free
756                                          * this page shortly, so we may
757                                          * increment nr_reclaimed here (and
758                                          * leave it off the LRU).
759                                          */
760                                         nr_reclaimed++;
761                                         continue;
762                                 }
763                         }
764                 }
765
766                 if (!mapping || !__remove_mapping(mapping, page))
767                         goto keep_locked;
768
769                 /*
770                  * At this point, we have no other references and there is
771                  * no way to pick any more up (removed from LRU, removed
772                  * from pagecache). Can use non-atomic bitops now (and
773                  * we obviously don't have to worry about waking up a process
774                  * waiting on the page lock, because there are no references.
775                  */
776                 __clear_page_locked(page);
777 free_it:
778                 nr_reclaimed++;
779                 if (!pagevec_add(&freed_pvec, page)) {
780                         __pagevec_free(&freed_pvec);
781                         pagevec_reinit(&freed_pvec);
782                 }
783                 continue;
784
785 cull_mlocked:
786                 if (PageSwapCache(page))
787                         try_to_free_swap(page);
788                 unlock_page(page);
789                 putback_lru_page(page);
790                 continue;
791
792 activate_locked:
793                 /* Not a candidate for swapping, so reclaim swap space. */
794                 if (PageSwapCache(page) && vm_swap_full())
795                         try_to_free_swap(page);
796                 VM_BUG_ON(PageActive(page));
797                 SetPageActive(page);
798                 pgactivate++;
799 keep_locked:
800                 unlock_page(page);
801 keep:
802                 list_add(&page->lru, &ret_pages);
803                 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
804         }
805         list_splice(&ret_pages, page_list);
806         if (pagevec_count(&freed_pvec))
807                 __pagevec_free(&freed_pvec);
808         count_vm_events(PGACTIVATE, pgactivate);
809         return nr_reclaimed;
810 }
811
812 /* LRU Isolation modes. */
813 #define ISOLATE_INACTIVE 0      /* Isolate inactive pages. */
814 #define ISOLATE_ACTIVE 1        /* Isolate active pages. */
815 #define ISOLATE_BOTH 2          /* Isolate both active and inactive pages. */
816
817 /*
818  * Attempt to remove the specified page from its LRU.  Only take this page
819  * if it is of the appropriate PageActive status.  Pages which are being
820  * freed elsewhere are also ignored.
821  *
822  * page:        page to consider
823  * mode:        one of the LRU isolation modes defined above
824  *
825  * returns 0 on success, -ve errno on failure.
826  */
827 int __isolate_lru_page(struct page *page, int mode, int file)
828 {
829         int ret = -EINVAL;
830
831         /* Only take pages on the LRU. */
832         if (!PageLRU(page))
833                 return ret;
834
835         /*
836          * When checking the active state, we need to be sure we are
837          * dealing with comparable boolean values.  Take the logical not
838          * of each.
839          */
840         if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
841                 return ret;
842
843         if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
844                 return ret;
845
846         /*
847          * When this function is being called for lumpy reclaim, we
848          * initially look into all LRU pages, active, inactive and
849          * unevictable; only give shrink_page_list evictable pages.
850          */
851         if (PageUnevictable(page))
852                 return ret;
853
854         ret = -EBUSY;
855
856         if (likely(get_page_unless_zero(page))) {
857                 /*
858                  * Be careful not to clear PageLRU until after we're
859                  * sure the page is not being freed elsewhere -- the
860                  * page release code relies on it.
861                  */
862                 ClearPageLRU(page);
863                 ret = 0;
864         }
865
866         return ret;
867 }
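As a worked example of the boolean normalisation above: with mode == ISOLATE_INACTIVE (0), an active page gives !PageActive(page) == 0 and !mode == 1, so the test fails and -EINVAL is returned; with mode == ISOLATE_ACTIVE (1) the roles are reversed and inactive pages are rejected; with ISOLATE_BOTH both the active-state check and the file/anon check below it are skipped entirely.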
868
869 /*
870  * zone->lru_lock is heavily contended.  Some of the functions that
871  * shrink the lists perform better by taking out a batch of pages
872  * and working on them outside the LRU lock.
873  *
874  * For pagecache intensive workloads, this function is the hottest
875  * spot in the kernel (apart from copy_*_user functions).
876  *
877  * Appropriate locks must be held before calling this function.
878  *
879  * @nr_to_scan: The number of pages to look through on the list.
880  * @src:        The LRU list to pull pages off.
881  * @dst:        The temp list to put pages on to.
882  * @scanned:    The number of pages that were scanned.
883  * @order:      The caller's attempted allocation order
884  * @mode:       One of the LRU isolation modes
885  * @file:       True [1] if isolating file [!anon] pages
886  *
887  * returns how many pages were moved onto *@dst.
888  */
889 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
890                 struct list_head *src, struct list_head *dst,
891                 unsigned long *scanned, int order, int mode, int file)
892 {
893         unsigned long nr_taken = 0;
894         unsigned long scan;
895
896         for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
897                 struct page *page;
898                 unsigned long pfn;
899                 unsigned long end_pfn;
900                 unsigned long page_pfn;
901                 int zone_id;
902
903                 page = lru_to_page(src);
904                 prefetchw_prev_lru_page(page, src, flags);
905
906                 VM_BUG_ON(!PageLRU(page));
907
908                 switch (__isolate_lru_page(page, mode, file)) {
909                 case 0:
910                         list_move(&page->lru, dst);
911                         mem_cgroup_del_lru(page);
912                         nr_taken++;
913                         break;
914
915                 case -EBUSY:
916                         /* else it is being freed elsewhere */
917                         list_move(&page->lru, src);
918                         mem_cgroup_rotate_lru_list(page, page_lru(page));
919                         continue;
920
921                 default:
922                         BUG();
923                 }
924
925                 if (!order)
926                         continue;
927
928                 /*
929                  * Attempt to take all pages in the order-aligned region
930                  * surrounding the tag page.  Only take those pages of
931                  * the same active state as that tag page.  We may safely
932                  * round the target page pfn down to the requested order
933                  * as the mem_map is guaranteed valid out to MAX_ORDER;
934                  * if such a page is in a different zone we will detect
935                  * it from its zone id and abort this block scan.
936                  */
937                 zone_id = page_zone_id(page);
938                 page_pfn = page_to_pfn(page);
939                 pfn = page_pfn & ~((1 << order) - 1);
940                 end_pfn = pfn + (1 << order);
941                 for (; pfn < end_pfn; pfn++) {
942                         struct page *cursor_page;
943
944                         /* The target page is in the block, ignore it. */
945                         if (unlikely(pfn == page_pfn))
946                                 continue;
947
948                         /* Avoid holes within the zone. */
949                         if (unlikely(!pfn_valid_within(pfn)))
950                                 break;
951
952                         cursor_page = pfn_to_page(pfn);
953
954                         /* Check that we have not crossed a zone boundary. */
955                         if (unlikely(page_zone_id(cursor_page) != zone_id))
956                                 continue;
957
958                         /*
959                          * If we don't have enough swap space, reclaiming
960                          * anon pages which don't already have a swap slot
961                          * is pointless.
962                          */
963                         if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
964                                         !PageSwapCache(cursor_page))
965                                 continue;
966
967                         if (__isolate_lru_page(cursor_page, mode, file) == 0) {
968                                 list_move(&cursor_page->lru, dst);
969                                 mem_cgroup_del_lru(cursor_page);
970                                 nr_taken++;
971                                 scan++;
972                         }
973                 }
974         }
975
976         *scanned = scan;
977         return nr_taken;
978 }
979
980 static unsigned long isolate_pages_global(unsigned long nr,
981                                         struct list_head *dst,
982                                         unsigned long *scanned, int order,
983                                         int mode, struct zone *z,
984                                         struct mem_cgroup *mem_cont,
985                                         int active, int file)
986 {
987         int lru = LRU_BASE;
988         if (active)
989                 lru += LRU_ACTIVE;
990         if (file)
991                 lru += LRU_FILE;
992         return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
993                                                                 mode, file);
994 }
995
996 /*
997  * clear_active_flags() is a helper for shrink_inactive_list(), clearing
998  * any active bits from the pages in the list.
999  */
1000 static unsigned long clear_active_flags(struct list_head *page_list,
1001                                         unsigned int *count)
1002 {
1003         int nr_active = 0;
1004         int lru;
1005         struct page *page;
1006
1007         list_for_each_entry(page, page_list, lru) {
1008                 lru = page_lru_base_type(page);
1009                 if (PageActive(page)) {
1010                         lru += LRU_ACTIVE;
1011                         ClearPageActive(page);
1012                         nr_active++;
1013                 }
1014                 count[lru]++;
1015         }
1016
1017         return nr_active;
1018 }
1019
1020 /**
1021  * isolate_lru_page - tries to isolate a page from its LRU list
1022  * @page: page to isolate from its LRU list
1023  *
1024  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1025  * vmstat statistic corresponding to whatever LRU list the page was on.
1026  *
1027  * Returns 0 if the page was removed from an LRU list.
1028  * Returns -EBUSY if the page was not on an LRU list.
1029  *
1030  * The returned page will have PageLRU() cleared.  If it was found on
1031  * the active list, it will have PageActive set.  If it was found on
1032  * the unevictable list, it will have the PageUnevictable bit set. That flag
1033  * may need to be cleared by the caller before letting the page go.
1034  *
1035  * The vmstat statistic corresponding to the list on which the page was
1036  * found will be decremented.
1037  *
1038  * Restrictions:
1039  * (1) Must be called with an elevated refcount on the page. This is a
1040  *     fundamental difference from isolate_lru_pages (which is called
1041  *     without a stable reference).
1042  * (2) the lru_lock must not be held.
1043  * (3) interrupts must be enabled.
1044  */
1045 int isolate_lru_page(struct page *page)
1046 {
1047         int ret = -EBUSY;
1048
1049         if (PageLRU(page)) {
1050                 struct zone *zone = page_zone(page);
1051
1052                 spin_lock_irq(&zone->lru_lock);
1053                 if (PageLRU(page) && get_page_unless_zero(page)) {
1054                         int lru = page_lru(page);
1055                         ret = 0;
1056                         ClearPageLRU(page);
1057
1058                         del_page_from_lru_list(zone, page, lru);
1059                 }
1060                 spin_unlock_irq(&zone->lru_lock);
1061         }
1062         return ret;
1063 }
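A hedged usage sketch of the restrictions above (the helper is hypothetical; the caller is assumed to already hold its own reference on the page, e.g. from get_user_pages(), with no lru_lock held and interrupts enabled):

	static void inspect_and_return_page(struct page *page)
	{
		if (isolate_lru_page(page))
			return;		/* -EBUSY: page was not on an LRU list */

		/*
		 * The page is now off its LRU with PageLRU cleared and an
		 * extra reference taken on our behalf; PageActive or
		 * PageUnevictable tells us which list it came from.
		 */
		putback_lru_page(page);	/* re-adds it and drops the isolate ref */
	}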
1064
1065 /*
1066  * Are there way too many processes in the direct reclaim path already?
1067  */
1068 static int too_many_isolated(struct zone *zone, int file,
1069                 struct scan_control *sc)
1070 {
1071         unsigned long inactive, isolated;
1072
1073         if (current_is_kswapd())
1074                 return 0;
1075
1076         if (!scanning_global_lru(sc))
1077                 return 0;
1078
1079         if (file) {
1080                 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1081                 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1082         } else {
1083                 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1084                 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1085         }
1086
1087         return isolated > inactive;
1088 }
1089
1090 /*
1091  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
1092  * of reclaimed pages
1093  */
1094 static unsigned long shrink_inactive_list(unsigned long max_scan,
1095                         struct zone *zone, struct scan_control *sc,
1096                         int priority, int file)
1097 {
1098         LIST_HEAD(page_list);
1099         struct pagevec pvec;
1100         unsigned long nr_scanned = 0;
1101         unsigned long nr_reclaimed = 0;
1102         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1103         int lumpy_reclaim = 0;
1104
1105         while (unlikely(too_many_isolated(zone, file, sc))) {
1106                 congestion_wait(BLK_RW_ASYNC, HZ/10);
1107
1108                 /* We are about to die and free our memory. Return now. */
1109                 if (fatal_signal_pending(current))
1110                         return SWAP_CLUSTER_MAX;
1111         }
1112
1113         /*
1114          * If we need a large contiguous chunk of memory, or have
1115          * trouble getting a small set of contiguous pages, we
1116          * will reclaim both active and inactive pages.
1117          *
1118          * We use the same threshold as pageout congestion_wait below.
1119          */
1120         if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1121                 lumpy_reclaim = 1;
1122         else if (sc->order && priority < DEF_PRIORITY - 2)
1123                 lumpy_reclaim = 1;
1124
1125         pagevec_init(&pvec, 1);
1126
1127         lru_add_drain();
1128         spin_lock_irq(&zone->lru_lock);
1129         do {
1130                 struct page *page;
1131                 unsigned long nr_taken;
1132                 unsigned long nr_scan;
1133                 unsigned long nr_freed;
1134                 unsigned long nr_active;
1135                 unsigned int count[NR_LRU_LISTS] = { 0, };
1136                 int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
1137                 unsigned long nr_anon;
1138                 unsigned long nr_file;
1139
1140                 nr_taken = sc->isolate_pages(sc->swap_cluster_max,
1141                              &page_list, &nr_scan, sc->order, mode,
1142                                 zone, sc->mem_cgroup, 0, file);
1143
1144                 if (scanning_global_lru(sc)) {
1145                         zone->pages_scanned += nr_scan;
1146                         if (current_is_kswapd())
1147                                 __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1148                                                        nr_scan);
1149                         else
1150                                 __count_zone_vm_events(PGSCAN_DIRECT, zone,
1151                                                        nr_scan);
1152                 }
1153
1154                 if (nr_taken == 0)
1155                         goto done;
1156
1157                 nr_active = clear_active_flags(&page_list, count);
1158                 __count_vm_events(PGDEACTIVATE, nr_active);
1159
1160                 __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1161                                                 -count[LRU_ACTIVE_FILE]);
1162                 __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1163                                                 -count[LRU_INACTIVE_FILE]);
1164                 __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1165                                                 -count[LRU_ACTIVE_ANON]);
1166                 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1167                                                 -count[LRU_INACTIVE_ANON]);
1168
1169                 nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1170                 nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1171                 __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
1172                 __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
1173
1174                 reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
1175                 reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
1176                 reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
1177                 reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
1178
1179                 spin_unlock_irq(&zone->lru_lock);
1180
1181                 nr_scanned += nr_scan;
1182                 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
1183
1184                 /*
1185                  * If we are direct reclaiming for contiguous pages and we do
1186                  * not reclaim everything in the list, try again and wait
1187                  * for IO to complete. This will stall high-order allocations
1188                  * but that should be acceptable to the caller
1189                  */
1190                 if (nr_freed < nr_taken && !current_is_kswapd() &&
1191                     lumpy_reclaim) {
1192                         congestion_wait(BLK_RW_ASYNC, HZ/10);
1193
1194                         /*
1195                          * The attempt at page out may have made some
1196                          * of the pages active, mark them inactive again.
1197                          */
1198                         nr_active = clear_active_flags(&page_list, count);
1199                         count_vm_events(PGDEACTIVATE, nr_active);
1200
1201                         nr_freed += shrink_page_list(&page_list, sc,
1202                                                         PAGEOUT_IO_SYNC);
1203                 }
1204
1205                 nr_reclaimed += nr_freed;
1206
1207                 local_irq_disable();
1208                 if (current_is_kswapd())
1209                         __count_vm_events(KSWAPD_STEAL, nr_freed);
1210                 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
1211
1212                 spin_lock(&zone->lru_lock);
1213                 /*
1214                  * Put back any unfreeable pages.
1215                  */
1216                 while (!list_empty(&page_list)) {
1217                         int lru;
1218                         page = lru_to_page(&page_list);
1219                         VM_BUG_ON(PageLRU(page));
1220                         list_del(&page->lru);
1221                         if (unlikely(!page_evictable(page, NULL))) {
1222                                 spin_unlock_irq(&zone->lru_lock);
1223                                 putback_lru_page(page);
1224                                 spin_lock_irq(&zone->lru_lock);
1225                                 continue;
1226                         }
1227                         SetPageLRU(page);
1228                         lru = page_lru(page);
1229                         add_page_to_lru_list(zone, page, lru);
1230                         if (is_active_lru(lru)) {
1231                                 int file = is_file_lru(lru);
1232                                 reclaim_stat->recent_rotated[file]++;
1233                         }
1234                         if (!pagevec_add(&pvec, page)) {
1235                                 spin_unlock_irq(&zone->lru_lock);
1236                                 __pagevec_release(&pvec);
1237                                 spin_lock_irq(&zone->lru_lock);
1238                         }
1239                 }
1240                 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1241                 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1242
1243         } while (nr_scanned < max_scan);
1244
1245 done:
1246         spin_unlock_irq(&zone->lru_lock);
1247         pagevec_release(&pvec);
1248         return nr_reclaimed;
1249 }
1250
1251 /*
1252  * We are about to scan this zone at a certain priority level.  If that priority
1253  * level is smaller (ie: more urgent) than the previous priority, then note
1254  * that priority level within the zone.  This is done so that when the next
1255  * process comes in to scan this zone, it will immediately start out at this
1256  * priority level rather than having to build up its own scanning priority.
1257  * Here, this priority affects only the reclaim-mapped threshold.
1258  */
1259 static inline void note_zone_scanning_priority(struct zone *zone, int priority)
1260 {
1261         if (priority < zone->prev_priority)
1262                 zone->prev_priority = priority;
1263 }
1264
1265 /*
1266  * This moves pages from the active list to the inactive list.
1267  *
1268  * We move them the other way if the page is referenced by one or more
1269  * processes, from rmap.
1270  *
1271  * If the pages are mostly unmapped, the processing is fast and it is
1272  * appropriate to hold zone->lru_lock across the whole operation.  But if
1273  * the pages are mapped, the processing is slow (page_referenced()) so we
1274  * should drop zone->lru_lock around each page.  It's impossible to balance
1275  * this, so instead we remove the pages from the LRU while processing them.
1276  * It is safe to rely on PG_active against the non-LRU pages in here because
1277  * nobody will play with that bit on a non-LRU page.
1278  *
1279  * The downside is that we have to touch page->_count against each page.
1280  * But we had to alter page->flags anyway.
1281  */
1282
1283 static void move_active_pages_to_lru(struct zone *zone,
1284                                      struct list_head *list,
1285                                      enum lru_list lru)
1286 {
1287         unsigned long pgmoved = 0;
1288         struct pagevec pvec;
1289         struct page *page;
1290
1291         pagevec_init(&pvec, 1);
1292
1293         while (!list_empty(list)) {
1294                 page = lru_to_page(list);
1295
1296                 VM_BUG_ON(PageLRU(page));
1297                 SetPageLRU(page);
1298
1299                 list_move(&page->lru, &zone->lru[lru].list);
1300                 mem_cgroup_add_lru_list(page, lru);
1301                 pgmoved++;
1302
1303                 if (!pagevec_add(&pvec, page) || list_empty(list)) {
1304                         spin_unlock_irq(&zone->lru_lock);
1305                         if (buffer_heads_over_limit)
1306                                 pagevec_strip(&pvec);
1307                         __pagevec_release(&pvec);
1308                         spin_lock_irq(&zone->lru_lock);
1309                 }
1310         }
1311         __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1312         if (!is_active_lru(lru))
1313                 __count_vm_events(PGDEACTIVATE, pgmoved);
1314 }
1315
1316 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1317                         struct scan_control *sc, int priority, int file)
1318 {
1319         unsigned long nr_taken;
1320         unsigned long pgscanned;
1321         unsigned long vm_flags;
1322         LIST_HEAD(l_hold);      /* The pages which were snipped off */
1323         LIST_HEAD(l_active);
1324         LIST_HEAD(l_inactive);
1325         struct page *page;
1326         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1327         unsigned long nr_rotated = 0;
1328
1329         lru_add_drain();
1330         spin_lock_irq(&zone->lru_lock);
1331         nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
1332                                         ISOLATE_ACTIVE, zone,
1333                                         sc->mem_cgroup, 1, file);
1334         /*
1335          * zone->pages_scanned is used to detect a zone's OOM state;
1336          * mem_cgroup remembers nr_scan by itself.
1337          */
1338         if (scanning_global_lru(sc)) {
1339                 zone->pages_scanned += pgscanned;
1340         }
1341         reclaim_stat->recent_scanned[file] += nr_taken;
1342
1343         __count_zone_vm_events(PGREFILL, zone, pgscanned);
1344         if (file)
1345                 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
1346         else
1347                 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1348         __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1349         spin_unlock_irq(&zone->lru_lock);
1350
1351         while (!list_empty(&l_hold)) {
1352                 cond_resched();
1353                 page = lru_to_page(&l_hold);
1354                 list_del(&page->lru);
1355
1356                 if (unlikely(!page_evictable(page, NULL))) {
1357                         putback_lru_page(page);
1358                         continue;
1359                 }
1360
1361                 /* page_referenced clears PageReferenced */
1362                 if (page_mapping_inuse(page) &&
1363                     page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
1364                         nr_rotated++;
1365                         /*
1366                          * Identify referenced, file-backed active pages and
1367                          * give them one more trip around the active list, so
1368                          * that executable code gets a better chance to stay in
1369                          * memory under moderate memory pressure.  Anon pages
1370                          * are not likely to be evicted by use-once streaming
1371                          * IO, plus a JVM can create lots of anon VM_EXEC pages,
1372                          * so we ignore them here.
1373                          */
1374                         if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1375                                 list_add(&page->lru, &l_active);
1376                                 continue;
1377                         }
1378                 }
1379
1380                 ClearPageActive(page);  /* we are de-activating */
1381                 list_add(&page->lru, &l_inactive);
1382         }
1383
1384         /*
1385          * Move pages back to the lru list.
1386          */
1387         spin_lock_irq(&zone->lru_lock);
1388         /*
1389          * Count referenced pages from currently used mappings as rotated,
1390          * even though only some of them are actually re-activated.  This
1391          * helps balance scan pressure between file and anonymous pages in
1392          * get_scan_ratio.
1393          */
1394         reclaim_stat->recent_rotated[file] += nr_rotated;
1395
1396         move_active_pages_to_lru(zone, &l_active,
1397                                                 LRU_ACTIVE + file * LRU_FILE);
1398         move_active_pages_to_lru(zone, &l_inactive,
1399                                                 LRU_BASE   + file * LRU_FILE);
1400         __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1401         spin_unlock_irq(&zone->lru_lock);
1402 }
1403
1404 static int inactive_anon_is_low_global(struct zone *zone)
1405 {
1406         unsigned long active, inactive;
1407
1408         active = zone_page_state(zone, NR_ACTIVE_ANON);
1409         inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1410
1411         if (inactive * zone->inactive_ratio < active)
1412                 return 1;
1413
1414         return 0;
1415 }
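
/*
 * Illustrative sketch, not part of vmscan.c: the inactive_ratio test used
 * by inactive_anon_is_low_global() above, with made-up sample numbers.
 * Standalone C under #if 0, never built.
 */
#if 0 /* example only */
static int example_inactive_anon_is_low(unsigned long active,
					unsigned long inactive,
					unsigned int inactive_ratio)
{
	/*
	 * e.g. active = 9000, inactive = 2000, inactive_ratio = 3:
	 * 2000 * 3 = 6000 < 9000, so the inactive list is considered low
	 * and some active anon pages should be deactivated.
	 */
	return inactive * inactive_ratio < active;
}
#endif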
1416
1417 /**
1418  * inactive_anon_is_low - check if anonymous pages need to be deactivated
1419  * @zone: zone to check
1420  * @sc:   scan control of this context
1421  *
1422  * Returns true if the zone does not have enough inactive anon pages,
1423  * meaning some active anon pages need to be deactivated.
1424  */
1425 static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1426 {
1427         int low;
1428
1429         if (scanning_global_lru(sc))
1430                 low = inactive_anon_is_low_global(zone);
1431         else
1432                 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1433         return low;
1434 }
1435
1436 static int inactive_file_is_low_global(struct zone *zone)
1437 {
1438         unsigned long active, inactive;
1439
1440         active = zone_page_state(zone, NR_ACTIVE_FILE);
1441         inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1442
1443         return (active > inactive);
1444 }
1445
1446 /**
1447  * inactive_file_is_low - check if file pages need to be deactivated
1448  * @zone: zone to check
1449  * @sc:   scan control of this context
1450  *
1451  * When the system is doing streaming IO, memory pressure here
1452  * ensures that active file pages get deactivated, until more
1453  * than half of the file pages are on the inactive list.
1454  *
1455  * Once we get to that situation, protect the system's working
1456  * set from being evicted by disabling active file page aging.
1457  *
1458  * This uses a different ratio than the anonymous pages, because
1459  * the page cache uses a use-once replacement algorithm.
1460  */
1461 static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1462 {
1463         int low;
1464
1465         if (scanning_global_lru(sc))
1466                 low = inactive_file_is_low_global(zone);
1467         else
1468                 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1469         return low;
1470 }
1471
1472 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1473         struct zone *zone, struct scan_control *sc, int priority)
1474 {
1475         int file = is_file_lru(lru);
1476
1477         if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
1478                 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1479                 return 0;
1480         }
1481
1482         if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
1483                 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1484                 return 0;
1485         }
1486         return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1487 }
1488
1489 /*
1490  * Determine how aggressively the anon and file LRU lists should be
1491  * scanned.  The relative value of each set of LRU lists is determined
1492  * by looking at the fraction of the pages scanned we did rotate back
1493  * onto the active list instead of evict.
1494  *
1495  * percent[0] specifies how much pressure to put on ram/swap backed
1496  * memory, while percent[1] determines pressure on the file LRUs.
1497  */
1498 static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1499                                         unsigned long *percent)
1500 {
1501         unsigned long anon, file, free;
1502         unsigned long anon_prio, file_prio;
1503         unsigned long ap, fp;
1504         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1505
1506         anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1507                 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1508         file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1509                 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1510
1511         if (scanning_global_lru(sc)) {
1512                 free  = zone_page_state(zone, NR_FREE_PAGES);
1513                 /* If we have very few page cache pages,
1514                    force-scan anon pages. */
1515                 if (unlikely(file + free <= high_wmark_pages(zone))) {
1516                         percent[0] = 100;
1517                         percent[1] = 0;
1518                         return;
1519                 }
1520         }
1521
1522         /*
1523          * OK, so we have swap space and a fair amount of page cache
1524          * pages.  We use the recently rotated / recently scanned
1525          * ratios to determine how valuable each cache is.
1526          *
1527          * Because workloads change over time (and to avoid overflow)
1528          * we keep these statistics as a floating average, which ends
1529          * up weighing recent references more than old ones.
1530          *
1531          * anon in [0], file in [1]
1532          */
1533         if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1534                 spin_lock_irq(&zone->lru_lock);
1535                 reclaim_stat->recent_scanned[0] /= 2;
1536                 reclaim_stat->recent_rotated[0] /= 2;
1537                 spin_unlock_irq(&zone->lru_lock);
1538         }
1539
1540         if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1541                 spin_lock_irq(&zone->lru_lock);
1542                 reclaim_stat->recent_scanned[1] /= 2;
1543                 reclaim_stat->recent_rotated[1] /= 2;
1544                 spin_unlock_irq(&zone->lru_lock);
1545         }
1546
1547         /*
1548          * With swappiness at 100, anonymous and file have the same priority.
1549          * This scanning priority is essentially the inverse of IO cost.
1550          */
1551         anon_prio = sc->swappiness;
1552         file_prio = 200 - sc->swappiness;
1553
1554         /*
1555          * The amount of pressure on anon vs file pages is inversely
1556          * proportional to the fraction of recently scanned pages on
1557          * each list that were recently referenced and in active use.
1558          */
1559         ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1560         ap /= reclaim_stat->recent_rotated[0] + 1;
1561
1562         fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1563         fp /= reclaim_stat->recent_rotated[1] + 1;
1564
1565         /* Normalize to percentages */
1566         percent[0] = 100 * ap / (ap + fp + 1);
1567         percent[1] = 100 - percent[0];
1568 }
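
/*
 * Illustrative sketch, not part of vmscan.c: the arithmetic performed by
 * get_scan_ratio() above, with assumed sample history.  With the default
 * swappiness of 60, anon_prio = 60 and file_prio = 140; the pressure then
 * tilts toward the list whose pages are rotated back less often.
 * Standalone C under #if 0, never built.
 */
#if 0 /* example only */
static void example_scan_ratio(unsigned long *percent)
{
	unsigned long anon_prio = 60, file_prio = 200 - 60;
	/* assumed history: anon rarely rotated, file rotated very often */
	unsigned long anon_scanned = 1000, anon_rotated = 100;
	unsigned long file_scanned = 1000, file_rotated = 800;
	unsigned long ap, fp;

	ap = (anon_prio + 1) * (anon_scanned + 1) / (anon_rotated + 1);
	fp = (file_prio + 1) * (file_scanned + 1) / (file_rotated + 1);

	/* ap ~= 604, fp ~= 176: roughly 77% anon / 23% file pressure */
	percent[0] = 100 * ap / (ap + fp + 1);
	percent[1] = 100 - percent[0];
}
#endif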
1569
1570 /*
1571  * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
1572  * until we have collected @swap_cluster_max pages to scan.
1573  */
1574 static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1575                                        unsigned long *nr_saved_scan,
1576                                        unsigned long swap_cluster_max)
1577 {
1578         unsigned long nr;
1579
1580         *nr_saved_scan += nr_to_scan;
1581         nr = *nr_saved_scan;
1582
1583         if (nr >= swap_cluster_max)
1584                 *nr_saved_scan = 0;
1585         else
1586                 nr = 0;
1587
1588         return nr;
1589 }
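
/*
 * Illustrative sketch, not part of vmscan.c: how nr_scan_try_batch() above
 * accumulates small scan requests.  With swap_cluster_max == 32, three
 * successive requests of 12 pages yield 0, 0 and then 36, after which the
 * saved counter is reset.  Standalone C under #if 0, never built.
 */
#if 0 /* example only */
static void example_scan_batching(void)
{
	unsigned long saved = 0, out;
	unsigned long requests[3] = { 12, 12, 12 };
	int i;

	for (i = 0; i < 3; i++) {
		saved += requests[i];
		if (saved >= 32) {		/* swap_cluster_max */
			out = saved;
			saved = 0;
		} else
			out = 0;
		(void)out;	/* out is 0, 0, 36 across the iterations */
	}
}
#endif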
1590
1591 /*
1592  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1593  */
1594 static void shrink_zone(int priority, struct zone *zone,
1595                                 struct scan_control *sc)
1596 {
1597         unsigned long nr[NR_LRU_LISTS];
1598         unsigned long nr_to_scan;
1599         unsigned long percent[2];       /* anon @ 0; file @ 1 */
1600         enum lru_list l;
1601         unsigned long nr_reclaimed = sc->nr_reclaimed;
1602         unsigned long swap_cluster_max = sc->swap_cluster_max;
1603         unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1604         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1605         int noswap = 0;
1606
1607         /* If we have no swap space, do not bother scanning anon pages. */
1608         if (!sc->may_swap || (nr_swap_pages <= 0)) {
1609                 noswap = 1;
1610                 percent[0] = 0;
1611                 percent[1] = 100;
1612         } else
1613                 get_scan_ratio(zone, sc, percent);
1614
1615         for_each_evictable_lru(l) {
1616                 int file = is_file_lru(l);
1617                 unsigned long scan;
1618
1619                 scan = zone_nr_lru_pages(zone, sc, l);
1620                 if (priority || noswap) {
1621                         scan >>= priority;
1622                         scan = (scan * percent[file]) / 100;
1623                 }
1624                 nr[l] = nr_scan_try_batch(scan,
1625                                           &reclaim_stat->nr_saved_scan[l],
1626                                           swap_cluster_max);
1627         }
1628
1629         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1630                                         nr[LRU_INACTIVE_FILE]) {
1631                 for_each_evictable_lru(l) {
1632                         if (nr[l]) {
1633                                 nr_to_scan = min(nr[l], swap_cluster_max);
1634                                 nr[l] -= nr_to_scan;
1635
1636                                 nr_reclaimed += shrink_list(l, nr_to_scan,
1637                                                             zone, sc, priority);
1638                         }
1639                 }
1640                 /*
1641                  * On large memory systems, scan >> priority can become
1642                  * really large. This is fine for the starting priority;
1643                  * we want to put equal scanning pressure on each zone.
1644                  * However, if the VM has a harder time freeing pages,
1645                  * with multiple processes reclaiming pages, the total
1646                  * freeing target can get unreasonably large.
1647                  */
1648                 if (nr_reclaimed > nr_to_reclaim && priority < DEF_PRIORITY)
1649                         break;
1650         }
1651
1652         sc->nr_reclaimed = nr_reclaimed;
1653
1654         /*
1655          * Even if we did not try to evict anon pages at all, we want to
1656          * rebalance the anon lru active/inactive ratio.
1657          */
1658         if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
1659                 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1660
1661         throttle_vm_writeout(sc->gfp_mask);
1662 }
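
/*
 * Illustrative sketch, not part of vmscan.c: how shrink_zone() above sizes
 * its per-LRU scan target.  For an LRU of 1,048,576 pages at DEF_PRIORITY
 * (12 in this kernel) with a 40% scan ratio, only about 102 pages are
 * scanned per pass; at priority 0 with swap available the whole list is
 * eligible.  Standalone C under #if 0, never built; values are assumptions.
 */
#if 0 /* example only */
static unsigned long example_scan_target(unsigned long lru_pages,
					 int priority, unsigned long percent)
{
	unsigned long scan = lru_pages;

	if (priority) {
		scan >>= priority;		/* 1048576 >> 12 == 256 */
		scan = scan * percent / 100;	/* 256 * 40 / 100 == 102 */
	}
	return scan;
}
#endif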
1663
1664 /*
1665  * This is the direct reclaim path, for page-allocating processes.  We only
1666  * try to reclaim pages from zones which will satisfy the caller's allocation
1667  * request.
1668  *
1669  * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1670  * Because:
1671  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1672  *    allocation or
1673  * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1674  *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1675  *    zone defense algorithm.
1676  *
1677  * If a zone is deemed to be full of pinned pages then just give it a light
1678  * scan then give up on it.
1679  */
1680 static void shrink_zones(int priority, struct zonelist *zonelist,
1681                                         struct scan_control *sc)
1682 {
1683         enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1684         struct zoneref *z;
1685         struct zone *zone;
1686
1687         sc->all_unreclaimable = 1;
1688         for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
1689                                         sc->nodemask) {
1690                 if (!populated_zone(zone))
1691                         continue;
1692                 /*
1693                  * Take care that memory controller reclaim has only a small
1694                  * influence on the global LRU.
1695                  */
1696                 if (scanning_global_lru(sc)) {
1697                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1698                                 continue;
1699                         note_zone_scanning_priority(zone, priority);
1700
1701                         if (zone_is_all_unreclaimable(zone) &&
1702                                                 priority != DEF_PRIORITY)
1703                                 continue;       /* Let kswapd poll it */
1704                         sc->all_unreclaimable = 0;
1705                 } else {
1706                         /*
1707                          * Ignore cpuset limitations here. We just want to reduce
1708                          * the number of pages used by us, regardless of memory shortage.
1709                          */
1710                         sc->all_unreclaimable = 0;
1711                         mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
1712                                                         priority);
1713                 }
1714
1715                 shrink_zone(priority, zone, sc);
1716         }
1717 }
1718
1719 /*
1720  * This is the main entry point to direct page reclaim.
1721  *
1722  * If a full scan of the inactive list fails to free enough memory then we
1723  * are "out of memory" and something needs to be killed.
1724  *
1725  * If the caller is !__GFP_FS then the probability of a failure is reasonably
1726  * high - the zone may be full of dirty or under-writeback pages, which this
1727  * caller can't do much about.  We kick the writeback threads and take explicit
1728  * naps in the hope that some of these pages can be written.  But if the
1729  * allocating task holds filesystem locks which prevent writeout this might not
1730  * work, and the allocation attempt will fail.
1731  *
1732  * returns:     0, if no pages reclaimed
1733  *              else, the number of pages reclaimed
1734  */
1735 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1736                                         struct scan_control *sc)
1737 {
1738         int priority;
1739         unsigned long ret = 0;
1740         unsigned long total_scanned = 0;
1741         struct reclaim_state *reclaim_state = current->reclaim_state;
1742         unsigned long lru_pages = 0;
1743         struct zoneref *z;
1744         struct zone *zone;
1745         enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1746         unsigned long writeback_threshold;
1747
1748         delayacct_freepages_start();
1749
1750         if (scanning_global_lru(sc))
1751                 count_vm_event(ALLOCSTALL);
1752         /*
1753          * mem_cgroup will not do shrink_slab.
1754          */
1755         if (scanning_global_lru(sc)) {
1756                 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1757
1758                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1759                                 continue;
1760
1761                         lru_pages += zone_reclaimable_pages(zone);
1762                 }
1763         }
1764
1765         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1766                 sc->nr_scanned = 0;
1767                 if (!priority)
1768                         disable_swap_token();
1769                 shrink_zones(priority, zonelist, sc);
1770                 /*
1771                  * Don't shrink slabs when reclaiming memory from
1772                  * over limit cgroups
1773                  */
1774                 if (scanning_global_lru(sc)) {
1775                         shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1776                         if (reclaim_state) {
1777                                 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
1778                                 reclaim_state->reclaimed_slab = 0;
1779                         }
1780                 }
1781                 total_scanned += sc->nr_scanned;
1782                 if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
1783                         ret = sc->nr_reclaimed;
1784                         goto out;
1785                 }
1786
1787                 /*
1788                  * Try to write back as many pages as we just scanned.  This
1789                  * tends to cause slow streaming writers to write data to the
1790                  * disk smoothly, at the dirtying rate, which is nice.   But
1791                  * that's undesirable in laptop mode, where we *want* lumpy
1792                  * writeout.  So in laptop mode, write out the whole world.
1793                  */
1794                 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
1795                 if (total_scanned > writeback_threshold) {
1796                         wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
1797                         sc->may_writepage = 1;
1798                 }
1799
1800                 /* Take a nap, wait for some writeback to complete */
1801                 if (!sc->hibernation_mode && sc->nr_scanned &&
1802                     priority < DEF_PRIORITY - 2)
1803                         congestion_wait(BLK_RW_ASYNC, HZ/10);
1804         }
1805         /* top priority shrink_zones still had more to do? don't OOM, then */
1806         if (!sc->all_unreclaimable && scanning_global_lru(sc))
1807                 ret = sc->nr_reclaimed;
1808 out:
1809         /*
1810          * Now that we've scanned all the zones at this priority level, note
1811          * that level within the zone so that the next thread which performs
1812          * scanning of this zone will immediately start out at this priority
1813          * level.  This affects only the decision whether or not to bring
1814          * mapped pages onto the inactive list.
1815          */
1816         if (priority < 0)
1817                 priority = 0;
1818
1819         if (scanning_global_lru(sc)) {
1820                 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1821
1822                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1823                                 continue;
1824
1825                         zone->prev_priority = priority;
1826                 }
1827         } else
1828                 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1829
1830         delayacct_freepages_end();
1831
1832         return ret;
1833 }
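
/*
 * Illustrative sketch, not part of vmscan.c: the writeback kick inside
 * do_try_to_free_pages() above.  The threshold is 1.5x the reclaim target,
 * so with nr_to_reclaim == SWAP_CLUSTER_MAX (32) the flusher threads are
 * woken once more than 48 pages have been scanned in total.  Standalone C
 * under #if 0, never built.
 */
#if 0 /* example only */
static int example_should_kick_writeback(unsigned long total_scanned,
					 unsigned long nr_to_reclaim)
{
	unsigned long threshold = nr_to_reclaim + nr_to_reclaim / 2;

	/* e.g. nr_to_reclaim = 32 -> threshold = 48 */
	return total_scanned > threshold;
}
#endif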
1834
1835 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1836                                 gfp_t gfp_mask, nodemask_t *nodemask)
1837 {
1838         struct scan_control sc = {
1839                 .gfp_mask = gfp_mask,
1840                 .may_writepage = !laptop_mode,
1841                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1842                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
1843                 .may_unmap = 1,
1844                 .may_swap = 1,
1845                 .swappiness = vm_swappiness,
1846                 .order = order,
1847                 .mem_cgroup = NULL,
1848                 .isolate_pages = isolate_pages_global,
1849                 .nodemask = nodemask,
1850         };
1851
1852         return do_try_to_free_pages(zonelist, &sc);
1853 }
1854
1855 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
1856
1857 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
1858                                                 gfp_t gfp_mask, bool noswap,
1859                                                 unsigned int swappiness,
1860                                                 struct zone *zone, int nid)
1861 {
1862         struct scan_control sc = {
1863                 .may_writepage = !laptop_mode,
1864                 .may_unmap = 1,
1865                 .may_swap = !noswap,
1866                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1867                 .swappiness = swappiness,
1868                 .order = 0,
1869                 .mem_cgroup = mem,
1870                 .isolate_pages = mem_cgroup_isolate_pages,
1871         };
1872         nodemask_t nm  = nodemask_of_node(nid);
1873
1874         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1875                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1876         sc.nodemask = &nm;
1877         sc.nr_reclaimed = 0;
1878         sc.nr_scanned = 0;
1879         /*
1880          * NOTE: Although we can get the priority field, using it
1881          * here is not a good idea, since it limits the pages we can scan.
1882          * If we don't reclaim here, the shrink_zone from balance_pgdat
1883          * will pick up pages from other mem cgroups as well. We hack
1884          * the priority and make it zero.
1885          */
1886         shrink_zone(0, zone, &sc);
1887         return sc.nr_reclaimed;
1888 }
1889
1890 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1891                                            gfp_t gfp_mask,
1892                                            bool noswap,
1893                                            unsigned int swappiness)
1894 {
1895         struct zonelist *zonelist;
1896         struct scan_control sc = {
1897                 .may_writepage = !laptop_mode,
1898                 .may_unmap = 1,
1899                 .may_swap = !noswap,
1900                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1901                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
1902                 .swappiness = swappiness,
1903                 .order = 0,
1904                 .mem_cgroup = mem_cont,
1905                 .isolate_pages = mem_cgroup_isolate_pages,
1906                 .nodemask = NULL, /* we don't care about the placement */
1907         };
1908
1909         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1910                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1911         zonelist = NODE_DATA(numa_node_id())->node_zonelists;
1912         return do_try_to_free_pages(zonelist, &sc);
1913 }
1914 #endif
1915
1916 /* is kswapd sleeping prematurely? */
1917 static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
1918 {
1919         int i;
1920
1921         /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
1922         if (remaining)
1923                 return 1;
1924
1925         /* If after HZ/10, a zone is below the high mark, it's premature */
1926         for (i = 0; i < pgdat->nr_zones; i++) {
1927                 struct zone *zone = pgdat->node_zones + i;
1928
1929                 if (!populated_zone(zone))
1930                         continue;
1931
1932                 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
1933                                                                 0, 0))
1934                         return 1;
1935         }
1936
1937         return 0;
1938 }
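
/*
 * Illustrative sketch, not part of vmscan.c: the two reasons kswapd's short
 * nap counts as premature in sleeping_prematurely() above - either a direct
 * reclaimer woke it again during the nap (remaining != 0), or a populated
 * zone is still below its high watermark.  Standalone C under #if 0 with
 * an assumed zone representation.
 */
#if 0 /* example only */
struct example_zone { int populated; unsigned long free, high_wmark; };

static int example_sleeping_prematurely(long remaining,
					struct example_zone *zones, int nr)
{
	int i;

	if (remaining)
		return 1;
	for (i = 0; i < nr; i++)
		if (zones[i].populated && zones[i].free < zones[i].high_wmark)
			return 1;
	return 0;
}
#endif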
1939
1940 /*
1941  * For kswapd, balance_pgdat() will work across all this node's zones until
1942  * they are all at high_wmark_pages(zone).
1943  *
1944  * Returns the number of pages which were actually freed.
1945  *
1946  * There is special handling here for zones which are full of pinned pages.
1947  * This can happen if the pages are all mlocked, or if they are all used by
1948  * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
1949  * What we do is to detect the case where all pages in the zone have been
1950  * scanned twice and there has been zero successful reclaim.  Mark the zone as
1951  * dead and from now on, only perform a short scan.  Basically we're polling
1952  * the zone for when the problem goes away.
1953  *
1954  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
1955  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
1956  * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
1957  * lower zones regardless of the number of free pages in the lower zones. This
1958  * interoperates with the page allocator fallback scheme to ensure that aging
1959  * of pages is balanced across the zones.
1960  */
1961 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1962 {
1963         int all_zones_ok;
1964         int priority;
1965         int i;
1966         unsigned long total_scanned;
1967         struct reclaim_state *reclaim_state = current->reclaim_state;
1968         struct scan_control sc = {
1969                 .gfp_mask = GFP_KERNEL,
1970                 .may_unmap = 1,
1971                 .may_swap = 1,
1972                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1973                 /*
1974                  * kswapd doesn't want to be bailed out while reclaiming, because
1975                  * we want to put equal scanning pressure on each zone.
1976                  */
1977                 .nr_to_reclaim = ULONG_MAX,
1978                 .swappiness = vm_swappiness,
1979                 .order = order,
1980                 .mem_cgroup = NULL,
1981                 .isolate_pages = isolate_pages_global,
1982         };
1983         /*
1984          * temp_priority is used to remember the scanning priority at which
1985          * this zone was successfully refilled to
1986          * free_pages == high_wmark_pages(zone).
1987          */
1988         int temp_priority[MAX_NR_ZONES];
1989
1990 loop_again:
1991         total_scanned = 0;
1992         sc.nr_reclaimed = 0;
1993         sc.may_writepage = !laptop_mode;
1994         count_vm_event(PAGEOUTRUN);
1995
1996         for (i = 0; i < pgdat->nr_zones; i++)
1997                 temp_priority[i] = DEF_PRIORITY;
1998
1999         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2000                 int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
2001                 unsigned long lru_pages = 0;
2002                 int has_under_min_watermark_zone = 0;
2003
2004                 /* The swap token gets in the way of swapout... */
2005                 if (!priority)
2006                         disable_swap_token();
2007
2008                 all_zones_ok = 1;
2009
2010                 /*
2011                  * Scan in the highmem->dma direction for the highest
2012                  * zone which needs scanning
2013                  */
2014                 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2015                         struct zone *zone = pgdat->node_zones + i;
2016
2017                         if (!populated_zone(zone))
2018                                 continue;
2019
2020                         if (zone_is_all_unreclaimable(zone) &&
2021                             priority != DEF_PRIORITY)
2022                                 continue;
2023
2024                         /*
2025                          * Do some background aging of the anon list, to give
2026                          * pages a chance to be referenced before reclaiming.
2027                          */
2028                         if (inactive_anon_is_low(zone, &sc))
2029                                 shrink_active_list(SWAP_CLUSTER_MAX, zone,
2030                                                         &sc, priority, 0);
2031
2032                         if (!zone_watermark_ok(zone, order,
2033                                         high_wmark_pages(zone), 0, 0)) {
2034                                 end_zone = i;
2035                                 break;
2036                         }
2037                 }
2038                 if (i < 0)
2039                         goto out;
2040
2041                 for (i = 0; i <= end_zone; i++) {
2042                         struct zone *zone = pgdat->node_zones + i;
2043
2044                         lru_pages += zone_reclaimable_pages(zone);
2045                 }
2046
2047                 /*
2048                  * Now scan the zones in the dma->highmem direction, stopping
2049                  * at the last zone which needs scanning.
2050                  *
2051                  * We do this because the page allocator works in the opposite
2052                  * direction.  This prevents the page allocator from allocating
2053                  * pages behind kswapd's direction of progress, which would
2054                  * cause too much scanning of the lower zones.
2055                  */
2056                 for (i = 0; i <= end_zone; i++) {
2057                         struct zone *zone = pgdat->node_zones + i;
2058                         int nr_slab;
2059                         int nid, zid;
2060
2061                         if (!populated_zone(zone))
2062                                 continue;
2063
2064                         if (zone_is_all_unreclaimable(zone) &&
2065                                         priority != DEF_PRIORITY)
2066                                 continue;
2067
2068                         if (!zone_watermark_ok(zone, order,
2069                                         high_wmark_pages(zone), end_zone, 0))
2070                                 all_zones_ok = 0;
2071                         temp_priority[i] = priority;
2072                         sc.nr_scanned = 0;
2073                         note_zone_scanning_priority(zone, priority);
2074
2075                         nid = pgdat->node_id;
2076                         zid = zone_idx(zone);
2077                         /*
2078                          * Call soft limit reclaim before calling shrink_zone.
2079                          * For now we ignore the return value
2080                          */
2081                         mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
2082                                                         nid, zid);
2083                         /*
2084                          * We put equal pressure on every zone, unless one
2085                          * zone has way too many pages free already.
2086                          */
2087                         if (!zone_watermark_ok(zone, order,
2088                                         8*high_wmark_pages(zone), end_zone, 0))
2089                                 shrink_zone(priority, zone, &sc);
2090                         reclaim_state->reclaimed_slab = 0;
2091                         nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
2092                                                 lru_pages);
2093                         sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2094                         total_scanned += sc.nr_scanned;
2095                         if (zone_is_all_unreclaimable(zone))
2096                                 continue;
2097                         if (nr_slab == 0 && zone->pages_scanned >=
2098                                         (zone_reclaimable_pages(zone) * 6))
2099                                         zone_set_flag(zone,
2100                                                       ZONE_ALL_UNRECLAIMABLE);
2101                         /*
2102                          * If we've done a decent amount of scanning and
2103                          * the reclaim ratio is low, start doing writepage
2104                          * even in laptop mode
2105                          */
2106                         if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2107                             total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2108                                 sc.may_writepage = 1;
2109
2110                         /*
2111                          * We are still under the min watermark. This means we risk
2112                          * GFP_ATOMIC allocation failures. Hurry up!
2113                          */
2114                         if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
2115                                               end_zone, 0))
2116                                 has_under_min_watermark_zone = 1;
2117
2118                 }
2119                 if (all_zones_ok)
2120                         break;          /* kswapd: all done */
2121                 /*
2122                  * OK, kswapd is getting into trouble.  Take a nap, then take
2123                  * another pass across the zones.
2124                  */
2125                 if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2126                         if (has_under_min_watermark_zone)
2127                                 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2128                         else
2129                                 congestion_wait(BLK_RW_ASYNC, HZ/10);
2130                 }
2131
2132                 /*
2133                  * We do this so kswapd doesn't build up large priorities for
2134                  * example when it is freeing in parallel with allocators. It
2135                  * matches the direct reclaim path behaviour in terms of impact
2136                  * on zone->*_priority.
2137                  */
2138                 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2139                         break;
2140         }
2141 out:
2142         /*
2143          * Note within each zone the priority level at which this zone was
2144          * brought into a happy state, so that the next thread which scans this
2145          * zone will start out at that priority level.
2146          */
2147         for (i = 0; i < pgdat->nr_zones; i++) {
2148                 struct zone *zone = pgdat->node_zones + i;
2149
2150                 zone->prev_priority = temp_priority[i];
2151         }
2152         if (!all_zones_ok) {
2153                 cond_resched();
2154
2155                 try_to_freeze();
2156
2157                 /*
2158                  * Fragmentation may mean that the system cannot be
2159                  * rebalanced for high-order allocations in all zones.
2160                  * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2161                  * it means the zones have been fully scanned and are still
2162                  * not balanced. For high-order allocations, there is
2163                  * little point in trying all over again, as kswapd may
2164                  * loop indefinitely.
2165                  *
2166                  * Instead, recheck all watermarks at order-0 as they
2167                  * are the most important. If watermarks are ok, kswapd will go
2168                  * back to sleep. High-order users can still perform direct
2169                  * reclaim if they wish.
2170                  */
2171                 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2172                         order = sc.order = 0;
2173
2174                 goto loop_again;
2175         }
2176
2177         return sc.nr_reclaimed;
2178 }
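
/*
 * Illustrative sketch, not part of vmscan.c: the two-direction zone walk in
 * balance_pgdat() above.  First scan from the highest zone downwards to find
 * the highest zone below its high watermark (end_zone), then apply pressure
 * from ZONE_DMA up to end_zone, so the page allocator, which falls back in
 * the opposite direction, does not allocate behind kswapd.  Standalone C
 * under #if 0 with an assumed per-zone representation.
 */
#if 0 /* example only */
struct example_zone_state { unsigned long free, high_wmark; };

static void example_balance_walk(struct example_zone_state *z, int nr_zones)
{
	int i, end_zone = -1;

	for (i = nr_zones - 1; i >= 0; i--) {
		if (z[i].free < z[i].high_wmark) {
			end_zone = i;	/* highest zone needing work */
			break;
		}
	}

	for (i = 0; i <= end_zone; i++) {
		/* apply reclaim pressure to zone i (dma->highmem order) */
	}
}
#endif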
2179
2180 /*
2181  * The background pageout daemon, started as a kernel thread
2182  * from the init process.
2183  *
2184  * This basically trickles out pages so that we have _some_
2185  * free memory available even if there is no other activity
2186  * that frees anything up. This is needed for things like routing
2187  * etc, where we otherwise might have all activity going on in
2188  * asynchronous contexts that cannot page things out.
2189  *
2190  * If there are applications that are active memory-allocators
2191  * (most normal use), this basically shouldn't matter.
2192  */
2193 static int kswapd(void *p)
2194 {
2195         unsigned long order;
2196         pg_data_t *pgdat = (pg_data_t*)p;
2197         struct task_struct *tsk = current;
2198         DEFINE_WAIT(wait);
2199         struct reclaim_state reclaim_state = {
2200                 .reclaimed_slab = 0,
2201         };
2202         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2203
2204         lockdep_set_current_reclaim_state(GFP_KERNEL);
2205
2206         if (!cpumask_empty(cpumask))
2207                 set_cpus_allowed_ptr(tsk, cpumask);
2208         current->reclaim_state = &reclaim_state;
2209
2210         /*
2211          * Tell the memory management that we're a "memory allocator",
2212          * and that if we need more memory we should get access to it
2213          * regardless (see "__alloc_pages()"). "kswapd" should
2214          * never get caught in the normal page freeing logic.
2215          *
2216          * (Kswapd normally doesn't need memory anyway, but sometimes
2217          * you need a small amount of memory in order to be able to
2218          * page out something else, and this flag essentially protects
2219          * us from recursively trying to free more memory as we're
2220          * trying to free the first piece of memory in the first place).
2221          */
2222         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2223         set_freezable();
2224
2225         order = 0;
2226         for ( ; ; ) {
2227                 unsigned long new_order;
2228                 int ret;
2229
2230                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2231                 new_order = pgdat->kswapd_max_order;
2232                 pgdat->kswapd_max_order = 0;
2233                 if (order < new_order) {
2234                         /*
2235                          * Don't sleep if someone wants a larger 'order'
2236                          * allocation
2237                          */
2238                         order = new_order;
2239                 } else {
2240                         if (!freezing(current) && !kthread_should_stop()) {
2241                                 long remaining = 0;
2242
2243                                 /* Try to sleep for a short interval */
2244                                 if (!sleeping_prematurely(pgdat, order, remaining)) {
2245                                         remaining = schedule_timeout(HZ/10);
2246                                         finish_wait(&pgdat->kswapd_wait, &wait);
2247                                         prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2248                                 }
2249
2250                                 /*
2251                                  * After a short sleep, check if it was a
2252                                  * premature sleep. If not, then go fully
2253                                  * to sleep until explicitly woken up
2254                                  */
2255                                 if (!sleeping_prematurely(pgdat, order, remaining))
2256                                         schedule();
2257                                 else {
2258                                         if (remaining)
2259                                                 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2260                                         else
2261                                                 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2262                                 }
2263                         }
2264
2265                         order = pgdat->kswapd_max_order;
2266                 }
2267                 finish_wait(&pgdat->kswapd_wait, &wait);
2268
2269                 ret = try_to_freeze();
2270                 if (kthread_should_stop())
2271                         break;
2272
2273                 /*
2274                  * We can speed up thawing tasks if we don't call balance_pgdat
2275                  * after returning from the refrigerator
2276                  */
2277                 if (!ret)
2278                         balance_pgdat(pgdat, order);
2279         }
2280         return 0;
2281 }
2282
2283 /*
2284  * A zone is low on free memory, so wake its kswapd task to service it.
2285  */
2286 void wakeup_kswapd(struct zone *zone, int order)
2287 {
2288         pg_data_t *pgdat;
2289
2290         if (!populated_zone(zone))
2291                 return;
2292
2293         pgdat = zone->zone_pgdat;
2294         if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
2295                 return;
2296         if (pgdat->kswapd_max_order < order)
2297                 pgdat->kswapd_max_order = order;
2298         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2299                 return;
2300         if (!waitqueue_active(&pgdat->kswapd_wait))
2301                 return;
2302         wake_up_interruptible(&pgdat->kswapd_wait);
2303 }
2304
2305 /*
2306  * The reclaimable count should be mostly accurate.
2307  * The less reclaimable pages may be
2308  * - mlocked pages, which will be moved to the unevictable list when encountered
2309  * - mapped pages, which may require several passes to be reclaimed
2310  * - dirty pages, which are not "instantly" reclaimable
2311  */
2312 unsigned long global_reclaimable_pages(void)
2313 {
2314         int nr;
2315
2316         nr = global_page_state(NR_ACTIVE_FILE) +
2317              global_page_state(NR_INACTIVE_FILE);
2318
2319         if (nr_swap_pages > 0)
2320                 nr += global_page_state(NR_ACTIVE_ANON) +
2321                       global_page_state(NR_INACTIVE_ANON);
2322
2323         return nr;
2324 }
2325
2326 unsigned long zone_reclaimable_pages(struct zone *zone)
2327 {
2328         int nr;
2329
2330         nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2331              zone_page_state(zone, NR_INACTIVE_FILE);
2332
2333         if (nr_swap_pages > 0)
2334                 nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2335                       zone_page_state(zone, NR_INACTIVE_ANON);
2336
2337         return nr;
2338 }
2339
2340 #ifdef CONFIG_HIBERNATION
2341 /*
2342  * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
2343  * freed pages.
2344  *
2345  * Rather than trying to age LRUs, the aim is to preserve the overall
2346  * LRU order by reclaiming preferentially
2347  * inactive > active > active referenced > active mapped
2348  */
2349 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2350 {
2351         struct reclaim_state reclaim_state;
2352         struct scan_control sc = {
2353                 .gfp_mask = GFP_HIGHUSER_MOVABLE,
2354                 .may_swap = 1,
2355                 .may_unmap = 1,
2356                 .may_writepage = 1,
2357                 .swap_cluster_max = SWAP_CLUSTER_MAX,
2358                 .nr_to_reclaim = nr_to_reclaim,
2359                 .hibernation_mode = 1,
2360                 .swappiness = vm_swappiness,
2361                 .order = 0,
2362                 .isolate_pages = isolate_pages_global,
2363         };
2364         struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2365         struct task_struct *p = current;
2366         unsigned long nr_reclaimed;
2367
2368         p->flags |= PF_MEMALLOC;
2369         lockdep_set_current_reclaim_state(sc.gfp_mask);
2370         reclaim_state.reclaimed_slab = 0;
2371         p->reclaim_state = &reclaim_state;
2372
2373         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2374
2375         p->reclaim_state = NULL;
2376         lockdep_clear_current_reclaim_state();
2377         p->flags &= ~PF_MEMALLOC;
2378
2379         return nr_reclaimed;
2380 }
2381 #endif /* CONFIG_HIBERNATION */
2382
2383 /* It's optimal to keep kswapds on the same CPUs as their memory, but
2384    not required for correctness.  So if the last cpu in a node goes
2385    away, we get changed to run anywhere: as the first one comes back,
2386    restore their cpu bindings. */
2387 static int __devinit cpu_callback(struct notifier_block *nfb,
2388                                   unsigned long action, void *hcpu)
2389 {
2390         int nid;
2391
2392         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2393                 for_each_node_state(nid, N_HIGH_MEMORY) {
2394                         pg_data_t *pgdat = NODE_DATA(nid);
2395                         const struct cpumask *mask;
2396
2397                         mask = cpumask_of_node(pgdat->node_id);
2398
2399                         if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2400                                 /* One of our CPUs online: restore mask */
2401                                 set_cpus_allowed_ptr(pgdat->kswapd, mask);
2402                 }
2403         }
2404         return NOTIFY_OK;
2405 }
2406
2407 /*
2408  * This kswapd start function will be called by init and node-hot-add.
2409  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
2410  */
2411 int kswapd_run(int nid)
2412 {
2413         pg_data_t *pgdat = NODE_DATA(nid);
2414         int ret = 0;
2415
2416         if (pgdat->kswapd)
2417                 return 0;
2418
2419         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2420         if (IS_ERR(pgdat->kswapd)) {
2421                 /* failure at boot is fatal */
2422                 BUG_ON(system_state == SYSTEM_BOOTING);
2423                 printk("Failed to start kswapd on node %d\n", nid);
2424                 ret = -1;
2425         }
2426         return ret;
2427 }
2428
2429 /*
2430  * Called by memory hotplug when all memory in a node is offlined.
2431  */
2432 void kswapd_stop(int nid)
2433 {
2434         struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
2435
2436         if (kswapd)
2437                 kthread_stop(kswapd);
2438 }
2439
2440 static int __init kswapd_init(void)
2441 {
2442         int nid;
2443
2444         swap_setup();
2445         for_each_node_state(nid, N_HIGH_MEMORY)
2446                 kswapd_run(nid);
2447         hotcpu_notifier(cpu_callback, 0);
2448         return 0;
2449 }
2450
2451 module_init(kswapd_init)
2452
2453 #ifdef CONFIG_NUMA
2454 /*
2455  * Zone reclaim mode
2456  *
2457  * If non-zero call zone_reclaim when the number of free pages falls below
2458  * the watermarks.
2459  */
2460 int zone_reclaim_mode __read_mostly;
2461
2462 #define RECLAIM_OFF 0
2463 #define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
2464 #define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
2465 #define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */
2466
2467 /*
2468  * Priority for ZONE_RECLAIM. This determines the fraction of pages
2469  * of a node considered for each zone_reclaim. 4 scans 1/16th of
2470  * a zone.
2471  */
2472 #define ZONE_RECLAIM_PRIORITY 4
2473
2474 /*
2475  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2476  * occur.
2477  */
2478 int sysctl_min_unmapped_ratio = 1;
2479
2480 /*
2481  * If the number of slab pages in a zone grows beyond this percentage then
2482  * slab reclaim needs to occur.
2483  */
2484 int sysctl_min_slab_ratio = 5;
2485
2486 static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
2487 {
2488         unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
2489         unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
2490                 zone_page_state(zone, NR_ACTIVE_FILE);
2491
2492         /*
2493          * It's possible for there to be more file mapped pages than
2494          * accounted for by the pages on the file LRU lists because
2495          * tmpfs pages accounted for as ANON can also be FILE_MAPPED
2496          */
2497         return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
2498 }
2499
2500 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
2501 static long zone_pagecache_reclaimable(struct zone *zone)
2502 {
2503         long nr_pagecache_reclaimable;
2504         long delta = 0;
2505
2506         /*
2507          * If RECLAIM_SWAP is set, then all file pages are considered
2508          * potentially reclaimable. Otherwise, we have to worry about
2509          * pages like swapcache, and zone_unmapped_file_pages() provides
2510          * a better estimate.
2511          */
2512         if (zone_reclaim_mode & RECLAIM_SWAP)
2513                 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
2514         else
2515                 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
2516
2517         /* If we can't clean pages, remove dirty pages from consideration */
2518         if (!(zone_reclaim_mode & RECLAIM_WRITE))
2519                 delta += zone_page_state(zone, NR_FILE_DIRTY);
2520
2521         /* Watch for any possible underflows due to delta */
2522         if (unlikely(delta > nr_pagecache_reclaimable))
2523                 delta = nr_pagecache_reclaimable;
2524
2525         return nr_pagecache_reclaimable - delta;
2526 }
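
/*
 * Illustrative sketch, not part of vmscan.c: the estimate computed by
 * zone_pagecache_reclaimable() above with the default zone_reclaim_mode,
 * i.e. neither RECLAIM_WRITE nor RECLAIM_SWAP set.  Unmapped file pages
 * form the base, dirty pages are subtracted, and the delta is clamped so
 * the subtraction can never underflow.  Standalone C under #if 0; sample
 * values are assumptions.
 */
#if 0 /* example only */
static long example_pagecache_reclaimable(unsigned long file_lru,
					  unsigned long file_mapped,
					  unsigned long file_dirty)
{
	/* e.g. file_lru = 10000, file_mapped = 4000, file_dirty = 1000 */
	unsigned long unmapped = file_lru > file_mapped ?
					file_lru - file_mapped : 0;
	unsigned long delta = file_dirty;

	if (delta > unmapped)
		delta = unmapped;
	return unmapped - delta;	/* 6000 - 1000 == 5000 here */
}
#endif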
2527
2528 /*
2529  * Try to free up some pages from this zone through reclaim.
2530  */
2531 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2532 {
2533         /* Minimum pages needed in order to stay on node */
2534         const unsigned long nr_pages = 1 << order;
2535         struct task_struct *p = current;
2536         struct reclaim_state reclaim_state;
2537         int priority;
2538         struct scan_control sc = {
2539                 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2540                 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2541                 .may_swap = 1,
2542                 .swap_cluster_max = max_t(unsigned long, nr_pages,
2543                                        SWAP_CLUSTER_MAX),
2544                 .nr_to_reclaim = max_t(unsigned long, nr_pages,
2545                                        SWAP_CLUSTER_MAX),
2546                 .gfp_mask = gfp_mask,
2547                 .swappiness = vm_swappiness,
2548                 .order = order,
2549                 .isolate_pages = isolate_pages_global,
2550         };
2551         unsigned long slab_reclaimable;
2552
2553         disable_swap_token();
2554         cond_resched();
2555         /*
2556          * We need to be able to allocate from the reserves for RECLAIM_SWAP
2557          * and we also need to be able to write out pages for RECLAIM_WRITE
2558          * and RECLAIM_SWAP.
2559          */
2560         p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2561         reclaim_state.reclaimed_slab = 0;
2562         p->reclaim_state = &reclaim_state;
2563
2564         if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
2565                 /*
2566                  * Free memory by calling shrink zone with increasing
2567                  * priorities until we have enough memory freed.
2568                  */
2569                 priority = ZONE_RECLAIM_PRIORITY;
2570                 do {
2571                         note_zone_scanning_priority(zone, priority);
2572                         shrink_zone(priority, zone, &sc);
2573                         priority--;
2574                 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
2575         }
2576
2577         slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2578         if (slab_reclaimable > zone->min_slab_pages) {
2579                 /*
2580                  * shrink_slab() does not currently allow us to determine how
2581                  * many pages were freed in this zone. So we take the current
2582                  * number of slab pages and shake the slab until it is reduced
2583                  * by the same nr_pages that we used for reclaiming unmapped
2584                  * pages.
2585                  *
2586                  * Note that shrink_slab will free memory on all zones and may
2587                  * take a long time.
2588                  */
2589                 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
2590                         zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
2591                                 slab_reclaimable - nr_pages)
2592                         ;
2593
2594                 /*
2595                  * Update nr_reclaimed by the number of slab pages we
2596                  * reclaimed from this zone.
2597                  */
2598                 sc.nr_reclaimed += slab_reclaimable -
2599                         zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2600         }
2601
2602         p->reclaim_state = NULL;
2603         current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2604         return sc.nr_reclaimed >= nr_pages;
2605 }
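
/*
 * Illustrative sketch, not part of vmscan.c: what ZONE_RECLAIM_PRIORITY
 * means for the priority loop in __zone_reclaim() above.  Starting at
 * priority 4, each shrink_zone() pass may scan roughly lru_pages >> priority
 * pages, i.e. 1/16th of the zone first, then 1/8th, and so on down to the
 * whole zone at priority 0.  Standalone C under #if 0 with an assumed zone
 * size.
 */
#if 0 /* example only */
static void example_zone_reclaim_passes(void)
{
	unsigned long lru_pages = 65536;	/* assumed zone LRU size */
	int priority;

	for (priority = 4; priority >= 0; priority--) {
		unsigned long scan = lru_pages >> priority;

		(void)scan;	/* 4096, 8192, 16384, 32768, 65536 */
	}
}
#endif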
2606
2607 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2608 {
2609         int node_id;
2610         int ret;
2611
2612         /*
2613          * Zone reclaim reclaims unmapped file backed pages and
2614          * slab pages if we are over the defined limits.
2615          *
2616          * A small portion of unmapped file backed pages is needed for
2617          * file I/O otherwise pages read by file I/O will be immediately
2618          * thrown out if the zone is overallocated. So we do not reclaim
2619          * if less than a specified percentage of the zone is used by
2620          * unmapped file backed pages.
2621          */
2622         if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
2623             zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
2624                 return ZONE_RECLAIM_FULL;
2625
2626         if (zone_is_all_unreclaimable(zone))
2627                 return ZONE_RECLAIM_FULL;
2628
2629         /*
2630          * Do not scan if the allocation should not be delayed.
2631          */
2632         if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2633                 return ZONE_RECLAIM_NOSCAN;
2634
2635         /*
2636          * Only run zone reclaim on the local zone or on zones that do not
2637          * have associated processors. This will favor the local processor
2638          * over remote processors and spread off-node memory allocations
2639          * as widely as possible.
2640          */
2641         node_id = zone_to_nid(zone);
2642         if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2643                 return ZONE_RECLAIM_NOSCAN;
2644
2645         if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2646                 return ZONE_RECLAIM_NOSCAN;
2647
2648         ret = __zone_reclaim(zone, gfp_mask, order);
2649         zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2650
2651         if (!ret)
2652                 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
2653
2654         return ret;
2655 }
2656 #endif
2657
2658 /*
2659  * page_evictable - test whether a page is evictable
2660  * @page: the page to test
2661  * @vma: the VMA in which the page is or will be mapped, may be NULL
2662  *
2663  * Test whether page is evictable--i.e., should be placed on active/inactive
2664  * lists vs unevictable list.  The vma argument is !NULL when called from the
2665  * fault path to determine how to instantiate a new page.
2666  *
2667  * Reasons page might not be evictable:
2668  * (1) page's mapping marked unevictable
2669  * (2) page is part of an mlocked VMA
2670  *
2671  */
2672 int page_evictable(struct page *page, struct vm_area_struct *vma)
2673 {
2674
2675         if (mapping_unevictable(page_mapping(page)))
2676                 return 0;
2677
2678         if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
2679                 return 0;
2680
2681         return 1;
2682 }
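
/*
 * A minimal sketch of the decision putback paths make with
 * page_evictable(): send the page to the unevictable list when the test
 * fails, otherwise to the matching anon/file inactive or active list.
 * example_putback_lru() is a hypothetical name; the real logic lives in
 * putback_lru_page() and mm_inline.h, and LRU_ACTIVE is the active-list
 * offset assumed from mmzone.h.
 */
static enum lru_list example_putback_lru(struct page *page)
{
        enum lru_list lru;

        if (!page_evictable(page, NULL))
                return LRU_UNEVICTABLE;

        /* LRU_INACTIVE_ANON or LRU_INACTIVE_FILE */
        lru = page_lru_base_type(page);
        if (PageActive(page))
                lru += LRU_ACTIVE;      /* promote to the matching active list */
        return lru;
}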
2683
2684 /**
2685  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
2686  * @page: page to check evictability and move to appropriate lru list
2687  * @zone: zone page is in
2688  *
2689  * Checks a page for evictability and moves the page to the appropriate
2690  * zone lru list.
2691  *
2692  * Restrictions: zone->lru_lock must be held, page must be on LRU and must
2693  * have PageUnevictable set.
2694  */
2695 static void check_move_unevictable_page(struct page *page, struct zone *zone)
2696 {
2697         VM_BUG_ON(PageActive(page));
2698
2699 retry:
2700         ClearPageUnevictable(page);
2701         if (page_evictable(page, NULL)) {
2702                 enum lru_list l = page_lru_base_type(page);
2703
2704                 __dec_zone_state(zone, NR_UNEVICTABLE);
2705                 list_move(&page->lru, &zone->lru[l].list);
2706                 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
2707                 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
2708                 __count_vm_event(UNEVICTABLE_PGRESCUED);
2709         } else {
2710                 /*
2711                  * rotate unevictable list
2712                  */
2713                 SetPageUnevictable(page);
2714                 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
2715                 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
2716                 if (page_evictable(page, NULL))
2717                         goto retry;
2718         }
2719 }
2720
2721 /**
2722  * scan_mapping_unevictable_pages - scan an address space for evictable pages
2723  * @mapping: struct address_space to scan for evictable pages
2724  *
2725  * Scan all pages in mapping.  Check unevictable pages for
2726  * evictability and move them to the appropriate zone lru list.
2727  */
2728 void scan_mapping_unevictable_pages(struct address_space *mapping)
2729 {
2730         pgoff_t next = 0;
2731         pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2732                          PAGE_CACHE_SHIFT;
2733         struct zone *zone;
2734         struct pagevec pvec;
2735
2736         if (mapping->nrpages == 0)
2737                 return;
2738
2739         pagevec_init(&pvec, 0);
2740         while (next < end &&
2741                 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2742                 int i;
2743                 int pg_scanned = 0;
2744
2745                 zone = NULL;
2746
2747                 for (i = 0; i < pagevec_count(&pvec); i++) {
2748                         struct page *page = pvec.pages[i];
2749                         pgoff_t page_index = page->index;
2750                         struct zone *pagezone = page_zone(page);
2751
2752                         pg_scanned++;
2753                         if (page_index > next)
2754                                 next = page_index;
2755                         next++;
2756
2757                         if (pagezone != zone) {
2758                                 if (zone)
2759                                         spin_unlock_irq(&zone->lru_lock);
2760                                 zone = pagezone;
2761                                 spin_lock_irq(&zone->lru_lock);
2762                         }
2763
2764                         if (PageLRU(page) && PageUnevictable(page))
2765                                 check_move_unevictable_page(page, zone);
2766                 }
2767                 if (zone)
2768                         spin_unlock_irq(&zone->lru_lock);
2769                 pagevec_release(&pvec);
2770
2771                 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2772         }
2773
2774 }
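
/*
 * A minimal sketch of the intended caller pattern for
 * scan_mapping_unevictable_pages(): once a mapping stops being pinned
 * (a SHM_UNLOCK style path, for example), clear the mapping's
 * unevictable flag and then rescue the pages that were already stranded
 * on the unevictable lists.  example_unpin_mapping() is a hypothetical
 * name, not a real caller.
 */
static void example_unpin_mapping(struct address_space *mapping)
{
        /* newly added pages will go to the normal LRU lists again */
        mapping_clear_unevictable(mapping);
        /* pages already marked unevictable must be rescanned and rescued */
        scan_mapping_unevictable_pages(mapping);
}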
2775
2776 /**
2777  * scan_zone_unevictable_pages - check unevictable list for evictable pages
2778  * @zone: zone whose unevictable list is to be scanned
2779  *
2780  * Scan @zone's unevictable LRU list to check for pages that have become
2781  * evictable.  Move those that have become evictable to @zone's inactive
2782  * list, where they become candidates for reclaim unless
2783  * shrink_inactive_list() decides to reactivate them.  Pages that are
2784  * still unevictable are rotated back onto @zone's unevictable list.
2785  */
2786 #define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
2787 static void scan_zone_unevictable_pages(struct zone *zone)
2788 {
2789         struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
2790         unsigned long scan;
2791         unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
2792
2793         while (nr_to_scan > 0) {
2794                 unsigned long batch_size = min(nr_to_scan,
2795                                                 SCAN_UNEVICTABLE_BATCH_SIZE);
2796
2797                 spin_lock_irq(&zone->lru_lock);
2798                 for (scan = 0;  scan < batch_size; scan++) {
2799                         struct page *page = lru_to_page(l_unevictable);
2800
2801                         if (!trylock_page(page))
2802                                 continue;
2803
2804                         prefetchw_prev_lru_page(page, l_unevictable, flags);
2805
2806                         if (likely(PageLRU(page) && PageUnevictable(page)))
2807                                 check_move_unevictable_page(page, zone);
2808
2809                         unlock_page(page);
2810                 }
2811                 spin_unlock_irq(&zone->lru_lock);
2812
2813                 nr_to_scan -= batch_size;
2814         }
2815 }
2816
2817
2818 /**
2819  * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
2820  *
2821  * A really big hammer:  scan all zones' unevictable LRU lists to check for
2822  * pages that have become evictable.  Move those back to the zones'
2823  * inactive list where they become candidates for reclaim.
2824  * This occurs when, e.g., we have unswappable pages on the unevictable lists,
2825  * and we add swap to the system.  As such, it runs in the context of a task
2826  * that has possibly/probably made some previously unevictable pages
2827  * evictable.
2828  */
2829 static void scan_all_zones_unevictable_pages(void)
2830 {
2831         struct zone *zone;
2832
2833         for_each_zone(zone) {
2834                 scan_zone_unevictable_pages(zone);
2835         }
2836 }
2837
2838 /*
2839  * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
2840  * all nodes' unevictable lists for evictable pages
2841  */
2842 unsigned long scan_unevictable_pages;
2843
2844 int scan_unevictable_handler(struct ctl_table *table, int write,
2845                            void __user *buffer,
2846                            size_t *length, loff_t *ppos)
2847 {
2848         proc_doulongvec_minmax(table, write, buffer, length, ppos);
2849
2850         if (write && *(unsigned long *)table->data)
2851                 scan_all_zones_unevictable_pages();
2852
2853         scan_unevictable_pages = 0;
2854         return 0;
2855 }
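
/*
 * A rough sketch of how the handler above is expected to be wired into
 * the vm sysctl table (kernel/sysctl.c); the entry below is illustrative
 * and may not match the real table exactly.  Writing a non-zero value to
 * /proc/sys/vm/scan_unevictable_pages triggers
 * scan_all_zones_unevictable_pages(), and the knob always reads back as
 * zero afterwards.
 */
static struct ctl_table example_vm_table[] = {
        {
                .procname       = "scan_unevictable_pages",
                .data           = &scan_unevictable_pages,
                .maxlen         = sizeof(scan_unevictable_pages),
                .mode           = 0644,
                .proc_handler   = scan_unevictable_handler,
        },
        { }
};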
2856
2857 /*
2858  * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
2859  * a specified node's per zone unevictable lists for evictable pages.
2860  */
2861
2862 static ssize_t read_scan_unevictable_node(struct sys_device *dev,
2863                                           struct sysdev_attribute *attr,
2864                                           char *buf)
2865 {
2866         return sprintf(buf, "0\n");     /* always zero; should fit... */
2867 }
2868
2869 static ssize_t write_scan_unevictable_node(struct sys_device *dev,
2870                                            struct sysdev_attribute *attr,
2871                                         const char *buf, size_t count)
2872 {
2873         struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
2874         struct zone *zone;
2875         unsigned long req;
2876         int err = strict_strtoul(buf, 10, &req);
2877
2878         if (err || !req)
2879                 return 1;       /* parse error or zero is a no-op */
2880
2881         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2882                 if (!populated_zone(zone))
2883                         continue;
2884                 scan_zone_unevictable_pages(zone);
2885         }
2886         return 1;
2887 }
2888
2889
2890 static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
2891                         read_scan_unevictable_node,
2892                         write_scan_unevictable_node);
2893
2894 int scan_unevictable_register_node(struct node *node)
2895 {
2896         return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
2897 }
2898
2899 void scan_unevictable_unregister_node(struct node *node)
2900 {
2901         sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
2902 }
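
/*
 * A minimal sketch, assuming the node driver (drivers/base/node.c) pairs
 * these calls with node registration and removal; the
 * example_node_add()/example_node_remove() wrappers are hypothetical.
 * Writing a non-zero value to the resulting
 * /sys/devices/system/node/nodeN/scan_unevictable_pages file rescans
 * only that node's zones via write_scan_unevictable_node().
 */
static int example_node_add(struct node *node)
{
        /* expose scan_unevictable_pages alongside the other node attributes */
        return scan_unevictable_register_node(node);
}

static void example_node_remove(struct node *node)
{
        scan_unevictable_unregister_node(node);
}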
2903