memcg: add mem_cgroup_zone_nr_pages()
1 /*
2  *  linux/mm/vmscan.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  *
6  *  Swap reorganised 29.12.95, Stephen Tweedie.
7  *  kswapd added: 7.1.96  sct
8  *  Removed kswapd_ctl limits, and swap out as many pages as needed
9  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11  *  Multiqueue VM started 5.8.00, Rik van Riel.
12  */
13
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/init.h>
21 #include <linux/highmem.h>
22 #include <linux/vmstat.h>
23 #include <linux/file.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/buffer_head.h>  /* for try_to_release_page(),
27                                         buffer_heads_over_limit */
28 #include <linux/mm_inline.h>
29 #include <linux/pagevec.h>
30 #include <linux/backing-dev.h>
31 #include <linux/rmap.h>
32 #include <linux/topology.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/notifier.h>
36 #include <linux/rwsem.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/freezer.h>
40 #include <linux/memcontrol.h>
41 #include <linux/delayacct.h>
42 #include <linux/sysctl.h>
43
44 #include <asm/tlbflush.h>
45 #include <asm/div64.h>
46
47 #include <linux/swapops.h>
48
49 #include "internal.h"
50
51 struct scan_control {
52         /* Incremented by the number of inactive pages that were scanned */
53         unsigned long nr_scanned;
54
55         /* Number of pages freed so far during a call to shrink_zones() */
56         unsigned long nr_reclaimed;
57
58         /* This context's GFP mask */
59         gfp_t gfp_mask;
60
61         int may_writepage;
62
63         /* Can pages be swapped as part of reclaim? */
64         int may_swap;
65
66         /* This context's SWAP_CLUSTER_MAX. If freeing memory for
67          * suspend, we effectively ignore SWAP_CLUSTER_MAX.
68          * In this context, it doesn't matter that we scan the
69          * whole list at once. */
70         int swap_cluster_max;
71
72         int swappiness;
73
74         int all_unreclaimable;
75
76         int order;
77
78         /* Which cgroup do we reclaim from */
79         struct mem_cgroup *mem_cgroup;
80
81         /* Pluggable isolate pages callback */
82         unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
83                         unsigned long *scanned, int order, int mode,
84                         struct zone *z, struct mem_cgroup *mem_cont,
85                         int active, int file);
86 };
87
88 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
89
90 #ifdef ARCH_HAS_PREFETCH
91 #define prefetch_prev_lru_page(_page, _base, _field)                    \
92         do {                                                            \
93                 if ((_page)->lru.prev != _base) {                       \
94                         struct page *prev;                              \
95                                                                         \
96                         prev = lru_to_page(&(_page->lru));              \
97                         prefetch(&prev->_field);                        \
98                 }                                                       \
99         } while (0)
100 #else
101 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
102 #endif
103
104 #ifdef ARCH_HAS_PREFETCHW
105 #define prefetchw_prev_lru_page(_page, _base, _field)                   \
106         do {                                                            \
107                 if ((_page)->lru.prev != _base) {                       \
108                         struct page *prev;                              \
109                                                                         \
110                         prev = lru_to_page(&(_page->lru));              \
111                         prefetchw(&prev->_field);                       \
112                 }                                                       \
113         } while (0)
114 #else
115 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
116 #endif
117
118 /*
119  * From 0 .. 100.  Higher means more swappy.
120  */
121 int vm_swappiness = 60;
122 long vm_total_pages;    /* The total number of pages which the VM controls */
123
124 static LIST_HEAD(shrinker_list);
125 static DECLARE_RWSEM(shrinker_rwsem);
126
127 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
128 #define scan_global_lru(sc)     (!(sc)->mem_cgroup)
129 #else
130 #define scan_global_lru(sc)     (1)
131 #endif
132
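/*
 * Both global and memcg reclaim currently use the zone-wide reclaim
 * statistics.
 */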
133 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
134                                                   struct scan_control *sc)
135 {
136         return &zone->reclaim_stat;
137 }
138
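/*
 * Number of pages on the given LRU list: use the memory cgroup's per-zone
 * count when doing memcg reclaim, the zone's vmstat counter otherwise.
 */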
139 static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
140                                    enum lru_list lru)
141 {
142         if (!scan_global_lru(sc))
143                 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
144
145         return zone_page_state(zone, NR_LRU_BASE + lru);
146 }
147
148
149 /*
150  * Add a shrinker callback to be called from the vm
151  */
152 void register_shrinker(struct shrinker *shrinker)
153 {
154         shrinker->nr = 0;
155         down_write(&shrinker_rwsem);
156         list_add_tail(&shrinker->list, &shrinker_list);
157         up_write(&shrinker_rwsem);
158 }
159 EXPORT_SYMBOL(register_shrinker);
160
161 /*
162  * Remove one
163  */
164 void unregister_shrinker(struct shrinker *shrinker)
165 {
166         down_write(&shrinker_rwsem);
167         list_del(&shrinker->list);
168         up_write(&shrinker_rwsem);
169 }
170 EXPORT_SYMBOL(unregister_shrinker);
171
172 #define SHRINK_BATCH 128
173 /*
174  * Call the shrink functions to age shrinkable caches
175  *
176  * Here we assume it costs one seek to replace a lru page and that it also
177  * takes a seek to recreate a cache object.  With this in mind we age equal
178  * percentages of the lru and ageable caches.  This should balance the seeks
179  * generated by these structures.
180  *
181  * If the vm encountered mapped pages on the LRU it increases the pressure on
182  * slab to avoid swapping.
183  *
184  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
185  *
186  * `lru_pages' represents the number of on-LRU pages in all the zones which
187  * are eligible for the caller's allocation attempt.  It is used for balancing
188  * slab reclaim versus page reclaim.
189  *
190  * Returns the number of slab objects which we shrunk.
191  */
192 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
193                         unsigned long lru_pages)
194 {
195         struct shrinker *shrinker;
196         unsigned long ret = 0;
197
198         if (scanned == 0)
199                 scanned = SWAP_CLUSTER_MAX;
200
201         if (!down_read_trylock(&shrinker_rwsem))
202                 return 1;       /* Assume we'll be able to shrink next time */
203
204         list_for_each_entry(shrinker, &shrinker_list, list) {
205                 unsigned long long delta;
206                 unsigned long total_scan;
207                 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
208
209                 delta = (4 * scanned) / shrinker->seeks;
210                 delta *= max_pass;
211                 do_div(delta, lru_pages + 1);
212                 shrinker->nr += delta;
213                 if (shrinker->nr < 0) {
214                         printk(KERN_ERR "%s: nr=%ld\n",
215                                         __func__, shrinker->nr);
216                         shrinker->nr = max_pass;
217                 }
218
219                 /*
220                  * Avoid risking looping forever due to a too-large nr value:
221                  * never try to free more than twice the estimated number of
222                  * freeable entries.
223                  */
224                 if (shrinker->nr > max_pass * 2)
225                         shrinker->nr = max_pass * 2;
226
227                 total_scan = shrinker->nr;
228                 shrinker->nr = 0;
229
230                 while (total_scan >= SHRINK_BATCH) {
231                         long this_scan = SHRINK_BATCH;
232                         int shrink_ret;
233                         int nr_before;
234
235                         nr_before = (*shrinker->shrink)(0, gfp_mask);
236                         shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
237                         if (shrink_ret == -1)
238                                 break;
239                         if (shrink_ret < nr_before)
240                                 ret += nr_before - shrink_ret;
241                         count_vm_events(SLABS_SCANNED, this_scan);
242                         total_scan -= this_scan;
243
244                         cond_resched();
245                 }
246
247                 shrinker->nr += total_scan;
248         }
249         up_read(&shrinker_rwsem);
250         return ret;
251 }
252
253 /* Called without lock on whether page is mapped, so answer is unstable */
254 static inline int page_mapping_inuse(struct page *page)
255 {
256         struct address_space *mapping;
257
258         /* Page is in somebody's page tables. */
259         if (page_mapped(page))
260                 return 1;
261
262         /* Be more reluctant to reclaim swapcache than pagecache */
263         if (PageSwapCache(page))
264                 return 1;
265
266         mapping = page_mapping(page);
267         if (!mapping)
268                 return 0;
269
270         /* File is mmap'd by somebody? */
271         return mapping_mapped(mapping);
272 }
273
274 static inline int is_page_cache_freeable(struct page *page)
275 {
276         return page_count(page) - !!PagePrivate(page) == 2;
277 }
278
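/*
 * Writing to this queue is allowed if the current task has PF_SWAPWRITE
 * set, if the queue is not write-congested, or if it is the queue that
 * current is already writing to (current->backing_dev_info).
 */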
279 static int may_write_to_queue(struct backing_dev_info *bdi)
280 {
281         if (current->flags & PF_SWAPWRITE)
282                 return 1;
283         if (!bdi_write_congested(bdi))
284                 return 1;
285         if (bdi == current->backing_dev_info)
286                 return 1;
287         return 0;
288 }
289
290 /*
291  * We detected a synchronous write error writing a page out.  Probably
292  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
293  * fsync(), msync() or close().
294  *
295  * The tricky part is that after writepage we cannot touch the mapping: nothing
296  * prevents it from being freed up.  But we have a ref on the page and once
297  * that page is locked, the mapping is pinned.
298  *
299  * We're allowed to run sleeping lock_page() here because we know the caller has
300  * __GFP_FS.
301  */
302 static void handle_write_error(struct address_space *mapping,
303                                 struct page *page, int error)
304 {
305         lock_page(page);
306         if (page_mapping(page) == mapping)
307                 mapping_set_error(mapping, error);
308         unlock_page(page);
309 }
310
311 /* Request for sync pageout. */
312 enum pageout_io {
313         PAGEOUT_IO_ASYNC,
314         PAGEOUT_IO_SYNC,
315 };
316
317 /* possible outcome of pageout() */
318 typedef enum {
319         /* failed to write page out, page is locked */
320         PAGE_KEEP,
321         /* move page to the active list, page is locked */
322         PAGE_ACTIVATE,
323         /* page has been sent to the disk successfully, page is unlocked */
324         PAGE_SUCCESS,
325         /* page is clean and locked */
326         PAGE_CLEAN,
327 } pageout_t;
328
329 /*
330  * pageout is called by shrink_page_list() for each dirty page.
331  * Calls ->writepage().
332  */
333 static pageout_t pageout(struct page *page, struct address_space *mapping,
334                                                 enum pageout_io sync_writeback)
335 {
336         /*
337          * If the page is dirty, only perform writeback if that write
338          * will be non-blocking, to prevent this allocation from being
339          * stalled by pagecache activity.  But note that there may be
340          * stalls if we need to run get_block().  We could test
341          * PagePrivate for that.
342          *
343          * If this process is currently in generic_file_write() against
344          * this page's queue, we can perform writeback even if that
345          * will block.
346          *
347          * If the page is swapcache, write it back even if that would
348          * block, for some throttling. This happens by accident, because
349          * swap_backing_dev_info is bust: it doesn't reflect the
350          * congestion state of the swapdevs.  Easy to fix, if needed.
351          * See swapfile.c:page_queue_congested().
352          */
353         if (!is_page_cache_freeable(page))
354                 return PAGE_KEEP;
355         if (!mapping) {
356                 /*
357                  * Some data journaling orphaned pages can have
358                  * page->mapping == NULL while being dirty with clean buffers.
359                  */
360                 if (PagePrivate(page)) {
361                         if (try_to_free_buffers(page)) {
362                                 ClearPageDirty(page);
363                                 printk("%s: orphaned page\n", __func__);
364                                 return PAGE_CLEAN;
365                         }
366                 }
367                 return PAGE_KEEP;
368         }
369         if (mapping->a_ops->writepage == NULL)
370                 return PAGE_ACTIVATE;
371         if (!may_write_to_queue(mapping->backing_dev_info))
372                 return PAGE_KEEP;
373
374         if (clear_page_dirty_for_io(page)) {
375                 int res;
376                 struct writeback_control wbc = {
377                         .sync_mode = WB_SYNC_NONE,
378                         .nr_to_write = SWAP_CLUSTER_MAX,
379                         .range_start = 0,
380                         .range_end = LLONG_MAX,
381                         .nonblocking = 1,
382                         .for_reclaim = 1,
383                 };
384
385                 SetPageReclaim(page);
386                 res = mapping->a_ops->writepage(page, &wbc);
387                 if (res < 0)
388                         handle_write_error(mapping, page, res);
389                 if (res == AOP_WRITEPAGE_ACTIVATE) {
390                         ClearPageReclaim(page);
391                         return PAGE_ACTIVATE;
392                 }
393
394                 /*
395                  * Wait on writeback if requested to. This happens when
396                  * direct reclaiming a large contiguous area and the
397                  * first attempt to free a range of pages fails.
398                  */
399                 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
400                         wait_on_page_writeback(page);
401
402                 if (!PageWriteback(page)) {
403                         /* synchronous write or broken a_ops? */
404                         ClearPageReclaim(page);
405                 }
406                 inc_zone_page_state(page, NR_VMSCAN_WRITE);
407                 return PAGE_SUCCESS;
408         }
409
410         return PAGE_CLEAN;
411 }
412
413 /*
414  * Same as remove_mapping, but if the page is removed from the mapping, it
415  * gets returned with a refcount of 0.
416  */
417 static int __remove_mapping(struct address_space *mapping, struct page *page)
418 {
419         BUG_ON(!PageLocked(page));
420         BUG_ON(mapping != page_mapping(page));
421
422         spin_lock_irq(&mapping->tree_lock);
423         /*
424          * The non-racy check for a busy page.
425          *
426          * Must be careful with the order of the tests. When someone has
427          * a ref to the page, it may be possible that they dirty it then
428          * drop the reference. So if PageDirty is tested before page_count
429          * here, then the following race may occur:
430          *
431          * get_user_pages(&page);
432          * [user mapping goes away]
433          * write_to(page);
434          *                              !PageDirty(page)    [good]
435          * SetPageDirty(page);
436          * put_page(page);
437          *                              !page_count(page)   [good, discard it]
438          *
439          * [oops, our write_to data is lost]
440          *
441          * Reversing the order of the tests ensures such a situation cannot
442          * escape unnoticed. The smp_rmb is needed to ensure the page->flags
443          * load is not satisfied before that of page->_count.
444          *
445          * Note that if SetPageDirty is always performed via set_page_dirty,
446          * and thus under tree_lock, then this ordering is not required.
447          */
448         if (!page_freeze_refs(page, 2))
449                 goto cannot_free;
450         /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
451         if (unlikely(PageDirty(page))) {
452                 page_unfreeze_refs(page, 2);
453                 goto cannot_free;
454         }
455
456         if (PageSwapCache(page)) {
457                 swp_entry_t swap = { .val = page_private(page) };
458                 __delete_from_swap_cache(page);
459                 spin_unlock_irq(&mapping->tree_lock);
460                 swap_free(swap);
461         } else {
462                 __remove_from_page_cache(page);
463                 spin_unlock_irq(&mapping->tree_lock);
464         }
465
466         return 1;
467
468 cannot_free:
469         spin_unlock_irq(&mapping->tree_lock);
470         return 0;
471 }
472
473 /*
474  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
475  * someone else has a ref on the page, abort and return 0.  If it was
476  * successfully detached, return 1.  Assumes the caller has a single ref on
477  * this page.
478  */
479 int remove_mapping(struct address_space *mapping, struct page *page)
480 {
481         if (__remove_mapping(mapping, page)) {
482                 /*
483                  * Unfreezing the refcount with 1 rather than 2 effectively
484                  * drops the pagecache ref for us without requiring another
485                  * atomic operation.
486                  */
487                 page_unfreeze_refs(page, 1);
488                 return 1;
489         }
490         return 0;
491 }
492
493 /**
494  * putback_lru_page - put previously isolated page onto appropriate LRU list
495  * @page: page to be put back to appropriate lru list
496  *
497  * Add previously isolated @page to appropriate LRU list.
498  * Page may still be unevictable for other reasons.
499  *
500  * lru_lock must not be held, interrupts must be enabled.
501  */
502 #ifdef CONFIG_UNEVICTABLE_LRU
503 void putback_lru_page(struct page *page)
504 {
505         int lru;
506         int active = !!TestClearPageActive(page);
507         int was_unevictable = PageUnevictable(page);
508
509         VM_BUG_ON(PageLRU(page));
510
511 redo:
512         ClearPageUnevictable(page);
513
514         if (page_evictable(page, NULL)) {
515                 /*
516                  * For evictable pages, we can use the cache.
517                  * In the event of a race, the worst case is we end up with an
518                  * unevictable page on [in]active list.
519                  * We know how to handle that.
520                  */
521                 lru = active + page_is_file_cache(page);
522                 lru_cache_add_lru(page, lru);
523         } else {
524                 /*
525                  * Put unevictable pages directly on zone's unevictable
526                  * list.
527                  */
528                 lru = LRU_UNEVICTABLE;
529                 add_page_to_unevictable_list(page);
530         }
531
532         /*
533          * The page's status can change while we move it among the LRU lists. If an
534          * evictable page ends up on the unevictable list, it will never be freed. To
535          * avoid that, check again after we have added it to the list.
536          */
537         if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
538                 if (!isolate_lru_page(page)) {
539                         put_page(page);
540                         goto redo;
541                 }
542                 /* This means someone else dropped this page from the LRU,
543                  * so it will be freed or put back on the LRU again. There is
544                  * nothing to do here.
545                  */
546         }
547
548         if (was_unevictable && lru != LRU_UNEVICTABLE)
549                 count_vm_event(UNEVICTABLE_PGRESCUED);
550         else if (!was_unevictable && lru == LRU_UNEVICTABLE)
551                 count_vm_event(UNEVICTABLE_PGCULLED);
552
553         put_page(page);         /* drop ref from isolate */
554 }
555
556 #else /* CONFIG_UNEVICTABLE_LRU */
557
558 void putback_lru_page(struct page *page)
559 {
560         int lru;
561         VM_BUG_ON(PageLRU(page));
562
563         lru = !!TestClearPageActive(page) + page_is_file_cache(page);
564         lru_cache_add_lru(page, lru);
565         put_page(page);
566 }
567 #endif /* CONFIG_UNEVICTABLE_LRU */
568
569
570 /*
571  * shrink_page_list() returns the number of reclaimed pages
572  */
573 static unsigned long shrink_page_list(struct list_head *page_list,
574                                         struct scan_control *sc,
575                                         enum pageout_io sync_writeback)
576 {
577         LIST_HEAD(ret_pages);
578         struct pagevec freed_pvec;
579         int pgactivate = 0;
580         unsigned long nr_reclaimed = 0;
581
582         cond_resched();
583
584         pagevec_init(&freed_pvec, 1);
585         while (!list_empty(page_list)) {
586                 struct address_space *mapping;
587                 struct page *page;
588                 int may_enter_fs;
589                 int referenced;
590
591                 cond_resched();
592
593                 page = lru_to_page(page_list);
594                 list_del(&page->lru);
595
596                 if (!trylock_page(page))
597                         goto keep;
598
599                 VM_BUG_ON(PageActive(page));
600
601                 sc->nr_scanned++;
602
603                 if (unlikely(!page_evictable(page, NULL)))
604                         goto cull_mlocked;
605
606                 if (!sc->may_swap && page_mapped(page))
607                         goto keep_locked;
608
609                 /* Double the slab pressure for mapped and swapcache pages */
610                 if (page_mapped(page) || PageSwapCache(page))
611                         sc->nr_scanned++;
612
613                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
614                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
615
616                 if (PageWriteback(page)) {
617                         /*
618                          * Synchronous reclaim is performed in two passes,
619                          * first an asynchronous pass over the list to
620                          * start parallel writeback, and a second synchronous
621                          * pass to wait for the IO to complete.  Wait here
622                          * for any page for which writeback has already
623                          * started.
624                          */
625                         if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
626                                 wait_on_page_writeback(page);
627                         else
628                                 goto keep_locked;
629                 }
630
631                 referenced = page_referenced(page, 1, sc->mem_cgroup);
632                 /* In active use or really unfreeable?  Activate it. */
633                 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
634                                         referenced && page_mapping_inuse(page))
635                         goto activate_locked;
636
637                 /*
638                  * Anonymous process memory has backing store?
639                  * Try to allocate it some swap space here.
640                  */
641                 if (PageAnon(page) && !PageSwapCache(page)) {
642                         if (!(sc->gfp_mask & __GFP_IO))
643                                 goto keep_locked;
644                         if (!add_to_swap(page))
645                                 goto activate_locked;
646                         may_enter_fs = 1;
647                 }
648
649                 mapping = page_mapping(page);
650
651                 /*
652                  * The page is mapped into the page tables of one or more
653                  * processes. Try to unmap it here.
654                  */
655                 if (page_mapped(page) && mapping) {
656                         switch (try_to_unmap(page, 0)) {
657                         case SWAP_FAIL:
658                                 goto activate_locked;
659                         case SWAP_AGAIN:
660                                 goto keep_locked;
661                         case SWAP_MLOCK:
662                                 goto cull_mlocked;
663                         case SWAP_SUCCESS:
664                                 ; /* try to free the page below */
665                         }
666                 }
667
668                 if (PageDirty(page)) {
669                         if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
670                                 goto keep_locked;
671                         if (!may_enter_fs)
672                                 goto keep_locked;
673                         if (!sc->may_writepage)
674                                 goto keep_locked;
675
676                         /* Page is dirty, try to write it out here */
677                         switch (pageout(page, mapping, sync_writeback)) {
678                         case PAGE_KEEP:
679                                 goto keep_locked;
680                         case PAGE_ACTIVATE:
681                                 goto activate_locked;
682                         case PAGE_SUCCESS:
683                                 if (PageWriteback(page) || PageDirty(page))
684                                         goto keep;
685                                 /*
686                                  * A synchronous write - probably a ramdisk.  Go
687                                  * ahead and try to reclaim the page.
688                                  */
689                                 if (!trylock_page(page))
690                                         goto keep;
691                                 if (PageDirty(page) || PageWriteback(page))
692                                         goto keep_locked;
693                                 mapping = page_mapping(page);
694                         case PAGE_CLEAN:
695                                 ; /* try to free the page below */
696                         }
697                 }
698
699                 /*
700                  * If the page has buffers, try to free the buffer mappings
701                  * associated with this page. If we succeed we try to free
702                  * the page as well.
703                  *
704                  * We do this even if the page is PageDirty().
705                  * try_to_release_page() does not perform I/O, but it is
706                  * possible for a page to have PageDirty set, but it is actually
707                  * clean (all its buffers are clean).  This happens if the
708                  * buffers were written out directly, with submit_bh(). ext3
709                  * will do this, as well as the blockdev mapping.
710                  * try_to_release_page() will discover that cleanness and will
711                  * drop the buffers and mark the page clean - it can be freed.
712                  *
713                  * Rarely, pages can have buffers and no ->mapping.  These are
714                  * the pages which were not successfully invalidated in
715                  * truncate_complete_page().  We try to drop those buffers here
716                  * and if that worked, and the page is no longer mapped into
717                  * process address space (page_count == 1) it can be freed.
718                  * Otherwise, leave the page on the LRU so it is swappable.
719                  */
720                 if (PagePrivate(page)) {
721                         if (!try_to_release_page(page, sc->gfp_mask))
722                                 goto activate_locked;
723                         if (!mapping && page_count(page) == 1) {
724                                 unlock_page(page);
725                                 if (put_page_testzero(page))
726                                         goto free_it;
727                                 else {
728                                         /*
729                                          * rare race with speculative reference.
730                                          * the speculative reference will free
731                                          * this page shortly, so we may
732                                          * increment nr_reclaimed here (and
733                                          * leave it off the LRU).
734                                          */
735                                         nr_reclaimed++;
736                                         continue;
737                                 }
738                         }
739                 }
740
741                 if (!mapping || !__remove_mapping(mapping, page))
742                         goto keep_locked;
743
744                 /*
745                  * At this point, we have no other references and there is
746                  * no way to pick any more up (removed from LRU, removed
747                  * from pagecache). Can use non-atomic bitops now (and
748                  * we obviously don't have to worry about waking up a process
749          * waiting on the page lock, because there are no references).
750                  */
751                 __clear_page_locked(page);
752 free_it:
753                 nr_reclaimed++;
754                 if (!pagevec_add(&freed_pvec, page)) {
755                         __pagevec_free(&freed_pvec);
756                         pagevec_reinit(&freed_pvec);
757                 }
758                 continue;
759
760 cull_mlocked:
761                 if (PageSwapCache(page))
762                         try_to_free_swap(page);
763                 unlock_page(page);
764                 putback_lru_page(page);
765                 continue;
766
767 activate_locked:
768                 /* Not a candidate for swapping, so reclaim swap space. */
769                 if (PageSwapCache(page) && vm_swap_full())
770                         try_to_free_swap(page);
771                 VM_BUG_ON(PageActive(page));
772                 SetPageActive(page);
773                 pgactivate++;
774 keep_locked:
775                 unlock_page(page);
776 keep:
777                 list_add(&page->lru, &ret_pages);
778                 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
779         }
780         list_splice(&ret_pages, page_list);
781         if (pagevec_count(&freed_pvec))
782                 __pagevec_free(&freed_pvec);
783         count_vm_events(PGACTIVATE, pgactivate);
784         return nr_reclaimed;
785 }
786
787 /* LRU Isolation modes. */
788 #define ISOLATE_INACTIVE 0      /* Isolate inactive pages. */
789 #define ISOLATE_ACTIVE 1        /* Isolate active pages. */
790 #define ISOLATE_BOTH 2          /* Isolate both active and inactive pages. */
791
792 /*
793  * Attempt to remove the specified page from its LRU.  Only take this page
794  * if it is of the appropriate PageActive status.  Pages which are being
795  * freed elsewhere are also ignored.
796  *
797  * page:        page to consider
798  * mode:        one of the LRU isolation modes defined above
799  *
800  * returns 0 on success, -ve errno on failure.
801  */
802 int __isolate_lru_page(struct page *page, int mode, int file)
803 {
804         int ret = -EINVAL;
805
806         /* Only take pages on the LRU. */
807         if (!PageLRU(page))
808                 return ret;
809
810         /*
811          * When checking the active state, we need to be sure we are
812          * dealing with comparable boolean values.  Take the logical not
813          * of each.
814          */
815         if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
816                 return ret;
817
818         if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
819                 return ret;
820
821         /*
822          * When this function is being called for lumpy reclaim, we
823          * initially look into all LRU pages, active, inactive and
824          * unevictable; only give shrink_page_list evictable pages.
825          */
826         if (PageUnevictable(page))
827                 return ret;
828
829         ret = -EBUSY;
830
831         if (likely(get_page_unless_zero(page))) {
832                 /*
833                  * Be careful not to clear PageLRU until after we're
834                  * sure the page is not being freed elsewhere -- the
835                  * page release code relies on it.
836                  */
837                 ClearPageLRU(page);
838                 ret = 0;
839                 mem_cgroup_del_lru(page);
840         }
841
842         return ret;
843 }
844
845 /*
846  * zone->lru_lock is heavily contended.  Some of the functions that
847  * shrink the lists perform better by taking out a batch of pages
848  * and working on them outside the LRU lock.
849  *
850  * For pagecache intensive workloads, this function is the hottest
851  * spot in the kernel (apart from copy_*_user functions).
852  *
853  * Appropriate locks must be held before calling this function.
854  *
855  * @nr_to_scan: The number of pages to look through on the list.
856  * @src:        The LRU list to pull pages off.
857  * @dst:        The temp list to put pages on to.
858  * @scanned:    The number of pages that were scanned.
859  * @order:      The caller's attempted allocation order
860  * @mode:       One of the LRU isolation modes
861  * @file:       True [1] if isolating file [!anon] pages
862  *
863  * returns how many pages were moved onto *@dst.
864  */
865 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
866                 struct list_head *src, struct list_head *dst,
867                 unsigned long *scanned, int order, int mode, int file)
868 {
869         unsigned long nr_taken = 0;
870         unsigned long scan;
871
872         for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
873                 struct page *page;
874                 unsigned long pfn;
875                 unsigned long end_pfn;
876                 unsigned long page_pfn;
877                 int zone_id;
878
879                 page = lru_to_page(src);
880                 prefetchw_prev_lru_page(page, src, flags);
881
882                 VM_BUG_ON(!PageLRU(page));
883
884                 switch (__isolate_lru_page(page, mode, file)) {
885                 case 0:
886                         list_move(&page->lru, dst);
887                         nr_taken++;
888                         break;
889
890                 case -EBUSY:
891                         /* else it is being freed elsewhere */
892                         list_move(&page->lru, src);
893                         continue;
894
895                 default:
896                         BUG();
897                 }
898
899                 if (!order)
900                         continue;
901
902                 /*
903                  * Attempt to take all pages in the order-aligned region
904                  * surrounding the tag page.  Only take those pages of
905                  * the same active state as that tag page.  We may safely
906                  * round the target page pfn down to the requested order
907                  * as the mem_map is guaranteed valid out to MAX_ORDER;
908                  * if a page in that range is in a different zone we will
909                  * detect it from its zone id and abort this block scan.
910                  */
911                 zone_id = page_zone_id(page);
912                 page_pfn = page_to_pfn(page);
913                 pfn = page_pfn & ~((1 << order) - 1);
914                 end_pfn = pfn + (1 << order);
915                 for (; pfn < end_pfn; pfn++) {
916                         struct page *cursor_page;
917
918                         /* The target page is in the block, ignore it. */
919                         if (unlikely(pfn == page_pfn))
920                                 continue;
921
922                         /* Avoid holes within the zone. */
923                         if (unlikely(!pfn_valid_within(pfn)))
924                                 break;
925
926                         cursor_page = pfn_to_page(pfn);
927
928                         /* Check that we have not crossed a zone boundary. */
929                         if (unlikely(page_zone_id(cursor_page) != zone_id))
930                                 continue;
931                         switch (__isolate_lru_page(cursor_page, mode, file)) {
932                         case 0:
933                                 list_move(&cursor_page->lru, dst);
934                                 nr_taken++;
935                                 scan++;
936                                 break;
937
938                         case -EBUSY:
939                                 /* else it is being freed elsewhere */
940                                 list_move(&cursor_page->lru, src);
941                         default:
942                                 break;  /* ! on LRU or wrong list */
943                         }
944                 }
945         }
946
947         *scanned = scan;
948         return nr_taken;
949 }
950
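/*
 * Isolation callback used for global (non-memcg) reclaim: pick the zone's
 * LRU list matching the requested active/file state and isolate from it.
 */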
951 static unsigned long isolate_pages_global(unsigned long nr,
952                                         struct list_head *dst,
953                                         unsigned long *scanned, int order,
954                                         int mode, struct zone *z,
955                                         struct mem_cgroup *mem_cont,
956                                         int active, int file)
957 {
958         int lru = LRU_BASE;
959         if (active)
960                 lru += LRU_ACTIVE;
961         if (file)
962                 lru += LRU_FILE;
963         return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
964                                                                 mode, !!file);
965 }
966
967 /*
968  * clear_active_flags() is a helper for shrink_active_list(), clearing
969  * any active bits from the pages in the list.
970  */
971 static unsigned long clear_active_flags(struct list_head *page_list,
972                                         unsigned int *count)
973 {
974         int nr_active = 0;
975         int lru;
976         struct page *page;
977
978         list_for_each_entry(page, page_list, lru) {
979                 lru = page_is_file_cache(page);
980                 if (PageActive(page)) {
981                         lru += LRU_ACTIVE;
982                         ClearPageActive(page);
983                         nr_active++;
984                 }
985                 count[lru]++;
986         }
987
988         return nr_active;
989 }
990
991 /**
992  * isolate_lru_page - tries to isolate a page from its LRU list
993  * @page: page to isolate from its LRU list
994  *
995  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
996  * vmstat statistic corresponding to whatever LRU list the page was on.
997  *
998  * Returns 0 if the page was removed from an LRU list.
999  * Returns -EBUSY if the page was not on an LRU list.
1000  *
1001  * The returned page will have PageLRU() cleared.  If it was found on
1002  * the active list, it will have PageActive set.  If it was found on
1003  * the unevictable list, it will have the PageUnevictable bit set. That flag
1004  * may need to be cleared by the caller before letting the page go.
1005  *
1006  * The vmstat statistic corresponding to the list on which the page was
1007  * found will be decremented.
1008  *
1009  * Restrictions:
1010  * (1) Must be called with an elevated refcount on the page. This is a
1011  *     fundamental difference from isolate_lru_pages (which is called
1012  *     without a stable reference).
1013  * (2) the lru_lock must not be held.
1014  * (3) interrupts must be enabled.
1015  */
1016 int isolate_lru_page(struct page *page)
1017 {
1018         int ret = -EBUSY;
1019
1020         if (PageLRU(page)) {
1021                 struct zone *zone = page_zone(page);
1022
1023                 spin_lock_irq(&zone->lru_lock);
1024                 if (PageLRU(page) && get_page_unless_zero(page)) {
1025                         int lru = page_lru(page);
1026                         ret = 0;
1027                         ClearPageLRU(page);
1028
1029                         del_page_from_lru_list(zone, page, lru);
1030                 }
1031                 spin_unlock_irq(&zone->lru_lock);
1032         }
1033         return ret;
1034 }
1035
1036 /*
1037  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
1038  * of reclaimed pages
1039  */
1040 static unsigned long shrink_inactive_list(unsigned long max_scan,
1041                         struct zone *zone, struct scan_control *sc,
1042                         int priority, int file)
1043 {
1044         LIST_HEAD(page_list);
1045         struct pagevec pvec;
1046         unsigned long nr_scanned = 0;
1047         unsigned long nr_reclaimed = 0;
1048         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1049
1050         pagevec_init(&pvec, 1);
1051
1052         lru_add_drain();
1053         spin_lock_irq(&zone->lru_lock);
1054         do {
1055                 struct page *page;
1056                 unsigned long nr_taken;
1057                 unsigned long nr_scan;
1058                 unsigned long nr_freed;
1059                 unsigned long nr_active;
1060                 unsigned int count[NR_LRU_LISTS] = { 0, };
1061                 int mode = ISOLATE_INACTIVE;
1062
1063                 /*
1064                  * If we need a large contiguous chunk of memory, or have
1065                  * trouble getting a small set of contiguous pages, we
1066                  * will reclaim both active and inactive pages.
1067                  *
1068                  * We use the same threshold as pageout congestion_wait below.
1069                  */
1070                 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1071                         mode = ISOLATE_BOTH;
1072                 else if (sc->order && priority < DEF_PRIORITY - 2)
1073                         mode = ISOLATE_BOTH;
1074
1075                 nr_taken = sc->isolate_pages(sc->swap_cluster_max,
1076                              &page_list, &nr_scan, sc->order, mode,
1077                                 zone, sc->mem_cgroup, 0, file);
1078                 nr_active = clear_active_flags(&page_list, count);
1079                 __count_vm_events(PGDEACTIVATE, nr_active);
1080
1081                 __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1082                                                 -count[LRU_ACTIVE_FILE]);
1083                 __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1084                                                 -count[LRU_INACTIVE_FILE]);
1085                 __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1086                                                 -count[LRU_ACTIVE_ANON]);
1087                 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1088                                                 -count[LRU_INACTIVE_ANON]);
1089
1090                 if (scan_global_lru(sc)) {
1091                         zone->pages_scanned += nr_scan;
1092                         reclaim_stat->recent_scanned[0] +=
1093                                                       count[LRU_INACTIVE_ANON];
1094                         reclaim_stat->recent_scanned[0] +=
1095                                                       count[LRU_ACTIVE_ANON];
1096                         reclaim_stat->recent_scanned[1] +=
1097                                                       count[LRU_INACTIVE_FILE];
1098                         reclaim_stat->recent_scanned[1] +=
1099                                                       count[LRU_ACTIVE_FILE];
1100                 }
1101                 spin_unlock_irq(&zone->lru_lock);
1102
1103                 nr_scanned += nr_scan;
1104                 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
1105
1106                 /*
1107                  * If we are direct reclaiming for contiguous pages and we do
1108                  * not reclaim everything in the list, try again and wait
1109                  * for IO to complete. This will stall high-order allocations
1110                  * but that should be acceptable to the caller
1111                  */
1112                 if (nr_freed < nr_taken && !current_is_kswapd() &&
1113                                         sc->order > PAGE_ALLOC_COSTLY_ORDER) {
1114                         congestion_wait(WRITE, HZ/10);
1115
1116                         /*
1117                          * The attempt at page out may have made some
1118                          * of the pages active, mark them inactive again.
1119                          */
1120                         nr_active = clear_active_flags(&page_list, count);
1121                         count_vm_events(PGDEACTIVATE, nr_active);
1122
1123                         nr_freed += shrink_page_list(&page_list, sc,
1124                                                         PAGEOUT_IO_SYNC);
1125                 }
1126
1127                 nr_reclaimed += nr_freed;
1128                 local_irq_disable();
1129                 if (current_is_kswapd()) {
1130                         __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
1131                         __count_vm_events(KSWAPD_STEAL, nr_freed);
1132                 } else if (scan_global_lru(sc))
1133                         __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
1134
1135                 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
1136
1137                 if (nr_taken == 0)
1138                         goto done;
1139
1140                 spin_lock(&zone->lru_lock);
1141                 /*
1142                  * Put back any unfreeable pages.
1143                  */
1144                 while (!list_empty(&page_list)) {
1145                         int lru;
1146                         page = lru_to_page(&page_list);
1147                         VM_BUG_ON(PageLRU(page));
1148                         list_del(&page->lru);
1149                         if (unlikely(!page_evictable(page, NULL))) {
1150                                 spin_unlock_irq(&zone->lru_lock);
1151                                 putback_lru_page(page);
1152                                 spin_lock_irq(&zone->lru_lock);
1153                                 continue;
1154                         }
1155                         SetPageLRU(page);
1156                         lru = page_lru(page);
1157                         add_page_to_lru_list(zone, page, lru);
1158                         if (PageActive(page) && scan_global_lru(sc)) {
1159                                 int file = !!page_is_file_cache(page);
1160                                 reclaim_stat->recent_rotated[file]++;
1161                         }
1162                         if (!pagevec_add(&pvec, page)) {
1163                                 spin_unlock_irq(&zone->lru_lock);
1164                                 __pagevec_release(&pvec);
1165                                 spin_lock_irq(&zone->lru_lock);
1166                         }
1167                 }
1168         } while (nr_scanned < max_scan);
1169         spin_unlock(&zone->lru_lock);
1170 done:
1171         local_irq_enable();
1172         pagevec_release(&pvec);
1173         return nr_reclaimed;
1174 }
1175
1176 /*
1177  * We are about to scan this zone at a certain priority level.  If that priority
1178  * level is smaller (ie: more urgent) than the previous priority, then note
1179  * that priority level within the zone.  This is done so that when the next
1180  * process comes in to scan this zone, it will immediately start out at this
1181  * priority level rather than having to build up its own scanning priority.
1182  * Here, this priority affects only the reclaim-mapped threshold.
1183  */
1184 static inline void note_zone_scanning_priority(struct zone *zone, int priority)
1185 {
1186         if (priority < zone->prev_priority)
1187                 zone->prev_priority = priority;
1188 }
1189
1190 /*
1191  * This moves pages from the active list to the inactive list.
1192  *
1193  * We move them the other way if the page is referenced by one or more
1194  * processes, from rmap.
1195  *
1196  * If the pages are mostly unmapped, the processing is fast and it is
1197  * appropriate to hold zone->lru_lock across the whole operation.  But if
1198  * the pages are mapped, the processing is slow (page_referenced()) so we
1199  * should drop zone->lru_lock around each page.  It's impossible to balance
1200  * this, so instead we remove the pages from the LRU while processing them.
1201  * It is safe to rely on PG_active against the non-LRU pages in here because
1202  * nobody will play with that bit on a non-LRU page.
1203  *
1204  * The downside is that we have to touch page->_count against each page.
1205  * But we had to alter page->flags anyway.
1206  */
1207
1208
1209 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1210                         struct scan_control *sc, int priority, int file)
1211 {
1212         unsigned long pgmoved;
1213         int pgdeactivate = 0;
1214         unsigned long pgscanned;
1215         LIST_HEAD(l_hold);      /* The pages which were snipped off */
1216         LIST_HEAD(l_inactive);
1217         struct page *page;
1218         struct pagevec pvec;
1219         enum lru_list lru;
1220         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1221
1222         lru_add_drain();
1223         spin_lock_irq(&zone->lru_lock);
1224         pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
1225                                         ISOLATE_ACTIVE, zone,
1226                                         sc->mem_cgroup, 1, file);
1227         /*
1228          * zone->pages_scanned is used to detect a zone OOM condition;
1229          * mem_cgroup remembers nr_scan by itself.
1230          */
1231         if (scan_global_lru(sc)) {
1232                 zone->pages_scanned += pgscanned;
1233                 reclaim_stat->recent_scanned[!!file] += pgmoved;
1234         }
1235
1236         if (file)
1237                 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
1238         else
1239                 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
1240         spin_unlock_irq(&zone->lru_lock);
1241
1242         pgmoved = 0;
1243         while (!list_empty(&l_hold)) {
1244                 cond_resched();
1245                 page = lru_to_page(&l_hold);
1246                 list_del(&page->lru);
1247
1248                 if (unlikely(!page_evictable(page, NULL))) {
1249                         putback_lru_page(page);
1250                         continue;
1251                 }
1252
1253                 /* page_referenced clears PageReferenced */
1254                 if (page_mapping_inuse(page) &&
1255                     page_referenced(page, 0, sc->mem_cgroup))
1256                         pgmoved++;
1257
1258                 list_add(&page->lru, &l_inactive);
1259         }
1260
1261         /*
1262          * Move the pages to the [file or anon] inactive list.
1263          */
1264         pagevec_init(&pvec, 1);
1265         pgmoved = 0;
1266         lru = LRU_BASE + file * LRU_FILE;
1267
1268         spin_lock_irq(&zone->lru_lock);
1269         /*
1270          * Count referenced pages from currently used mappings as
1271          * rotated, even though they are moved to the inactive list.
1272          * This helps balance scan pressure between file and anonymous
1273          * pages in get_scan_ratio.
1274          */
1275         if (scan_global_lru(sc))
1276                 reclaim_stat->recent_rotated[!!file] += pgmoved;
1277
1278         while (!list_empty(&l_inactive)) {
1279                 page = lru_to_page(&l_inactive);
1280                 prefetchw_prev_lru_page(page, &l_inactive, flags);
1281                 VM_BUG_ON(PageLRU(page));
1282                 SetPageLRU(page);
1283                 VM_BUG_ON(!PageActive(page));
1284                 ClearPageActive(page);
1285
1286                 list_move(&page->lru, &zone->lru[lru].list);
1287                 mem_cgroup_add_lru_list(page, lru);
1288                 pgmoved++;
1289                 if (!pagevec_add(&pvec, page)) {
1290                         __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1291                         spin_unlock_irq(&zone->lru_lock);
1292                         pgdeactivate += pgmoved;
1293                         pgmoved = 0;
1294                         if (buffer_heads_over_limit)
1295                                 pagevec_strip(&pvec);
1296                         __pagevec_release(&pvec);
1297                         spin_lock_irq(&zone->lru_lock);
1298                 }
1299         }
1300         __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1301         pgdeactivate += pgmoved;
1302         if (buffer_heads_over_limit) {
1303                 spin_unlock_irq(&zone->lru_lock);
1304                 pagevec_strip(&pvec);
1305                 spin_lock_irq(&zone->lru_lock);
1306         }
1307         __count_zone_vm_events(PGREFILL, zone, pgscanned);
1308         __count_vm_events(PGDEACTIVATE, pgdeactivate);
1309         spin_unlock_irq(&zone->lru_lock);
1310         if (vm_swap_full())
1311                 pagevec_swap_free(&pvec);
1312
1313         pagevec_release(&pvec);
1314 }
1315
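/*
 * Global check: the inactive anon list is considered low when it is smaller
 * than the active anon list divided by the zone's inactive_ratio.
 */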
1316 static int inactive_anon_is_low_global(struct zone *zone)
1317 {
1318         unsigned long active, inactive;
1319
1320         active = zone_page_state(zone, NR_ACTIVE_ANON);
1321         inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1322
1323         if (inactive * zone->inactive_ratio < active)
1324                 return 1;
1325
1326         return 0;
1327 }
1328
1329 /**
1330  * inactive_anon_is_low - check if anonymous pages need to be deactivated
1331  * @zone: zone to check
1332  * @sc:   scan control of this context
1333  *
1334  * Returns true if the zone does not have enough inactive anon pages,
1335  * meaning some active anon pages need to be deactivated.
1336  */
1337 static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1338 {
1339         int low;
1340
1341         if (scan_global_lru(sc))
1342                 low = inactive_anon_is_low_global(zone);
1343         else
1344                 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone);
1345         return low;
1346 }
1347
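/*
 * Shrink one LRU list: age the active file list, age the active anon list
 * when the inactive anon list is low, and otherwise reclaim from the
 * inactive list.
 */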
1348 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1349         struct zone *zone, struct scan_control *sc, int priority)
1350 {
1351         int file = is_file_lru(lru);
1352
1353         if (lru == LRU_ACTIVE_FILE) {
1354                 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1355                 return 0;
1356         }
1357
1358         if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
1359                 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1360                 return 0;
1361         }
1362         return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1363 }
1364
1365 /*
1366  * Determine how aggressively the anon and file LRU lists should be
1367  * scanned.  The relative value of each set of LRU lists is determined
1368  * by looking at the fraction of scanned pages that were rotated back
1369  * onto the active list instead of being evicted.
1370  *
1371  * percent[0] specifies how much pressure to put on ram/swap backed
1372  * memory, while percent[1] determines pressure on the file LRUs.
1373  */
1374 static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1375                                         unsigned long *percent)
1376 {
1377         unsigned long anon, file, free;
1378         unsigned long anon_prio, file_prio;
1379         unsigned long ap, fp;
1380         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1381
1382         /* If we have no swap space, do not bother scanning anon pages. */
1383         if (nr_swap_pages <= 0) {
1384                 percent[0] = 0;
1385                 percent[1] = 100;
1386                 return;
1387         }
1388
1389         anon  = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
1390                 zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
1391         file  = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
1392                 zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
1393
1394         if (scan_global_lru(sc)) {
1395                 free  = zone_page_state(zone, NR_FREE_PAGES);
1396                 /* If we have very few page cache pages,
1397                    force-scan anon pages. */
1398                 if (unlikely(file + free <= zone->pages_high)) {
1399                         percent[0] = 100;
1400                         percent[1] = 0;
1401                         return;
1402                 }
1403         }
1404
1405         /*
1406          * OK, so we have swap space and a fair amount of page cache
1407          * pages.  We use the recently rotated / recently scanned
1408          * ratios to determine how valuable each cache is.
1409          *
1410          * Because workloads change over time (and to avoid overflow)
1411          * we keep these statistics as a floating average, which ends
1412          * up weighing recent references more than old ones.
1413          *
1414          * anon in [0], file in [1]
1415          */
1416         if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1417                 spin_lock_irq(&zone->lru_lock);
1418                 reclaim_stat->recent_scanned[0] /= 2;
1419                 reclaim_stat->recent_rotated[0] /= 2;
1420                 spin_unlock_irq(&zone->lru_lock);
1421         }
1422
1423         if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1424                 spin_lock_irq(&zone->lru_lock);
1425                 reclaim_stat->recent_scanned[1] /= 2;
1426                 reclaim_stat->recent_rotated[1] /= 2;
1427                 spin_unlock_irq(&zone->lru_lock);
1428         }
1429
1430         /*
1431          * With swappiness at 100, anonymous and file have the same priority.
1432          * This scanning priority is essentially the inverse of IO cost.
1433          */
1434         anon_prio = sc->swappiness;
1435         file_prio = 200 - sc->swappiness;
1436
1437         /*
1438          * The amount of pressure on anon vs file pages is inversely
1439          * proportional to the fraction of recently scanned pages on
1440          * each list that were recently referenced and in active use.
1441          */
1442         ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1443         ap /= reclaim_stat->recent_rotated[0] + 1;
1444
1445         fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1446         fp /= reclaim_stat->recent_rotated[1] + 1;
1447
1448         /* Normalize to percentages */
1449         percent[0] = 100 * ap / (ap + fp + 1);
1450         percent[1] = 100 - percent[0];
1451 }
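
/*
 * A worked example of the above (illustrative numbers only): with
 * swappiness = 60 we get anon_prio = 60 and file_prio = 140.  If
 * recent_scanned is {anon: 1000, file: 4000} and recent_rotated is
 * {anon: 900, file: 300}, then
 *
 *      ap = 61 * 1001 / 901  = 67
 *      fp = 141 * 4001 / 301 = 1874
 *
 * and percent[0] = 100 * 67 / (67 + 1874 + 1) = 3, percent[1] = 97;
 * nearly all pressure goes to the file LRUs because most recently
 * scanned anon pages were rotated back, i.e. they are still in use.
 */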
1452
1453
1454 /*
1455  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1456  */
1457 static void shrink_zone(int priority, struct zone *zone,
1458                                 struct scan_control *sc)
1459 {
1460         unsigned long nr[NR_LRU_LISTS];
1461         unsigned long nr_to_scan;
1462         unsigned long percent[2];       /* anon @ 0; file @ 1 */
1463         enum lru_list l;
1464         unsigned long nr_reclaimed = sc->nr_reclaimed;
1465         unsigned long swap_cluster_max = sc->swap_cluster_max;
1466
1467         get_scan_ratio(zone, sc, percent);
1468
1469         for_each_evictable_lru(l) {
1470                 if (scan_global_lru(sc)) {
1471                         int file = is_file_lru(l);
1472                         int scan;
1473
1474                         scan = zone_page_state(zone, NR_LRU_BASE + l);
1475                         if (priority) {
1476                                 scan >>= priority;
1477                                 scan = (scan * percent[file]) / 100;
1478                         }
1479                         zone->lru[l].nr_scan += scan;
1480                         nr[l] = zone->lru[l].nr_scan;
1481                         if (nr[l] >= swap_cluster_max)
1482                                 zone->lru[l].nr_scan = 0;
1483                         else
1484                                 nr[l] = 0;
1485                 } else {
1486                         /*
1487                          * This reclaim occurs not because of a zone memory shortage
1488                          * but because the memory controller hit its limit.
1489                          * Don't modify zone reclaim related data.
1490                          */
1491                         nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
1492                                                                 priority, l);
1493                 }
1494         }
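
        /*
         * Example of the accumulation above (assumed numbers): a global
         * scan of a zone with 100000 inactive file pages at priority 10
         * and percent[1] == 97 adds (100000 >> 10) * 97 / 100 = 94 pages
         * to nr_scan; nr[l] stays 0 until nr_scan reaches swap_cluster_max.
         */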
1495
1496         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1497                                         nr[LRU_INACTIVE_FILE]) {
1498                 for_each_evictable_lru(l) {
1499                         if (nr[l]) {
1500                                 nr_to_scan = min(nr[l], swap_cluster_max);
1501                                 nr[l] -= nr_to_scan;
1502
1503                                 nr_reclaimed += shrink_list(l, nr_to_scan,
1504                                                             zone, sc, priority);
1505                         }
1506                 }
1507                 /*
1508                  * On large memory systems, scan >> priority can become
1509                  * really large. This is fine for the starting priority;
1510                  * we want to put equal scanning pressure on each zone.
1511                  * However, if the VM has a harder time freeing pages,
1512                  * with multiple processes reclaiming pages, the total
1513                  * freeing target can get unreasonably large.
1514                  */
1515                 if (nr_reclaimed > swap_cluster_max &&
1516                         priority < DEF_PRIORITY && !current_is_kswapd())
1517                         break;
1518         }
1519
1520         sc->nr_reclaimed = nr_reclaimed;
1521
1522         /*
1523          * Even if we did not try to evict anon pages at all, we want to
1524          * rebalance the anon lru active/inactive ratio.
1525          */
1526         if (inactive_anon_is_low(zone, sc))
1527                 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1528
1529         throttle_vm_writeout(sc->gfp_mask);
1530 }
1531
1532 /*
1533  * This is the direct reclaim path, for page-allocating processes.  We only
1534  * try to reclaim pages from zones which will satisfy the caller's allocation
1535  * request.
1536  *
1537  * We reclaim from a zone even if that zone is over pages_high.  Because:
1538  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1539  *    allocation or
1540  * b) The zones may be over pages_high but they must go *over* pages_high to
1541  *    satisfy the `incremental min' zone defense algorithm.
1542  *
1543  * If a zone is deemed to be full of pinned pages then just give it a light
1544  * scan and then give up on it.
1545  */
1546 static void shrink_zones(int priority, struct zonelist *zonelist,
1547                                         struct scan_control *sc)
1548 {
1549         enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1550         struct zoneref *z;
1551         struct zone *zone;
1552
1553         sc->all_unreclaimable = 1;
1554         for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1555                 if (!populated_zone(zone))
1556                         continue;
1557                 /*
1558                  * Take care that memory controller reclaim has only a small
1559                  * influence on the global LRU.
1560                  */
1561                 if (scan_global_lru(sc)) {
1562                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1563                                 continue;
1564                         note_zone_scanning_priority(zone, priority);
1565
1566                         if (zone_is_all_unreclaimable(zone) &&
1567                                                 priority != DEF_PRIORITY)
1568                                 continue;       /* Let kswapd poll it */
1569                         sc->all_unreclaimable = 0;
1570                 } else {
1571                         /*
1572                          * Ignore cpuset limitations here. We just want to reduce
1573                          * the number of pages we use, regardless of memory shortage.
1574                          */
1575                         sc->all_unreclaimable = 0;
1576                         mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
1577                                                         priority);
1578                 }
1579
1580                 shrink_zone(priority, zone, sc);
1581         }
1582 }
1583
1584 /*
1585  * This is the main entry point to direct page reclaim.
1586  *
1587  * If a full scan of the inactive list fails to free enough memory then we
1588  * are "out of memory" and something needs to be killed.
1589  *
1590  * If the caller is !__GFP_FS then the probability of a failure is reasonably
1591  * high - the zone may be full of dirty or under-writeback pages, which this
1592  * caller can't do much about.  We kick pdflush and take explicit naps in the
1593  * hope that some of these pages can be written.  But if the allocating task
1594  * holds filesystem locks which prevent writeout this might not work, and the
1595  * allocation attempt will fail.
1596  *
1597  * returns:     0, if no pages reclaimed
1598  *              else, the number of pages reclaimed
1599  */
1600 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1601                                         struct scan_control *sc)
1602 {
1603         int priority;
1604         unsigned long ret = 0;
1605         unsigned long total_scanned = 0;
1606         struct reclaim_state *reclaim_state = current->reclaim_state;
1607         unsigned long lru_pages = 0;
1608         struct zoneref *z;
1609         struct zone *zone;
1610         enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1611
1612         delayacct_freepages_start();
1613
1614         if (scan_global_lru(sc))
1615                 count_vm_event(ALLOCSTALL);
1616         /*
1617          * mem_cgroup will not do shrink_slab.
1618          */
1619         if (scan_global_lru(sc)) {
1620                 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1621
1622                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1623                                 continue;
1624
1625                         lru_pages += zone_lru_pages(zone);
1626                 }
1627         }
1628
1629         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1630                 sc->nr_scanned = 0;
1631                 if (!priority)
1632                         disable_swap_token();
1633                 shrink_zones(priority, zonelist, sc);
1634                 /*
1635                  * Don't shrink slabs when reclaiming memory from
1636                  * over limit cgroups
1637                  */
1638                 if (scan_global_lru(sc)) {
1639                         shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1640                         if (reclaim_state) {
1641                                 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
1642                                 reclaim_state->reclaimed_slab = 0;
1643                         }
1644                 }
1645                 total_scanned += sc->nr_scanned;
1646                 if (sc->nr_reclaimed >= sc->swap_cluster_max) {
1647                         ret = sc->nr_reclaimed;
1648                         goto out;
1649                 }
1650
1651                 /*
1652                  * Try to write back as many pages as we just scanned.  This
1653                  * tends to cause slow streaming writers to write data to the
1654                  * disk smoothly, at the dirtying rate, which is nice.   But
1655                  * that's undesirable in laptop mode, where we *want* lumpy
1656                  * writeout.  So in laptop mode, write out the whole world.
1657                  */
1658                 if (total_scanned > sc->swap_cluster_max +
1659                                         sc->swap_cluster_max / 2) {
1660                         wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1661                         sc->may_writepage = 1;
1662                 }
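
                /*
                 * (With the usual SWAP_CLUSTER_MAX of 32, the threshold
                 *  above works out to more than 48 pages scanned in total
                 *  during this reclaim invocation.)
                 */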
1663
1664                 /* Take a nap, wait for some writeback to complete */
1665                 if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
1666                         congestion_wait(WRITE, HZ/10);
1667         }
1668         /* top priority shrink_zones still had more to do? don't OOM, then */
1669         if (!sc->all_unreclaimable && scan_global_lru(sc))
1670                 ret = sc->nr_reclaimed;
1671 out:
1672         /*
1673          * Now that we've scanned all the zones at this priority level, note
1674          * that level within the zone so that the next thread which performs
1675          * scanning of this zone will immediately start out at this priority
1676          * level.  This affects only the decision whether or not to bring
1677          * mapped pages onto the inactive list.
1678          */
1679         if (priority < 0)
1680                 priority = 0;
1681
1682         if (scan_global_lru(sc)) {
1683                 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1684
1685                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1686                                 continue;
1687
1688                         zone->prev_priority = priority;
1689                 }
1690         } else
1691                 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1692
1693         delayacct_freepages_end();
1694
1695         return ret;
1696 }
1697
1698 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1699                                                                 gfp_t gfp_mask)
1700 {
1701         struct scan_control sc = {
1702                 .gfp_mask = gfp_mask,
1703                 .may_writepage = !laptop_mode,
1704                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1705                 .may_swap = 1,
1706                 .swappiness = vm_swappiness,
1707                 .order = order,
1708                 .mem_cgroup = NULL,
1709                 .isolate_pages = isolate_pages_global,
1710         };
1711
1712         return do_try_to_free_pages(zonelist, &sc);
1713 }
1714
1715 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
1716
1717 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1718                                                 gfp_t gfp_mask,
1719                                            bool noswap)
1720 {
1721         struct scan_control sc = {
1722                 .may_writepage = !laptop_mode,
1723                 .may_swap = 1,
1724                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1725                 .swappiness = vm_swappiness,
1726                 .order = 0,
1727                 .mem_cgroup = mem_cont,
1728                 .isolate_pages = mem_cgroup_isolate_pages,
1729         };
1730         struct zonelist *zonelist;
1731
1732         if (noswap)
1733                 sc.may_swap = 0;
1734
1735         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1736                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1737         zonelist = NODE_DATA(numa_node_id())->node_zonelists;
1738         return do_try_to_free_pages(zonelist, &sc);
1739 }
1740 #endif
1741
1742 /*
1743  * For kswapd, balance_pgdat() will work across all this node's zones until
1744  * they are all at pages_high.
1745  *
1746  * Returns the number of pages which were actually freed.
1747  *
1748  * There is special handling here for zones which are full of pinned pages.
1749  * This can happen if the pages are all mlocked, or if they are all used by
1750  * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
1751  * What we do is to detect the case where all pages in the zone have been
1752  * scanned repeatedly and there has been zero successful reclaim.  Mark the zone as
1753  * dead and from now on, only perform a short scan.  Basically we're polling
1754  * the zone for when the problem goes away.
1755  *
1756  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
1757  * zones which have free_pages > pages_high, but once a zone is found to have
1758  * free_pages <= pages_high, we scan that zone and the lower zones regardless
1759  * of the number of free pages in the lower zones.  This interoperates with
1760  * the page allocator fallback scheme to ensure that aging of pages is balanced
1761  * across the zones.
1762  */
1763 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1764 {
1765         int all_zones_ok;
1766         int priority;
1767         int i;
1768         unsigned long total_scanned;
1769         struct reclaim_state *reclaim_state = current->reclaim_state;
1770         struct scan_control sc = {
1771                 .gfp_mask = GFP_KERNEL,
1772                 .may_swap = 1,
1773                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1774                 .swappiness = vm_swappiness,
1775                 .order = order,
1776                 .mem_cgroup = NULL,
1777                 .isolate_pages = isolate_pages_global,
1778         };
1779         /*
1780          * temp_priority is used to remember the scanning priority at which
1781          * this zone was successfully refilled to free_pages == pages_high.
1782          */
1783         int temp_priority[MAX_NR_ZONES];
1784
1785 loop_again:
1786         total_scanned = 0;
1787         sc.nr_reclaimed = 0;
1788         sc.may_writepage = !laptop_mode;
1789         count_vm_event(PAGEOUTRUN);
1790
1791         for (i = 0; i < pgdat->nr_zones; i++)
1792                 temp_priority[i] = DEF_PRIORITY;
1793
1794         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1795                 int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
1796                 unsigned long lru_pages = 0;
1797
1798                 /* The swap token gets in the way of swapout... */
1799                 if (!priority)
1800                         disable_swap_token();
1801
1802                 all_zones_ok = 1;
1803
1804                 /*
1805                  * Scan in the highmem->dma direction for the highest
1806                  * zone which needs scanning
1807                  */
1808                 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1809                         struct zone *zone = pgdat->node_zones + i;
1810
1811                         if (!populated_zone(zone))
1812                                 continue;
1813
1814                         if (zone_is_all_unreclaimable(zone) &&
1815                             priority != DEF_PRIORITY)
1816                                 continue;
1817
1818                         /*
1819                          * Do some background aging of the anon list, to give
1820                          * pages a chance to be referenced before reclaiming.
1821                          */
1822                         if (inactive_anon_is_low(zone, &sc))
1823                                 shrink_active_list(SWAP_CLUSTER_MAX, zone,
1824                                                         &sc, priority, 0);
1825
1826                         if (!zone_watermark_ok(zone, order, zone->pages_high,
1827                                                0, 0)) {
1828                                 end_zone = i;
1829                                 break;
1830                         }
1831                 }
1832                 if (i < 0)
1833                         goto out;
1834
1835                 for (i = 0; i <= end_zone; i++) {
1836                         struct zone *zone = pgdat->node_zones + i;
1837
1838                         lru_pages += zone_lru_pages(zone);
1839                 }
1840
1841                 /*
1842                  * Now scan the zone in the dma->highmem direction, stopping
1843                  * at the last zone which needs scanning.
1844                  *
1845                  * We do this because the page allocator works in the opposite
1846                  * direction.  This prevents the page allocator from allocating
1847                  * pages behind kswapd's direction of progress, which would
1848                  * cause too much scanning of the lower zones.
1849                  */
1850                 for (i = 0; i <= end_zone; i++) {
1851                         struct zone *zone = pgdat->node_zones + i;
1852                         int nr_slab;
1853
1854                         if (!populated_zone(zone))
1855                                 continue;
1856
1857                         if (zone_is_all_unreclaimable(zone) &&
1858                                         priority != DEF_PRIORITY)
1859                                 continue;
1860
1861                         if (!zone_watermark_ok(zone, order, zone->pages_high,
1862                                                end_zone, 0))
1863                                 all_zones_ok = 0;
1864                         temp_priority[i] = priority;
1865                         sc.nr_scanned = 0;
1866                         note_zone_scanning_priority(zone, priority);
1867                         /*
1868                          * We put equal pressure on every zone, unless one
1869                          * zone has way too many pages free already.
1870                          */
1871                         if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
1872                                                 end_zone, 0))
1873                                 shrink_zone(priority, zone, &sc);
1874                         reclaim_state->reclaimed_slab = 0;
1875                         nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1876                                                 lru_pages);
1877                         sc.nr_reclaimed += reclaim_state->reclaimed_slab;
1878                         total_scanned += sc.nr_scanned;
1879                         if (zone_is_all_unreclaimable(zone))
1880                                 continue;
1881                         if (nr_slab == 0 && zone->pages_scanned >=
1882                                                 (zone_lru_pages(zone) * 6))
1883                                         zone_set_flag(zone,
1884                                                       ZONE_ALL_UNRECLAIMABLE);
1885                         /*
1886                          * If we've done a decent amount of scanning and
1887                          * the reclaim ratio is low, start doing writepage
1888                          * even in laptop mode
1889                          */
1890                         if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1891                             total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
1892                                 sc.may_writepage = 1;
1893                 }
1894                 if (all_zones_ok)
1895                         break;          /* kswapd: all done */
1896                 /*
1897                  * OK, kswapd is getting into trouble.  Take a nap, then take
1898                  * another pass across the zones.
1899                  */
1900                 if (total_scanned && priority < DEF_PRIORITY - 2)
1901                         congestion_wait(WRITE, HZ/10);
1902
1903                 /*
1904                  * We do this so kswapd doesn't build up large priorities for
1905                  * example when it is freeing in parallel with allocators. It
1906                  * matches the direct reclaim path behaviour in terms of impact
1907                  * on zone->*_priority.
1908                  */
1909                 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
1910                         break;
1911         }
1912 out:
1913         /*
1914          * Note within each zone the priority level at which this zone was
1915          * brought into a happy state, so that the next thread which scans this
1916          * zone will start out at that priority level.
1917          */
1918         for (i = 0; i < pgdat->nr_zones; i++) {
1919                 struct zone *zone = pgdat->node_zones + i;
1920
1921                 zone->prev_priority = temp_priority[i];
1922         }
1923         if (!all_zones_ok) {
1924                 cond_resched();
1925
1926                 try_to_freeze();
1927
1928                 /*
1929                  * Fragmentation may mean that the system cannot be
1930                  * rebalanced for high-order allocations in all zones.
1931                  * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
1932                  * it means the zones have been fully scanned and are still
1933                  * not balanced. For high-order allocations, there is
1934                  * little point in trying all over again, as kswapd may
1935                  * loop forever.
1936                  *
1937                  * Instead, recheck all watermarks at order-0 as they
1938                  * are the most important. If watermarks are ok, kswapd will go
1939                  * back to sleep. High-order users can still perform direct
1940                  * reclaim if they wish.
1941                  */
1942                 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
1943                         order = sc.order = 0;
1944
1945                 goto loop_again;
1946         }
1947
1948         return sc.nr_reclaimed;
1949 }
1950
1951 /*
1952  * The background pageout daemon, started as a kernel thread
1953  * from the init process.
1954  *
1955  * This basically trickles out pages so that we have _some_
1956  * free memory available even if there is no other activity
1957  * that frees anything up. This is needed for things like routing
1958  * etc, where we otherwise might have all activity going on in
1959  * asynchronous contexts that cannot page things out.
1960  *
1961  * If there are applications that are active memory-allocators
1962  * (most normal use), this basically shouldn't matter.
1963  */
1964 static int kswapd(void *p)
1965 {
1966         unsigned long order;
1967         pg_data_t *pgdat = (pg_data_t*)p;
1968         struct task_struct *tsk = current;
1969         DEFINE_WAIT(wait);
1970         struct reclaim_state reclaim_state = {
1971                 .reclaimed_slab = 0,
1972         };
1973         node_to_cpumask_ptr(cpumask, pgdat->node_id);
1974
1975         if (!cpumask_empty(cpumask))
1976                 set_cpus_allowed_ptr(tsk, cpumask);
1977         current->reclaim_state = &reclaim_state;
1978
1979         /*
1980          * Tell the memory management that we're a "memory allocator",
1981          * and that if we need more memory we should get access to it
1982          * regardless (see "__alloc_pages()"). "kswapd" should
1983          * never get caught in the normal page freeing logic.
1984          *
1985          * (Kswapd normally doesn't need memory anyway, but sometimes
1986          * you need a small amount of memory in order to be able to
1987          * page out something else, and this flag essentially protects
1988          * us from recursively trying to free more memory as we're
1989          * trying to free the first piece of memory in the first place).
1990          */
1991         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1992         set_freezable();
1993
1994         order = 0;
1995         for ( ; ; ) {
1996                 unsigned long new_order;
1997
1998                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1999                 new_order = pgdat->kswapd_max_order;
2000                 pgdat->kswapd_max_order = 0;
2001                 if (order < new_order) {
2002                         /*
2003                          * Don't sleep if someone wants a larger 'order'
2004                          * allocation
2005                          */
2006                         order = new_order;
2007                 } else {
2008                         if (!freezing(current))
2009                                 schedule();
2010
2011                         order = pgdat->kswapd_max_order;
2012                 }
2013                 finish_wait(&pgdat->kswapd_wait, &wait);
2014
2015                 if (!try_to_freeze()) {
2016                         /* We can speed up thawing tasks if we don't call
2017                          * balance_pgdat after returning from the refrigerator
2018                          */
2019                         balance_pgdat(pgdat, order);
2020                 }
2021         }
2022         return 0;
2023 }
2024
2025 /*
2026  * A zone is low on free memory, so wake its kswapd task to service it.
2027  */
2028 void wakeup_kswapd(struct zone *zone, int order)
2029 {
2030         pg_data_t *pgdat;
2031
2032         if (!populated_zone(zone))
2033                 return;
2034
2035         pgdat = zone->zone_pgdat;
2036         if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
2037                 return;
2038         if (pgdat->kswapd_max_order < order)
2039                 pgdat->kswapd_max_order = order;
2040         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2041                 return;
2042         if (!waitqueue_active(&pgdat->kswapd_wait))
2043                 return;
2044         wake_up_interruptible(&pgdat->kswapd_wait);
2045 }
2046
2047 unsigned long global_lru_pages(void)
2048 {
2049         return global_page_state(NR_ACTIVE_ANON)
2050                 + global_page_state(NR_ACTIVE_FILE)
2051                 + global_page_state(NR_INACTIVE_ANON)
2052                 + global_page_state(NR_INACTIVE_FILE);
2053 }
2054
2055 #ifdef CONFIG_PM
2056 /*
2057  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
2058  * from LRU lists system-wide, for given pass and priority, and returns the
2059  * number of reclaimed pages
2060  *
2061  * For pass > 3 we also try to shrink the LRU lists that contain only a few pages
2062  */
2063 static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
2064                                       int pass, struct scan_control *sc)
2065 {
2066         struct zone *zone;
2067         unsigned long nr_to_scan, ret = 0;
2068         enum lru_list l;
2069
2070         for_each_zone(zone) {
2071
2072                 if (!populated_zone(zone))
2073                         continue;
2074
2075                 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
2076                         continue;
2077
2078                 for_each_evictable_lru(l) {
2079                         /* For pass = 0, we don't shrink the active lists */
2080                         if (pass == 0 &&
2081                                 (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
2082                                 continue;
2083
2084                         zone->lru[l].nr_scan +=
2085                                 (zone_page_state(zone, NR_LRU_BASE + l)
2086                                                                 >> prio) + 1;
2087                         if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
2088                                 zone->lru[l].nr_scan = 0;
2089                                 nr_to_scan = min(nr_pages,
2090                                         zone_page_state(zone,
2091                                                         NR_LRU_BASE + l));
2092                                 ret += shrink_list(l, nr_to_scan, zone,
2093                                                                 sc, prio);
2094                                 if (ret >= nr_pages)
2095                                         return ret;
2096                         }
2097                 }
2098         }
2099
2100         return ret;
2101 }
2102
2103 /*
2104  * Try to free `nr_pages' of memory, system-wide, and return the number of
2105  * freed pages.
2106  *
2107  * Rather than trying to age LRUs, the aim is to preserve the overall
2108  * LRU order by reclaiming preferentially
2109  * inactive > active > active referenced > active mapped
2110  */
2111 unsigned long shrink_all_memory(unsigned long nr_pages)
2112 {
2113         unsigned long lru_pages, nr_slab;
2114         unsigned long ret = 0;
2115         int pass;
2116         struct reclaim_state reclaim_state;
2117         struct scan_control sc = {
2118                 .gfp_mask = GFP_KERNEL,
2119                 .may_swap = 0,
2120                 .swap_cluster_max = nr_pages,
2121                 .may_writepage = 1,
2122                 .swappiness = vm_swappiness,
2123                 .isolate_pages = isolate_pages_global,
2124         };
2125
2126         current->reclaim_state = &reclaim_state;
2127
2128         lru_pages = global_lru_pages();
2129         nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
2130         /* If slab caches are huge, it's better to hit them first */
2131         while (nr_slab >= lru_pages) {
2132                 reclaim_state.reclaimed_slab = 0;
2133                 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
2134                 if (!reclaim_state.reclaimed_slab)
2135                         break;
2136
2137                 ret += reclaim_state.reclaimed_slab;
2138                 if (ret >= nr_pages)
2139                         goto out;
2140
2141                 nr_slab -= reclaim_state.reclaimed_slab;
2142         }
2143
2144         /*
2145          * We try to shrink LRUs in 5 passes:
2146          * 0 = Reclaim from inactive_list only
2147          * 1 = Reclaim from active list but don't reclaim mapped
2148          * 2 = 2nd pass of type 1
2149          * 3 = Reclaim mapped (normal reclaim)
2150          * 4 = 2nd pass of type 3
2151          */
2152         for (pass = 0; pass < 5; pass++) {
2153                 int prio;
2154
2155                 /* Force reclaiming mapped pages in the passes #3 and #4 */
2156                 if (pass > 2) {
2157                         sc.may_swap = 1;
2158                         sc.swappiness = 100;
2159                 }
2160
2161                 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
2162                         unsigned long nr_to_scan = nr_pages - ret;
2163
2164                         sc.nr_scanned = 0;
2165                         ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
2166                         if (ret >= nr_pages)
2167                                 goto out;
2168
2169                         reclaim_state.reclaimed_slab = 0;
2170                         shrink_slab(sc.nr_scanned, sc.gfp_mask,
2171                                         global_lru_pages());
2172                         ret += reclaim_state.reclaimed_slab;
2173                         if (ret >= nr_pages)
2174                                 goto out;
2175
2176                         if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
2177                                 congestion_wait(WRITE, HZ / 10);
2178                 }
2179         }
2180
2181         /*
2182          * If ret = 0, we could not shrink LRUs, but there may be something
2183          * in slab caches
2184          */
2185         if (!ret) {
2186                 do {
2187                         reclaim_state.reclaimed_slab = 0;
2188                         shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
2189                         ret += reclaim_state.reclaimed_slab;
2190                 } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
2191         }
2192
2193 out:
2194         current->reclaim_state = NULL;
2195
2196         return ret;
2197 }
2198 #endif
2199
2200 /* It's optimal to keep kswapds on the same CPUs as their memory, but
2201    not required for correctness.  So if the last cpu in a node goes
2202    away, that node's kswapd may run anywhere; as the first cpu comes back,
2203    we restore its cpu binding. */
2204 static int __devinit cpu_callback(struct notifier_block *nfb,
2205                                   unsigned long action, void *hcpu)
2206 {
2207         int nid;
2208
2209         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2210                 for_each_node_state(nid, N_HIGH_MEMORY) {
2211                         pg_data_t *pgdat = NODE_DATA(nid);
2212                         node_to_cpumask_ptr(mask, pgdat->node_id);
2213
2214                         if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2215                                 /* One of our CPUs online: restore mask */
2216                                 set_cpus_allowed_ptr(pgdat->kswapd, mask);
2217                 }
2218         }
2219         return NOTIFY_OK;
2220 }
2221
2222 /*
2223  * This kswapd start function will be called by init and node-hot-add.
2224  * On node hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
2225  */
2226 int kswapd_run(int nid)
2227 {
2228         pg_data_t *pgdat = NODE_DATA(nid);
2229         int ret = 0;
2230
2231         if (pgdat->kswapd)
2232                 return 0;
2233
2234         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2235         if (IS_ERR(pgdat->kswapd)) {
2236                 /* failure at boot is fatal */
2237                 BUG_ON(system_state == SYSTEM_BOOTING);
2238                 printk("Failed to start kswapd on node %d\n", nid);
2239                 ret = -1;
2240         }
2241         return ret;
2242 }
2243
2244 static int __init kswapd_init(void)
2245 {
2246         int nid;
2247
2248         swap_setup();
2249         for_each_node_state(nid, N_HIGH_MEMORY)
2250                 kswapd_run(nid);
2251         hotcpu_notifier(cpu_callback, 0);
2252         return 0;
2253 }
2254
2255 module_init(kswapd_init)
2256
2257 #ifdef CONFIG_NUMA
2258 /*
2259  * Zone reclaim mode
2260  *
2261  * If non-zero call zone_reclaim when the number of free pages falls below
2262  * the watermarks.
2263  */
2264 int zone_reclaim_mode __read_mostly;
2265
2266 #define RECLAIM_OFF 0
2267 #define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
2268 #define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
2269 #define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */
2270
2271 /*
2272  * Priority for ZONE_RECLAIM. This determines the fraction of pages
2273  * of a node considered for each zone_reclaim. 4 scans 1/16th of
2274  * a zone.
2275  */
2276 #define ZONE_RECLAIM_PRIORITY 4
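
/*
 * (shrink_zone() bases its scan target on lru_pages >> priority before the
 *  anon/file split, so a starting priority of 4 looks at roughly
 *  1/2^4 = 1/16th of the zone's LRU pages per pass.)
 */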
2277
2278 /*
2279  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2280  * occur.
2281  */
2282 int sysctl_min_unmapped_ratio = 1;
2283
2284 /*
2285  * If the number of slab pages in a zone grows beyond this percentage then
2286  * slab reclaim needs to occur.
2287  */
2288 int sysctl_min_slab_ratio = 5;
2289
2290 /*
2291  * Try to free up some pages from this zone through reclaim.
2292  */
2293 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2294 {
2295         /* Minimum pages needed in order to stay on node */
2296         const unsigned long nr_pages = 1 << order;
2297         struct task_struct *p = current;
2298         struct reclaim_state reclaim_state;
2299         int priority;
2300         struct scan_control sc = {
2301                 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2302                 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2303                 .swap_cluster_max = max_t(unsigned long, nr_pages,
2304                                         SWAP_CLUSTER_MAX),
2305                 .gfp_mask = gfp_mask,
2306                 .swappiness = vm_swappiness,
2307                 .isolate_pages = isolate_pages_global,
2308         };
2309         unsigned long slab_reclaimable;
2310
2311         disable_swap_token();
2312         cond_resched();
2313         /*
2314          * We need to be able to allocate from the reserves for RECLAIM_SWAP
2315          * and we also need to be able to write out pages for RECLAIM_WRITE
2316          * and RECLAIM_SWAP.
2317          */
2318         p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2319         reclaim_state.reclaimed_slab = 0;
2320         p->reclaim_state = &reclaim_state;
2321
2322         if (zone_page_state(zone, NR_FILE_PAGES) -
2323                 zone_page_state(zone, NR_FILE_MAPPED) >
2324                 zone->min_unmapped_pages) {
2325                 /*
2326                  * Free memory by calling shrink zone with increasing
2327                  * priorities until we have enough memory freed.
2328                  */
2329                 priority = ZONE_RECLAIM_PRIORITY;
2330                 do {
2331                         note_zone_scanning_priority(zone, priority);
2332                         shrink_zone(priority, zone, &sc);
2333                         priority--;
2334                 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
2335         }
2336
2337         slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2338         if (slab_reclaimable > zone->min_slab_pages) {
2339                 /*
2340                  * shrink_slab() does not currently allow us to determine how
2341                  * many pages were freed in this zone. So we take the current
2342                  * number of slab pages and shake the slab until it is reduced
2343                  * by the same nr_pages that we used for reclaiming unmapped
2344                  * pages.
2345                  *
2346                  * Note that shrink_slab will free memory on all zones and may
2347                  * take a long time.
2348                  */
2349                 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
2350                         zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
2351                                 slab_reclaimable - nr_pages)
2352                         ;
2353
2354                 /*
2355                  * Update nr_reclaimed by the number of slab pages we
2356                  * reclaimed from this zone.
2357                  */
2358                 sc.nr_reclaimed += slab_reclaimable -
2359                         zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2360         }
2361
2362         p->reclaim_state = NULL;
2363         current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2364         return sc.nr_reclaimed >= nr_pages;
2365 }
2366
2367 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2368 {
2369         int node_id;
2370         int ret;
2371
2372         /*
2373          * Zone reclaim reclaims unmapped file backed pages and
2374          * slab pages if we are over the defined limits.
2375          *
2376          * A small portion of unmapped file backed pages is needed for
2377  * file I/O, otherwise pages read by file I/O will be immediately
2378          * thrown out if the zone is overallocated. So we do not reclaim
2379          * if less than a specified percentage of the zone is used by
2380          * unmapped file backed pages.
2381          */
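        /*
         * Illustration (assumed numbers): with the default
         * sysctl_min_unmapped_ratio of 1, min_unmapped_pages works out to
         * roughly 1% of the zone, i.e. about 10000 pages for a zone of one
         * million pages; reclaim only proceeds here when more unmapped
         * file pages than that are present.
         */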
2382         if (zone_page_state(zone, NR_FILE_PAGES) -
2383             zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
2384             && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
2385                         <= zone->min_slab_pages)
2386                 return 0;
2387
2388         if (zone_is_all_unreclaimable(zone))
2389                 return 0;
2390
2391         /*
2392          * Do not scan if the allocation should not be delayed.
2393          */
2394         if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2395                         return 0;
2396
2397         /*
2398          * Only run zone reclaim on the local zone or on zones that do not
2399          * have associated processors. This will favor the local processor
2400          * over remote processors and spread off node memory allocations
2401          * as wide as possible.
2402          */
2403         node_id = zone_to_nid(zone);
2404         if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2405                 return 0;
2406
2407         if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2408                 return 0;
2409         ret = __zone_reclaim(zone, gfp_mask, order);
2410         zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2411
2412         return ret;
2413 }
2414 #endif
2415
2416 #ifdef CONFIG_UNEVICTABLE_LRU
2417 /*
2418  * page_evictable - test whether a page is evictable
2419  * @page: the page to test
2420  * @vma: the VMA in which the page is or will be mapped, may be NULL
2421  *
2422  * Test whether page is evictable--i.e., should be placed on active/inactive
2423  * lists vs unevictable list.  The vma argument is !NULL when called from the
2424  * fault path to determine how to instantiate a new page.
2425  *
2426  * Reasons page might not be evictable:
2427  * (1) page's mapping marked unevictable
2428  * (2) page is part of an mlocked VMA
2429  *
2430  */
2431 int page_evictable(struct page *page, struct vm_area_struct *vma)
2432 {
2433
2434         if (mapping_unevictable(page_mapping(page)))
2435                 return 0;
2436
2437         if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
2438                 return 0;
2439
2440         return 1;
2441 }
2442
2443 /**
2444  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
2445  * @page: page to check evictability and move to appropriate lru list
2446  * @zone: zone page is in
2447  *
2448  * Checks a page for evictability and moves the page to the appropriate
2449  * zone lru list.
2450  *
2451  * Restrictions: zone->lru_lock must be held, page must be on LRU and must
2452  * have PageUnevictable set.
2453  */
2454 static void check_move_unevictable_page(struct page *page, struct zone *zone)
2455 {
2456         VM_BUG_ON(PageActive(page));
2457
2458 retry:
2459         ClearPageUnevictable(page);
2460         if (page_evictable(page, NULL)) {
2461                 enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
2462
2463                 __dec_zone_state(zone, NR_UNEVICTABLE);
2464                 list_move(&page->lru, &zone->lru[l].list);
2465                 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
2466                 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
2467                 __count_vm_event(UNEVICTABLE_PGRESCUED);
2468         } else {
2469                 /*
2470                  * rotate unevictable list
2471                  */
2472                 SetPageUnevictable(page);
2473                 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
2474                 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
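                /*
                 * Re-check after putting the page back: if it became
                 * evictable again in the meantime (e.g. it was just
                 * munlocked), pull it straight back off the unevictable
                 * list rather than leaving it stranded until the next scan.
                 */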
2475                 if (page_evictable(page, NULL))
2476                         goto retry;
2477         }
2478 }
2479
2480 /**
2481  * scan_mapping_unevictable_pages - scan an address space for evictable pages
2482  * @mapping: struct address_space to scan for evictable pages
2483  *
2484  * Scan all pages in mapping.  Check unevictable pages for
2485  * evictability and move them to the appropriate zone lru list.
2486  */
2487 void scan_mapping_unevictable_pages(struct address_space *mapping)
2488 {
2489         pgoff_t next = 0;
2490         pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2491                          PAGE_CACHE_SHIFT;
2492         struct zone *zone;
2493         struct pagevec pvec;
2494
2495         if (mapping->nrpages == 0)
2496                 return;
2497
2498         pagevec_init(&pvec, 0);
2499         while (next < end &&
2500                 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2501                 int i;
2502                 int pg_scanned = 0;
2503
2504                 zone = NULL;
2505
2506                 for (i = 0; i < pagevec_count(&pvec); i++) {
2507                         struct page *page = pvec.pages[i];
2508                         pgoff_t page_index = page->index;
2509                         struct zone *pagezone = page_zone(page);
2510
2511                         pg_scanned++;
2512                         if (page_index > next)
2513                                 next = page_index;
2514                         next++;
2515
2516                         if (pagezone != zone) {
2517                                 if (zone)
2518                                         spin_unlock_irq(&zone->lru_lock);
2519                                 zone = pagezone;
2520                                 spin_lock_irq(&zone->lru_lock);
2521                         }
2522
2523                         if (PageLRU(page) && PageUnevictable(page))
2524                                 check_move_unevictable_page(page, zone);
2525                 }
2526                 if (zone)
2527                         spin_unlock_irq(&zone->lru_lock);
2528                 pagevec_release(&pvec);
2529
2530                 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2531         }
2532
2533 }
2534
2535 /**
2536  * scan_zone_unevictable_pages - check unevictable list for evictable pages
2537  * @zone: zone whose unevictable list is to be scanned
2538  *
2539  * Scan @zone's unevictable LRU lists to check for pages that have become
2540  * evictable.  Move those that have to @zone's inactive list where they
2541  * become candidates for reclaim, unless shrink_inactive_list() decides
2542  * to reactivate them.  Pages that are still unevictable are rotated
2543  * back onto @zone's unevictable list.
2544  */
2545 #define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
2546 static void scan_zone_unevictable_pages(struct zone *zone)
2547 {
2548         struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
2549         unsigned long scan;
2550         unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
2551
2552         while (nr_to_scan > 0) {
2553                 unsigned long batch_size = min(nr_to_scan,
2554                                                 SCAN_UNEVICTABLE_BATCH_SIZE);
2555
2556                 spin_lock_irq(&zone->lru_lock);
2557                 for (scan = 0;  scan < batch_size; scan++) {
2558                         struct page *page = lru_to_page(l_unevictable);
2559
2560                         if (!trylock_page(page))
2561                                 continue;
2562
2563                         prefetchw_prev_lru_page(page, l_unevictable, flags);
2564
2565                         if (likely(PageLRU(page) && PageUnevictable(page)))
2566                                 check_move_unevictable_page(page, zone);
2567
2568                         unlock_page(page);
2569                 }
2570                 spin_unlock_irq(&zone->lru_lock);
2571
2572                 nr_to_scan -= batch_size;
2573         }
2574 }
2575
2576
2577 /**
2578  * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
2579  *
2580  * A really big hammer:  scan all zones' unevictable LRU lists to check for
2581  * pages that have become evictable.  Move those back to the zones'
2582  * inactive list where they become candidates for reclaim.
2583  * This occurs when, e.g., we have unswappable pages on the unevictable lists,
2584  * and we add swap to the system.  As such, it runs in the context of a task
2585  * that has possibly/probably made some previously unevictable pages
2586  * evictable.
2587  */
2588 static void scan_all_zones_unevictable_pages(void)
2589 {
2590         struct zone *zone;
2591
2592         for_each_zone(zone) {
2593                 scan_zone_unevictable_pages(zone);
2594         }
2595 }
2596
2597 /*
2598  * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
2599  * all nodes' unevictable lists for evictable pages
2600  */
2601 unsigned long scan_unevictable_pages;
2602
2603 int scan_unevictable_handler(struct ctl_table *table, int write,
2604                            struct file *file, void __user *buffer,
2605                            size_t *length, loff_t *ppos)
2606 {
2607         proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
2608
2609         if (write && *(unsigned long *)table->data)
2610                 scan_all_zones_unevictable_pages();
2611
2612         scan_unevictable_pages = 0;
2613         return 0;
2614 }
2615
2616 /*
2617  * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
2618  * a specified node's per zone unevictable lists for evictable pages.
2619  */
2620
2621 static ssize_t read_scan_unevictable_node(struct sys_device *dev,
2622                                           struct sysdev_attribute *attr,
2623                                           char *buf)
2624 {
2625         return sprintf(buf, "0\n");     /* always zero; should fit... */
2626 }
2627
2628 static ssize_t write_scan_unevictable_node(struct sys_device *dev,
2629                                            struct sysdev_attribute *attr,
2630                                         const char *buf, size_t count)
2631 {
2632         struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
2633         struct zone *zone;
2634         unsigned long res;
2635         int err = strict_strtoul(buf, 10, &res);
2636
2637         if (err || !res)
2638                 return 1;       /* parse error or zero is a no-op */
2639
2640         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2641                 if (!populated_zone(zone))
2642                         continue;
2643                 scan_zone_unevictable_pages(zone);
2644         }
2645         return 1;
2646 }
2647
2648
2649 static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
2650                         read_scan_unevictable_node,
2651                         write_scan_unevictable_node);
2652
2653 int scan_unevictable_register_node(struct node *node)
2654 {
2655         return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
2656 }
2657
2658 void scan_unevictable_unregister_node(struct node *node)
2659 {
2660         sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
2661 }
2662
2663 #endif