Lumpy Reclaim V4
1 /*
2  *  linux/mm/vmscan.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  *
6  *  Swap reorganised 29.12.95, Stephen Tweedie.
7  *  kswapd added: 7.1.96  sct
8  *  Removed kswapd_ctl limits, and swap out as many pages as needed
9  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11  *  Multiqueue VM started 5.8.00, Rik van Riel.
12  */
13
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/init.h>
21 #include <linux/highmem.h>
22 #include <linux/vmstat.h>
23 #include <linux/file.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/buffer_head.h>  /* for try_to_release_page(),
27                                         buffer_heads_over_limit */
28 #include <linux/mm_inline.h>
29 #include <linux/pagevec.h>
30 #include <linux/backing-dev.h>
31 #include <linux/rmap.h>
32 #include <linux/topology.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/notifier.h>
36 #include <linux/rwsem.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/freezer.h>
40
41 #include <asm/tlbflush.h>
42 #include <asm/div64.h>
43
44 #include <linux/swapops.h>
45
46 #include "internal.h"
47
48 struct scan_control {
49         /* Incremented by the number of inactive pages that were scanned */
50         unsigned long nr_scanned;
51
52         /* This context's GFP mask */
53         gfp_t gfp_mask;
54
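        /* Can reclaim write dirty pages back via pageout()/->writepage? */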
55         int may_writepage;
56
57         /* Can pages be swapped as part of reclaim? */
58         int may_swap;
59
60         /* This context's SWAP_CLUSTER_MAX. If freeing memory for
61          * suspend, we effectively ignore SWAP_CLUSTER_MAX.
62          * In this context, it doesn't matter that we scan the
63          * whole list at once. */
64         int swap_cluster_max;
65
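        /* Per-reclaim swappiness (0..100); feeds the swap_tendency calculation */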
66         int swappiness;
67
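        /* Set when every zone looked at was skipped as unreclaimable */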
68         int all_unreclaimable;
69
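        /* Order of the allocation we are reclaiming for; >0 enables lumpy reclaim */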
70         int order;
71 };
72
73 /*
74  * The list of shrinker callbacks used to apply pressure to
75  * ageable caches.
76  */
77 struct shrinker {
78         shrinker_t              shrinker;
79         struct list_head        list;
80         int                     seeks;  /* seeks to recreate an obj */
81         long                    nr;     /* objs pending delete */
82 };
83
84 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
85
86 #ifdef ARCH_HAS_PREFETCH
87 #define prefetch_prev_lru_page(_page, _base, _field)                    \
88         do {                                                            \
89                 if ((_page)->lru.prev != _base) {                       \
90                         struct page *prev;                              \
91                                                                         \
92                         prev = lru_to_page(&(_page->lru));              \
93                         prefetch(&prev->_field);                        \
94                 }                                                       \
95         } while (0)
96 #else
97 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
98 #endif
99
100 #ifdef ARCH_HAS_PREFETCHW
101 #define prefetchw_prev_lru_page(_page, _base, _field)                   \
102         do {                                                            \
103                 if ((_page)->lru.prev != _base) {                       \
104                         struct page *prev;                              \
105                                                                         \
106                         prev = lru_to_page(&(_page->lru));              \
107                         prefetchw(&prev->_field);                       \
108                 }                                                       \
109         } while (0)
110 #else
111 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
112 #endif
113
114 /*
115  * From 0 .. 100.  Higher means more swappy.
116  */
117 int vm_swappiness = 60;
118 long vm_total_pages;    /* The total number of pages which the VM controls */
119
120 static LIST_HEAD(shrinker_list);
121 static DECLARE_RWSEM(shrinker_rwsem);
122
123 /*
124  * Add a shrinker callback to be called from the vm
125  */
126 struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
127 {
128         struct shrinker *shrinker;
129
130         shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
131         if (shrinker) {
132                 shrinker->shrinker = theshrinker;
133                 shrinker->seeks = seeks;
134                 shrinker->nr = 0;
135                 down_write(&shrinker_rwsem);
136                 list_add_tail(&shrinker->list, &shrinker_list);
137                 up_write(&shrinker_rwsem);
138         }
139         return shrinker;
140 }
141 EXPORT_SYMBOL(set_shrinker);
142
143 /*
144  * Remove one
145  */
146 void remove_shrinker(struct shrinker *shrinker)
147 {
148         down_write(&shrinker_rwsem);
149         list_del(&shrinker->list);
150         up_write(&shrinker_rwsem);
151         kfree(shrinker);
152 }
153 EXPORT_SYMBOL(remove_shrinker);
154
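/*
 * Example usage (my_cache_shrink is hypothetical, for illustration only):
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask);
 *	...
 *	struct shrinker *s = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
 *	...
 *	remove_shrinker(s);
 *
 * The callback is queried with nr_to_scan == 0 for its current object count
 * and is otherwise asked to free up to nr_to_scan objects; it returns the
 * number of objects remaining, or -1 if it cannot shrink right now (see
 * shrink_slab() below).
 */
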
155 #define SHRINK_BATCH 128
156 /*
157  * Call the shrink functions to age shrinkable caches
158  *
159  * Here we assume it costs one seek to replace a lru page and that it also
160  * takes a seek to recreate a cache object.  With this in mind we age equal
161  * percentages of the lru and ageable caches.  This should balance the seeks
162  * generated by these structures.
163  *
164  * If the vm encountered mapped pages on the LRU, it increases the pressure on
165  * slab to avoid swapping.
166  *
167  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
168  *
169  * `lru_pages' represents the number of on-LRU pages in all the zones which
170  * are eligible for the caller's allocation attempt.  It is used for balancing
171  * slab reclaim versus page reclaim.
172  *
173  * Returns the number of slab objects which we shrunk.
174  */
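/*
 * Worked example (illustrative numbers): with scanned = 128, seeks = 2,
 * max_pass = 10000 freeable objects and lru_pages = 100000, the loop below
 * adds delta = (4 * 128 / 2) * 10000 / (100000 + 1) ~= 25 to shrinker->nr;
 * the shrinker itself is only called once the accumulated nr reaches
 * SHRINK_BATCH, and any remainder is carried over to the next pass.
 */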
175 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
176                         unsigned long lru_pages)
177 {
178         struct shrinker *shrinker;
179         unsigned long ret = 0;
180
181         if (scanned == 0)
182                 scanned = SWAP_CLUSTER_MAX;
183
184         if (!down_read_trylock(&shrinker_rwsem))
185                 return 1;       /* Assume we'll be able to shrink next time */
186
187         list_for_each_entry(shrinker, &shrinker_list, list) {
188                 unsigned long long delta;
189                 unsigned long total_scan;
190                 unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
191
192                 delta = (4 * scanned) / shrinker->seeks;
193                 delta *= max_pass;
194                 do_div(delta, lru_pages + 1);
195                 shrinker->nr += delta;
196                 if (shrinker->nr < 0) {
197                         printk(KERN_ERR "%s: nr=%ld\n",
198                                         __FUNCTION__, shrinker->nr);
199                         shrinker->nr = max_pass;
200                 }
201
202                 /*
203                  * Avoid risking looping forever due to a too-large nr value:
204                  * never try to free more than twice the estimated number of
205                  * freeable entries.
206                  */
207                 if (shrinker->nr > max_pass * 2)
208                         shrinker->nr = max_pass * 2;
209
210                 total_scan = shrinker->nr;
211                 shrinker->nr = 0;
212
213                 while (total_scan >= SHRINK_BATCH) {
214                         long this_scan = SHRINK_BATCH;
215                         int shrink_ret;
216                         int nr_before;
217
218                         nr_before = (*shrinker->shrinker)(0, gfp_mask);
219                         shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
220                         if (shrink_ret == -1)
221                                 break;
222                         if (shrink_ret < nr_before)
223                                 ret += nr_before - shrink_ret;
224                         count_vm_events(SLABS_SCANNED, this_scan);
225                         total_scan -= this_scan;
226
227                         cond_resched();
228                 }
229
230                 shrinker->nr += total_scan;
231         }
232         up_read(&shrinker_rwsem);
233         return ret;
234 }
235
236 /* Called without lock on whether page is mapped, so answer is unstable */
237 static inline int page_mapping_inuse(struct page *page)
238 {
239         struct address_space *mapping;
240
241         /* Page is in somebody's page tables. */
242         if (page_mapped(page))
243                 return 1;
244
245         /* Be more reluctant to reclaim swapcache than pagecache */
246         if (PageSwapCache(page))
247                 return 1;
248
249         mapping = page_mapping(page);
250         if (!mapping)
251                 return 0;
252
253         /* File is mmap'd by somebody? */
254         return mapping_mapped(mapping);
255 }
256
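/*
 * A pagecache page is freeable when the only references are the page cache's
 * and the caller's (page_count() == 2), ignoring one extra reference taken
 * by attached buffers (PagePrivate).
 */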
257 static inline int is_page_cache_freeable(struct page *page)
258 {
259         return page_count(page) - !!PagePrivate(page) == 2;
260 }
261
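/*
 * May we write pages to this backing device from reclaim?  Yes for dedicated
 * swap-out threads (PF_SWAPWRITE), when the device is not write-congested,
 * or when the caller is itself writing to this device
 * (current->backing_dev_info).
 */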
262 static int may_write_to_queue(struct backing_dev_info *bdi)
263 {
264         if (current->flags & PF_SWAPWRITE)
265                 return 1;
266         if (!bdi_write_congested(bdi))
267                 return 1;
268         if (bdi == current->backing_dev_info)
269                 return 1;
270         return 0;
271 }
272
273 /*
274  * We detected a synchronous write error writing a page out.  Probably
275  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
276  * fsync(), msync() or close().
277  *
278  * The tricky part is that after writepage we cannot touch the mapping: nothing
279  * prevents it from being freed up.  But we have a ref on the page and once
280  * that page is locked, the mapping is pinned.
281  *
282  * We're allowed to run sleeping lock_page() here because we know the caller has
283  * __GFP_FS.
284  */
285 static void handle_write_error(struct address_space *mapping,
286                                 struct page *page, int error)
287 {
288         lock_page(page);
289         if (page_mapping(page) == mapping)
290                 mapping_set_error(mapping, error);
291         unlock_page(page);
292 }
293
294 /* possible outcome of pageout() */
295 typedef enum {
296         /* failed to write page out, page is locked */
297         PAGE_KEEP,
298         /* move page to the active list, page is locked */
299         PAGE_ACTIVATE,
300         /* page has been sent to the disk successfully, page is unlocked */
301         PAGE_SUCCESS,
302         /* page is clean and locked */
303         PAGE_CLEAN,
304 } pageout_t;
305
306 /*
307  * pageout is called by shrink_page_list() for each dirty page.
308  * Calls ->writepage().
309  */
310 static pageout_t pageout(struct page *page, struct address_space *mapping)
311 {
312         /*
313          * If the page is dirty, only perform writeback if that write
314          * will be non-blocking, to prevent this allocation from being
315          * stalled by pagecache activity.  But note that there may be
316          * stalls if we need to run get_block().  We could test
317          * PagePrivate for that.
318          *
319          * If this process is currently in generic_file_write() against
320          * this page's queue, we can perform writeback even if that
321          * will block.
322          *
323          * If the page is swapcache, write it back even if that would
324          * block, for some throttling. This happens by accident, because
325          * swap_backing_dev_info is bust: it doesn't reflect the
326          * congestion state of the swapdevs.  Easy to fix, if needed.
327          * See swapfile.c:page_queue_congested().
328          */
329         if (!is_page_cache_freeable(page))
330                 return PAGE_KEEP;
331         if (!mapping) {
332                 /*
333                  * Some data journaling orphaned pages can have
334                  * page->mapping == NULL while being dirty with clean buffers.
335                  */
336                 if (PagePrivate(page)) {
337                         if (try_to_free_buffers(page)) {
338                                 ClearPageDirty(page);
339                                 printk("%s: orphaned page\n", __FUNCTION__);
340                                 return PAGE_CLEAN;
341                         }
342                 }
343                 return PAGE_KEEP;
344         }
345         if (mapping->a_ops->writepage == NULL)
346                 return PAGE_ACTIVATE;
347         if (!may_write_to_queue(mapping->backing_dev_info))
348                 return PAGE_KEEP;
349
350         if (clear_page_dirty_for_io(page)) {
351                 int res;
352                 struct writeback_control wbc = {
353                         .sync_mode = WB_SYNC_NONE,
354                         .nr_to_write = SWAP_CLUSTER_MAX,
355                         .range_start = 0,
356                         .range_end = LLONG_MAX,
357                         .nonblocking = 1,
358                         .for_reclaim = 1,
359                 };
360
361                 SetPageReclaim(page);
362                 res = mapping->a_ops->writepage(page, &wbc);
363                 if (res < 0)
364                         handle_write_error(mapping, page, res);
365                 if (res == AOP_WRITEPAGE_ACTIVATE) {
366                         ClearPageReclaim(page);
367                         return PAGE_ACTIVATE;
368                 }
369                 if (!PageWriteback(page)) {
370                         /* synchronous write or broken a_ops? */
371                         ClearPageReclaim(page);
372                 }
373                 inc_zone_page_state(page, NR_VMSCAN_WRITE);
374                 return PAGE_SUCCESS;
375         }
376
377         return PAGE_CLEAN;
378 }
379
380 /*
381  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
382  * someone else has a ref on the page, abort and return 0.  If it was
383  * successfully detached, return 1.  Assumes the caller has a single ref on
384  * this page.
385  */
386 int remove_mapping(struct address_space *mapping, struct page *page)
387 {
388         BUG_ON(!PageLocked(page));
389         BUG_ON(mapping != page_mapping(page));
390
391         write_lock_irq(&mapping->tree_lock);
392         /*
393          * The non-racy check for a busy page.
394          *
395          * Must be careful with the order of the tests. When someone has
396          * a ref to the page, it may be possible that they dirty it then
397          * drop the reference. So if PageDirty is tested before page_count
398          * here, then the following race may occur:
399          *
400          * get_user_pages(&page);
401          * [user mapping goes away]
402          * write_to(page);
403          *                              !PageDirty(page)    [good]
404          * SetPageDirty(page);
405          * put_page(page);
406          *                              !page_count(page)   [good, discard it]
407          *
408          * [oops, our write_to data is lost]
409          *
410          * Reversing the order of the tests ensures such a situation cannot
411          * escape unnoticed. The smp_rmb is needed to ensure the page->flags
412          * load is not satisfied before that of page->_count.
413          *
414          * Note that if SetPageDirty is always performed via set_page_dirty,
415          * and thus under tree_lock, then this ordering is not required.
416          */
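        /*
         * A count of 2 here means: the caller's reference plus the page
         * cache (or swap cache) reference, and nothing else.
         */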
417         if (unlikely(page_count(page) != 2))
418                 goto cannot_free;
419         smp_rmb();
420         if (unlikely(PageDirty(page)))
421                 goto cannot_free;
422
423         if (PageSwapCache(page)) {
424                 swp_entry_t swap = { .val = page_private(page) };
425                 __delete_from_swap_cache(page);
426                 write_unlock_irq(&mapping->tree_lock);
427                 swap_free(swap);
428                 __put_page(page);       /* The pagecache ref */
429                 return 1;
430         }
431
432         __remove_from_page_cache(page);
433         write_unlock_irq(&mapping->tree_lock);
434         __put_page(page);
435         return 1;
436
437 cannot_free:
438         write_unlock_irq(&mapping->tree_lock);
439         return 0;
440 }
441
442 /*
443  * shrink_page_list() returns the number of reclaimed pages
444  */
445 static unsigned long shrink_page_list(struct list_head *page_list,
446                                         struct scan_control *sc)
447 {
448         LIST_HEAD(ret_pages);
449         struct pagevec freed_pvec;
450         int pgactivate = 0;
451         unsigned long nr_reclaimed = 0;
452
453         cond_resched();
454
455         pagevec_init(&freed_pvec, 1);
456         while (!list_empty(page_list)) {
457                 struct address_space *mapping;
458                 struct page *page;
459                 int may_enter_fs;
460                 int referenced;
461
462                 cond_resched();
463
464                 page = lru_to_page(page_list);
465                 list_del(&page->lru);
466
467                 if (TestSetPageLocked(page))
468                         goto keep;
469
470                 VM_BUG_ON(PageActive(page));
471
472                 sc->nr_scanned++;
473
474                 if (!sc->may_swap && page_mapped(page))
475                         goto keep_locked;
476
477                 /* Double the slab pressure for mapped and swapcache pages */
478                 if (page_mapped(page) || PageSwapCache(page))
479                         sc->nr_scanned++;
480
481                 if (PageWriteback(page))
482                         goto keep_locked;
483
484                 referenced = page_referenced(page, 1);
485                 /* In active use or really unfreeable?  Activate it. */
486                 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
487                                         referenced && page_mapping_inuse(page))
488                         goto activate_locked;
489
490 #ifdef CONFIG_SWAP
491                 /*
492                  * Anonymous process memory has backing store?
493                  * Try to allocate it some swap space here.
494                  */
495                 if (PageAnon(page) && !PageSwapCache(page))
496                         if (!add_to_swap(page, GFP_ATOMIC))
497                                 goto activate_locked;
498 #endif /* CONFIG_SWAP */
499
500                 mapping = page_mapping(page);
501                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
502                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
503
504                 /*
505                  * The page is mapped into the page tables of one or more
506                  * processes. Try to unmap it here.
507                  */
508                 if (page_mapped(page) && mapping) {
509                         switch (try_to_unmap(page, 0)) {
510                         case SWAP_FAIL:
511                                 goto activate_locked;
512                         case SWAP_AGAIN:
513                                 goto keep_locked;
514                         case SWAP_SUCCESS:
515                                 ; /* try to free the page below */
516                         }
517                 }
518
519                 if (PageDirty(page)) {
520                         if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
521                                 goto keep_locked;
522                         if (!may_enter_fs)
523                                 goto keep_locked;
524                         if (!sc->may_writepage)
525                                 goto keep_locked;
526
527                         /* Page is dirty, try to write it out here */
528                         switch(pageout(page, mapping)) {
529                         case PAGE_KEEP:
530                                 goto keep_locked;
531                         case PAGE_ACTIVATE:
532                                 goto activate_locked;
533                         case PAGE_SUCCESS:
534                                 if (PageWriteback(page) || PageDirty(page))
535                                         goto keep;
536                                 /*
537                                  * A synchronous write - probably a ramdisk.  Go
538                                  * ahead and try to reclaim the page.
539                                  */
540                                 if (TestSetPageLocked(page))
541                                         goto keep;
542                                 if (PageDirty(page) || PageWriteback(page))
543                                         goto keep_locked;
544                                 mapping = page_mapping(page);
545                         case PAGE_CLEAN:
546                                 ; /* try to free the page below */
547                         }
548                 }
549
550                 /*
551                  * If the page has buffers, try to free the buffer mappings
552                  * associated with this page. If we succeed we try to free
553                  * the page as well.
554                  *
555                  * We do this even if the page is PageDirty().
556                  * try_to_release_page() does not perform I/O, but it is
557                  * possible for a page to have PageDirty set, but it is actually
558                  * clean (all its buffers are clean).  This happens if the
559                  * buffers were written out directly, with submit_bh(). ext3
560                  * will do this, as well as the blockdev mapping. 
561                  * try_to_release_page() will discover that cleanness and will
562                  * drop the buffers and mark the page clean - it can be freed.
563                  *
564                  * Rarely, pages can have buffers and no ->mapping.  These are
565                  * the pages which were not successfully invalidated in
566                  * truncate_complete_page().  We try to drop those buffers here
567                  * and if that worked, and the page is no longer mapped into
568                  * process address space (page_count == 1) it can be freed.
569                  * Otherwise, leave the page on the LRU so it is swappable.
570                  */
571                 if (PagePrivate(page)) {
572                         if (!try_to_release_page(page, sc->gfp_mask))
573                                 goto activate_locked;
574                         if (!mapping && page_count(page) == 1)
575                                 goto free_it;
576                 }
577
578                 if (!mapping || !remove_mapping(mapping, page))
579                         goto keep_locked;
580
581 free_it:
582                 unlock_page(page);
583                 nr_reclaimed++;
584                 if (!pagevec_add(&freed_pvec, page))
585                         __pagevec_release_nonlru(&freed_pvec);
586                 continue;
587
588 activate_locked:
589                 SetPageActive(page);
590                 pgactivate++;
591 keep_locked:
592                 unlock_page(page);
593 keep:
594                 list_add(&page->lru, &ret_pages);
595                 VM_BUG_ON(PageLRU(page));
596         }
597         list_splice(&ret_pages, page_list);
598         if (pagevec_count(&freed_pvec))
599                 __pagevec_release_nonlru(&freed_pvec);
600         count_vm_events(PGACTIVATE, pgactivate);
601         return nr_reclaimed;
602 }
603
604 /* LRU Isolation modes. */
605 #define ISOLATE_INACTIVE 0      /* Isolate inactive pages. */
606 #define ISOLATE_ACTIVE 1        /* Isolate active pages. */
607 #define ISOLATE_BOTH 2          /* Isolate both active and inactive pages. */
608
609 /*
610  * Attempt to remove the specified page from its LRU.  Only take this page
611  * if it is of the appropriate PageActive status.  Pages which are being
612  * freed elsewhere are also ignored.
613  *
614  * page:        page to consider
615  * mode:        one of the LRU isolation modes defined above
616  *
617  * returns 0 on success, -ve errno on failure.
618  */
619 static int __isolate_lru_page(struct page *page, int mode)
620 {
621         int ret = -EINVAL;
622
623         /* Only take pages on the LRU. */
624         if (!PageLRU(page))
625                 return ret;
626
627         /*
628          * When checking the active state, we need to be sure we are
629          * dealing with comparable boolean values.  Take the logical not
630          * of each.
631          */
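        /*
         * e.g. ISOLATE_ACTIVE (1) matches only PageActive pages, while
         * ISOLATE_INACTIVE (0) matches only !PageActive pages.
         */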
632         if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
633                 return ret;
634
635         ret = -EBUSY;
636         if (likely(get_page_unless_zero(page))) {
637                 /*
638                  * Be careful not to clear PageLRU until after we're
639                  * sure the page is not being freed elsewhere -- the
640                  * page release code relies on it.
641                  */
642                 ClearPageLRU(page);
643                 ret = 0;
644         }
645
646         return ret;
647 }
648
649 /*
650  * zone->lru_lock is heavily contended.  Some of the functions that
651  * shrink the lists perform better by taking out a batch of pages
652  * and working on them outside the LRU lock.
653  *
654  * For pagecache intensive workloads, this function is the hottest
655  * spot in the kernel (apart from copy_*_user functions).
656  *
657  * Appropriate locks must be held before calling this function.
658  *
659  * @nr_to_scan: The number of pages to look through on the list.
660  * @src:        The LRU list to pull pages off.
661  * @dst:        The temp list to put pages on to.
662  * @scanned:    The number of pages that were scanned.
663  * @order:      The caller's attempted allocation order
664  * @mode:       One of the LRU isolation modes
665  *
666  * returns how many pages were moved onto *@dst.
667  */
668 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
669                 struct list_head *src, struct list_head *dst,
670                 unsigned long *scanned, int order, int mode)
671 {
672         unsigned long nr_taken = 0;
673         unsigned long scan;
674
675         for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
676                 struct page *page;
677                 unsigned long pfn;
678                 unsigned long end_pfn;
679                 unsigned long page_pfn;
680                 int zone_id;
681
682                 page = lru_to_page(src);
683                 prefetchw_prev_lru_page(page, src, flags);
684
685                 VM_BUG_ON(!PageLRU(page));
686
687                 switch (__isolate_lru_page(page, mode)) {
688                 case 0:
689                         list_move(&page->lru, dst);
690                         nr_taken++;
691                         break;
692
693                 case -EBUSY:
694                         /* else it is being freed elsewhere */
695                         list_move(&page->lru, src);
696                         continue;
697
698                 default:
699                         BUG();
700                 }
701
702                 if (!order)
703                         continue;
704
705                 /*
706                  * Attempt to take all pages in the order aligned region
707                  * surrounding the tag page.  Only take those pages of
708                  * the same active state as that tag page.  We may safely
709                  * round the target page pfn down to the requested order
710          * as the mem_map is guaranteed valid out to MAX_ORDER.  Where a
711          * page is found to be in a different zone we will detect it from
712          * its zone id and abort this block scan.
713                  */
714                 zone_id = page_zone_id(page);
715                 page_pfn = page_to_pfn(page);
716                 pfn = page_pfn & ~((1 << order) - 1);
717                 end_pfn = pfn + (1 << order);
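                /*
                 * Example (illustrative pfn): order = 3 and page_pfn = 0x2a5
                 * give pfn = 0x2a0 and end_pfn = 0x2a8, i.e. the naturally
                 * aligned 8-page block containing the tag page.
                 */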
718                 for (; pfn < end_pfn; pfn++) {
719                         struct page *cursor_page;
720
721                         /* The target page is in the block, ignore it. */
722                         if (unlikely(pfn == page_pfn))
723                                 continue;
724
725                         /* Avoid holes within the zone. */
726                         if (unlikely(!pfn_valid_within(pfn)))
727                                 break;
728
729                         cursor_page = pfn_to_page(pfn);
730                         /* Check that we have not crossed a zone boundary. */
731                         if (unlikely(page_zone_id(cursor_page) != zone_id))
732                                 continue;
733                         switch (__isolate_lru_page(cursor_page, mode)) {
734                         case 0:
735                                 list_move(&cursor_page->lru, dst);
736                                 nr_taken++;
737                                 scan++;
738                                 break;
739
740                         case -EBUSY:
741                                 /* else it is being freed elsewhere */
742                                 list_move(&cursor_page->lru, src);
743                         default:
744                                 break;
745                         }
746                 }
747         }
748
749         *scanned = scan;
750         return nr_taken;
751 }
752
753 /*
754  * clear_active_flags() is a helper for shrink_active_list(), clearing
755  * any active bits from the pages in the list.
756  */
757 static unsigned long clear_active_flags(struct list_head *page_list)
758 {
759         int nr_active = 0;
760         struct page *page;
761
762         list_for_each_entry(page, page_list, lru)
763                 if (PageActive(page)) {
764                         ClearPageActive(page);
765                         nr_active++;
766                 }
767
768         return nr_active;
769 }
770
771 /*
772  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
773  * of reclaimed pages
774  */
775 static unsigned long shrink_inactive_list(unsigned long max_scan,
776                                 struct zone *zone, struct scan_control *sc)
777 {
778         LIST_HEAD(page_list);
779         struct pagevec pvec;
780         unsigned long nr_scanned = 0;
781         unsigned long nr_reclaimed = 0;
782
783         pagevec_init(&pvec, 1);
784
785         lru_add_drain();
786         spin_lock_irq(&zone->lru_lock);
787         do {
788                 struct page *page;
789                 unsigned long nr_taken;
790                 unsigned long nr_scan;
791                 unsigned long nr_freed;
792                 unsigned long nr_active;
793
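                /*
                 * For orders above PAGE_ALLOC_COSTLY_ORDER, lumpy reclaim
                 * isolates active pages as well, so that whole aligned blocks
                 * can be freed for the higher-order allocation.
                 */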
794                 nr_taken = isolate_lru_pages(sc->swap_cluster_max,
795                              &zone->inactive_list,
796                              &page_list, &nr_scan, sc->order,
797                              (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
798                                              ISOLATE_BOTH : ISOLATE_INACTIVE);
799                 nr_active = clear_active_flags(&page_list);
800
801                 __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
802                 __mod_zone_page_state(zone, NR_INACTIVE,
803                                                 -(nr_taken - nr_active));
804                 zone->pages_scanned += nr_scan;
805                 spin_unlock_irq(&zone->lru_lock);
806
807                 nr_scanned += nr_scan;
808                 nr_freed = shrink_page_list(&page_list, sc);
809                 nr_reclaimed += nr_freed;
810                 local_irq_disable();
811                 if (current_is_kswapd()) {
812                         __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
813                         __count_vm_events(KSWAPD_STEAL, nr_freed);
814                 } else
815                         __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
816                 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
817
818                 if (nr_taken == 0)
819                         goto done;
820
821                 spin_lock(&zone->lru_lock);
822                 /*
823                  * Put back any unfreeable pages.
824                  */
825                 while (!list_empty(&page_list)) {
826                         page = lru_to_page(&page_list);
827                         VM_BUG_ON(PageLRU(page));
828                         SetPageLRU(page);
829                         list_del(&page->lru);
830                         if (PageActive(page))
831                                 add_page_to_active_list(zone, page);
832                         else
833                                 add_page_to_inactive_list(zone, page);
834                         if (!pagevec_add(&pvec, page)) {
835                                 spin_unlock_irq(&zone->lru_lock);
836                                 __pagevec_release(&pvec);
837                                 spin_lock_irq(&zone->lru_lock);
838                         }
839                 }
840         } while (nr_scanned < max_scan);
841         spin_unlock(&zone->lru_lock);
842 done:
843         local_irq_enable();
844         pagevec_release(&pvec);
845         return nr_reclaimed;
846 }
847
848 /*
849  * We are about to scan this zone at a certain priority level.  If that priority
850  * level is smaller (i.e. more urgent) than the previous priority, then note
851  * that priority level within the zone.  This is done so that when the next
852  * process comes in to scan this zone, it will immediately start out at this
853  * priority level rather than having to build up its own scanning priority.
854  * Here, this priority affects only the reclaim-mapped threshold.
855  */
856 static inline void note_zone_scanning_priority(struct zone *zone, int priority)
857 {
858         if (priority < zone->prev_priority)
859                 zone->prev_priority = priority;
860 }
861
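/*
 * A zone counts as nearly OOM once it has been scanned three times over
 * (pages_scanned is at least three times its active + inactive page count).
 */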
862 static inline int zone_is_near_oom(struct zone *zone)
863 {
864         return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
865                                 + zone_page_state(zone, NR_INACTIVE))*3;
866 }
867
868 /*
869  * This moves pages from the active list to the inactive list.
870  *
871  * We move them the other way if the page is referenced by one or more
872  * processes, from rmap.
873  *
874  * If the pages are mostly unmapped, the processing is fast and it is
875  * appropriate to hold zone->lru_lock across the whole operation.  But if
876  * the pages are mapped, the processing is slow (page_referenced()) so we
877  * should drop zone->lru_lock around each page.  It's impossible to balance
878  * this, so instead we remove the pages from the LRU while processing them.
879  * It is safe to rely on PG_active against the non-LRU pages in here because
880  * nobody will play with that bit on a non-LRU page.
881  *
882  * The downside is that we have to touch page->_count against each page.
883  * But we had to alter page->flags anyway.
884  */
885 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
886                                 struct scan_control *sc, int priority)
887 {
888         unsigned long pgmoved;
889         int pgdeactivate = 0;
890         unsigned long pgscanned;
891         LIST_HEAD(l_hold);      /* The pages which were snipped off */
892         LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
893         LIST_HEAD(l_active);    /* Pages to go onto the active_list */
894         struct page *page;
895         struct pagevec pvec;
896         int reclaim_mapped = 0;
897
898         if (sc->may_swap) {
899                 long mapped_ratio;
900                 long distress;
901                 long swap_tendency;
902
903                 if (zone_is_near_oom(zone))
904                         goto force_reclaim_mapped;
905
906                 /*
907                  * `distress' is a measure of how much trouble we're having
908                  * reclaiming pages.  0 -> no problems.  100 -> great trouble.
909                  */
910                 distress = 100 >> min(zone->prev_priority, priority);
911
912                 /*
913                  * The point of this algorithm is to decide when to start
914                  * reclaiming mapped memory instead of just pagecache.  Work out
915                  * how much memory
916                  * is mapped.
917                  */
918                 mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
919                                 global_page_state(NR_ANON_PAGES)) * 100) /
920                                         vm_total_pages;
921
922                 /*
923                  * Now decide how much we really want to unmap some pages.  The
924                  * mapped ratio is downgraded - just because there's a lot of
925                  * mapped memory doesn't necessarily mean that page reclaim
926                  * isn't succeeding.
927                  *
928                  * The distress ratio is important - we don't want to start
929                  * going oom.
930                  *
931                  * A 100% value of vm_swappiness overrides this algorithm
932                  * altogether.
933                  */
934                 swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
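                /*
                 * Example (illustrative numbers): with 36% of memory mapped,
                 * prev_priority still at DEF_PRIORITY (so distress == 0) and
                 * the default swappiness of 60, swap_tendency = 18 + 0 + 60
                 * = 78, below 100, so mapped pages are left alone.  Under
                 * severe pressure (priority 0, distress == 100) the same
                 * load gives 178 and mapped pages become eligible.
                 */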
935
936                 /*
937                  * Now use this metric to decide whether to start moving mapped
938                  * memory onto the inactive list.
939                  */
940                 if (swap_tendency >= 100)
941 force_reclaim_mapped:
942                         reclaim_mapped = 1;
943         }
944
945         lru_add_drain();
946         spin_lock_irq(&zone->lru_lock);
947         pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
948                             &l_hold, &pgscanned, sc->order, ISOLATE_ACTIVE);
949         zone->pages_scanned += pgscanned;
950         __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
951         spin_unlock_irq(&zone->lru_lock);
952
953         while (!list_empty(&l_hold)) {
954                 cond_resched();
955                 page = lru_to_page(&l_hold);
956                 list_del(&page->lru);
957                 if (page_mapped(page)) {
958                         if (!reclaim_mapped ||
959                             (total_swap_pages == 0 && PageAnon(page)) ||
960                             page_referenced(page, 0)) {
961                                 list_add(&page->lru, &l_active);
962                                 continue;
963                         }
964                 }
965                 list_add(&page->lru, &l_inactive);
966         }
967
968         pagevec_init(&pvec, 1);
969         pgmoved = 0;
970         spin_lock_irq(&zone->lru_lock);
971         while (!list_empty(&l_inactive)) {
972                 page = lru_to_page(&l_inactive);
973                 prefetchw_prev_lru_page(page, &l_inactive, flags);
974                 VM_BUG_ON(PageLRU(page));
975                 SetPageLRU(page);
976                 VM_BUG_ON(!PageActive(page));
977                 ClearPageActive(page);
978
979                 list_move(&page->lru, &zone->inactive_list);
980                 pgmoved++;
981                 if (!pagevec_add(&pvec, page)) {
982                         __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
983                         spin_unlock_irq(&zone->lru_lock);
984                         pgdeactivate += pgmoved;
985                         pgmoved = 0;
986                         if (buffer_heads_over_limit)
987                                 pagevec_strip(&pvec);
988                         __pagevec_release(&pvec);
989                         spin_lock_irq(&zone->lru_lock);
990                 }
991         }
992         __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
993         pgdeactivate += pgmoved;
994         if (buffer_heads_over_limit) {
995                 spin_unlock_irq(&zone->lru_lock);
996                 pagevec_strip(&pvec);
997                 spin_lock_irq(&zone->lru_lock);
998         }
999
1000         pgmoved = 0;
1001         while (!list_empty(&l_active)) {
1002                 page = lru_to_page(&l_active);
1003                 prefetchw_prev_lru_page(page, &l_active, flags);
1004                 VM_BUG_ON(PageLRU(page));
1005                 SetPageLRU(page);
1006                 VM_BUG_ON(!PageActive(page));
1007                 list_move(&page->lru, &zone->active_list);
1008                 pgmoved++;
1009                 if (!pagevec_add(&pvec, page)) {
1010                         __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
1011                         pgmoved = 0;
1012                         spin_unlock_irq(&zone->lru_lock);
1013                         __pagevec_release(&pvec);
1014                         spin_lock_irq(&zone->lru_lock);
1015                 }
1016         }
1017         __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
1018
1019         __count_zone_vm_events(PGREFILL, zone, pgscanned);
1020         __count_vm_events(PGDEACTIVATE, pgdeactivate);
1021         spin_unlock_irq(&zone->lru_lock);
1022
1023         pagevec_release(&pvec);
1024 }
1025
1026 /*
1027  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1028  */
1029 static unsigned long shrink_zone(int priority, struct zone *zone,
1030                                 struct scan_control *sc)
1031 {
1032         unsigned long nr_active;
1033         unsigned long nr_inactive;
1034         unsigned long nr_to_scan;
1035         unsigned long nr_reclaimed = 0;
1036
1037         atomic_inc(&zone->reclaim_in_progress);
1038
1039         /*
1040          * Add one to `nr_to_scan' just to make sure that the kernel will
1041          * slowly sift through the active list.
1042          */
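        /*
         * Illustrative example: a zone with 120000 active pages scanned at
         * priority 12 adds (120000 >> 12) + 1 = 30 per call; the active list
         * is only scanned once the accumulated count reaches
         * sc->swap_cluster_max, otherwise the credit is kept for later calls.
         */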
1043         zone->nr_scan_active +=
1044                 (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
1045         nr_active = zone->nr_scan_active;
1046         if (nr_active >= sc->swap_cluster_max)
1047                 zone->nr_scan_active = 0;
1048         else
1049                 nr_active = 0;
1050
1051         zone->nr_scan_inactive +=
1052                 (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
1053         nr_inactive = zone->nr_scan_inactive;
1054         if (nr_inactive >= sc->swap_cluster_max)
1055                 zone->nr_scan_inactive = 0;
1056         else
1057                 nr_inactive = 0;
1058
1059         while (nr_active || nr_inactive) {
1060                 if (nr_active) {
1061                         nr_to_scan = min(nr_active,
1062                                         (unsigned long)sc->swap_cluster_max);
1063                         nr_active -= nr_to_scan;
1064                         shrink_active_list(nr_to_scan, zone, sc, priority);
1065                 }
1066
1067                 if (nr_inactive) {
1068                         nr_to_scan = min(nr_inactive,
1069                                         (unsigned long)sc->swap_cluster_max);
1070                         nr_inactive -= nr_to_scan;
1071                         nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
1072                                                                 sc);
1073                 }
1074         }
1075
1076         throttle_vm_writeout(sc->gfp_mask);
1077
1078         atomic_dec(&zone->reclaim_in_progress);
1079         return nr_reclaimed;
1080 }
1081
1082 /*
1083  * This is the direct reclaim path, for page-allocating processes.  We only
1084  * try to reclaim pages from zones which will satisfy the caller's allocation
1085  * request.
1086  *
1087  * We reclaim from a zone even if that zone is over pages_high.  Because:
1088  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1089  *    allocation or
1090  * b) The zones may be over pages_high but they must go *over* pages_high to
1091  *    satisfy the `incremental min' zone defense algorithm.
1092  *
1093  * Returns the number of reclaimed pages.
1094  *
1095  * If a zone is deemed to be full of pinned pages then just give it a light
1096  * scan then give up on it.
1097  */
1098 static unsigned long shrink_zones(int priority, struct zone **zones,
1099                                         struct scan_control *sc)
1100 {
1101         unsigned long nr_reclaimed = 0;
1102         int i;
1103
1104         sc->all_unreclaimable = 1;
1105         for (i = 0; zones[i] != NULL; i++) {
1106                 struct zone *zone = zones[i];
1107
1108                 if (!populated_zone(zone))
1109                         continue;
1110
1111                 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1112                         continue;
1113
1114                 note_zone_scanning_priority(zone, priority);
1115
1116                 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1117                         continue;       /* Let kswapd poll it */
1118
1119                 sc->all_unreclaimable = 0;
1120
1121                 nr_reclaimed += shrink_zone(priority, zone, sc);
1122         }
1123         return nr_reclaimed;
1124 }
1125  
1126 /*
1127  * This is the main entry point to direct page reclaim.
1128  *
1129  * If a full scan of the inactive list fails to free enough memory then we
1130  * are "out of memory" and something needs to be killed.
1131  *
1132  * If the caller is !__GFP_FS then the probability of a failure is reasonably
1133  * high - the zone may be full of dirty or under-writeback pages, which this
1134  * caller can't do much about.  We kick pdflush and take explicit naps in the
1135  * hope that some of these pages can be written.  But if the allocating task
1136  * holds filesystem locks which prevent writeout this might not work, and the
1137  * allocation attempt will fail.
1138  */
1139 unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
1140 {
1141         int priority;
1142         int ret = 0;
1143         unsigned long total_scanned = 0;
1144         unsigned long nr_reclaimed = 0;
1145         struct reclaim_state *reclaim_state = current->reclaim_state;
1146         unsigned long lru_pages = 0;
1147         int i;
1148         struct scan_control sc = {
1149                 .gfp_mask = gfp_mask,
1150                 .may_writepage = !laptop_mode,
1151                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1152                 .may_swap = 1,
1153                 .swappiness = vm_swappiness,
1154                 .order = order,
1155         };
1156
1157         count_vm_event(ALLOCSTALL);
1158
1159         for (i = 0; zones[i] != NULL; i++) {
1160                 struct zone *zone = zones[i];
1161
1162                 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1163                         continue;
1164
1165                 lru_pages += zone_page_state(zone, NR_ACTIVE)
1166                                 + zone_page_state(zone, NR_INACTIVE);
1167         }
1168
1169         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1170                 sc.nr_scanned = 0;
1171                 if (!priority)
1172                         disable_swap_token();
1173                 nr_reclaimed += shrink_zones(priority, zones, &sc);
1174                 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
1175                 if (reclaim_state) {
1176                         nr_reclaimed += reclaim_state->reclaimed_slab;
1177                         reclaim_state->reclaimed_slab = 0;
1178                 }
1179                 total_scanned += sc.nr_scanned;
1180                 if (nr_reclaimed >= sc.swap_cluster_max) {
1181                         ret = 1;
1182                         goto out;
1183                 }
1184
1185                 /*
1186                  * Try to write back as many pages as we just scanned.  This
1187                  * tends to cause slow streaming writers to write data to the
1188                  * disk smoothly, at the dirtying rate, which is nice.   But
1189                  * that's undesirable in laptop mode, where we *want* lumpy
1190                  * writeout.  So in laptop mode, write out the whole world.
1191                  */
1192                 if (total_scanned > sc.swap_cluster_max +
1193                                         sc.swap_cluster_max / 2) {
1194                         wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1195                         sc.may_writepage = 1;
1196                 }
1197
1198                 /* Take a nap, wait for some writeback to complete */
1199                 if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
1200                         congestion_wait(WRITE, HZ/10);
1201         }
1202         /* top priority shrink_zones() still had more to do? don't OOM, then */
1203         if (!sc.all_unreclaimable)
1204                 ret = 1;
1205 out:
1206         /*
1207          * Now that we've scanned all the zones at this priority level, note
1208          * that level within the zone so that the next thread which performs
1209          * scanning of this zone will immediately start out at this priority
1210          * level.  This affects only the decision whether or not to bring
1211          * mapped pages onto the inactive list.
1212          */
1213         if (priority < 0)
1214                 priority = 0;
1215         for (i = 0; zones[i] != 0; i++) {
1216                 struct zone *zone = zones[i];
1217
1218                 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1219                         continue;
1220
1221                 zone->prev_priority = priority;
1222         }
1223         return ret;
1224 }
1225
1226 /*
1227  * For kswapd, balance_pgdat() will work across all this node's zones until
1228  * they are all at pages_high.
1229  *
1230  * Returns the number of pages which were actually freed.
1231  *
1232  * There is special handling here for zones which are full of pinned pages.
1233  * This can happen if the pages are all mlocked, or if they are all used by
1234  * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
1235  * What we do is to detect the case where all pages in the zone have been
1236  * scanned twice and there has been zero successful reclaim.  Mark the zone as
1237  * dead and from now on, only perform a short scan.  Basically we're polling
1238  * the zone for when the problem goes away.
1239  *
1240  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
1241  * zones which have free_pages > pages_high, but once a zone is found to have
1242  * free_pages <= pages_high, we scan that zone and the lower zones regardless
1243  * of the number of free pages in the lower zones.  This interoperates with
1244  * the page allocator fallback scheme to ensure that aging of pages is balanced
1245  * across the zones.
1246  */
1247 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1248 {
1249         int all_zones_ok;
1250         int priority;
1251         int i;
1252         unsigned long total_scanned;
1253         unsigned long nr_reclaimed;
1254         struct reclaim_state *reclaim_state = current->reclaim_state;
1255         struct scan_control sc = {
1256                 .gfp_mask = GFP_KERNEL,
1257                 .may_swap = 1,
1258                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1259                 .swappiness = vm_swappiness,
1260                 .order = order,
1261         };
1262         /*
1263          * temp_priority is used to remember the scanning priority at which
1264          * this zone was successfully refilled to free_pages == pages_high.
1265          */
1266         int temp_priority[MAX_NR_ZONES];
1267
1268 loop_again:
1269         total_scanned = 0;
1270         nr_reclaimed = 0;
1271         sc.may_writepage = !laptop_mode;
1272         count_vm_event(PAGEOUTRUN);
1273
1274         for (i = 0; i < pgdat->nr_zones; i++)
1275                 temp_priority[i] = DEF_PRIORITY;
1276
1277         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1278                 int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
1279                 unsigned long lru_pages = 0;
1280
1281                 /* The swap token gets in the way of swapout... */
1282                 if (!priority)
1283                         disable_swap_token();
1284
1285                 all_zones_ok = 1;
1286
1287                 /*
1288                  * Scan in the highmem->dma direction for the highest
1289                  * zone which needs scanning
1290                  */
1291                 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1292                         struct zone *zone = pgdat->node_zones + i;
1293
1294                         if (!populated_zone(zone))
1295                                 continue;
1296
1297                         if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1298                                 continue;
1299
1300                         if (!zone_watermark_ok(zone, order, zone->pages_high,
1301                                                0, 0)) {
1302                                 end_zone = i;
1303                                 break;
1304                         }
1305                 }
1306                 if (i < 0)
1307                         goto out;
1308
1309                 for (i = 0; i <= end_zone; i++) {
1310                         struct zone *zone = pgdat->node_zones + i;
1311
1312                         lru_pages += zone_page_state(zone, NR_ACTIVE)
1313                                         + zone_page_state(zone, NR_INACTIVE);
1314                 }
1315
1316                 /*
1317                  * Now scan the zone in the dma->highmem direction, stopping
1318                  * at the last zone which needs scanning.
1319                  *
1320                  * We do this because the page allocator works in the opposite
1321                  * direction.  This prevents the page allocator from allocating
1322                  * pages behind kswapd's direction of progress, which would
1323                  * cause too much scanning of the lower zones.
1324                  */
1325                 for (i = 0; i <= end_zone; i++) {
1326                         struct zone *zone = pgdat->node_zones + i;
1327                         int nr_slab;
1328
1329                         if (!populated_zone(zone))
1330                                 continue;
1331
1332                         if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1333                                 continue;
1334
1335                         if (!zone_watermark_ok(zone, order, zone->pages_high,
1336                                                end_zone, 0))
1337                                 all_zones_ok = 0;
1338                         temp_priority[i] = priority;
1339                         sc.nr_scanned = 0;
1340                         note_zone_scanning_priority(zone, priority);
1341                         nr_reclaimed += shrink_zone(priority, zone, &sc);
1342                         reclaim_state->reclaimed_slab = 0;
1343                         nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1344                                                 lru_pages);
1345                         nr_reclaimed += reclaim_state->reclaimed_slab;
1346                         total_scanned += sc.nr_scanned;
1347                         if (zone->all_unreclaimable)
1348                                 continue;
1349                         if (nr_slab == 0 && zone->pages_scanned >=
1350                                 (zone_page_state(zone, NR_ACTIVE)
1351                                 + zone_page_state(zone, NR_INACTIVE)) * 6)
1352                                         zone->all_unreclaimable = 1;
1353                         /*
1354                          * If we've done a decent amount of scanning and
1355                          * the reclaim ratio is low, start doing writepage
1356                          * even in laptop mode
1357                          */
1358                         if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1359                             total_scanned > nr_reclaimed + nr_reclaimed / 2)
1360                                 sc.may_writepage = 1;
1361                 }
1362                 if (all_zones_ok)
1363                         break;          /* kswapd: all done */
1364                 /*
1365                  * OK, kswapd is getting into trouble.  Take a nap, then take
1366                  * another pass across the zones.
1367                  */
1368                 if (total_scanned && priority < DEF_PRIORITY - 2)
1369                         congestion_wait(WRITE, HZ/10);
1370
1371                 /*
1372                  * We do this so kswapd doesn't build up large priorities for
1373                  * example when it is freeing in parallel with allocators. It
1374                  * matches the direct reclaim path behaviour in terms of impact
1375                  * on zone->*_priority.
1376                  */
1377                 if (nr_reclaimed >= SWAP_CLUSTER_MAX)
1378                         break;
1379         }
1380 out:
1381         /*
1382          * Note within each zone the priority level at which this zone was
1383          * brought into a happy state, so that the next thread which scans this
1384          * zone will start out at that priority level.
1385          */
1386         for (i = 0; i < pgdat->nr_zones; i++) {
1387                 struct zone *zone = pgdat->node_zones + i;
1388
1389                 zone->prev_priority = temp_priority[i];
1390         }
1391         if (!all_zones_ok) {
1392                 cond_resched();
1393
1394                 try_to_freeze();
1395
1396                 goto loop_again;
1397         }
1398
1399         return nr_reclaimed;
1400 }
1401
1402 /*
1403  * The background pageout daemon, started as a kernel thread
1404  * from the init process. 
1405  *
1406  * This basically trickles out pages so that we have _some_
1407  * free memory available even if there is no other activity
1408  * that frees anything up. This is needed for things like routing
1409  * etc, where we otherwise might have all activity going on in
1410  * asynchronous contexts that cannot page things out.
1411  *
1412  * If there are applications that are active memory-allocators
1413  * (most normal use), this basically shouldn't matter.
1414  */
1415 static int kswapd(void *p)
1416 {
1417         unsigned long order;
1418         pg_data_t *pgdat = (pg_data_t*)p;
1419         struct task_struct *tsk = current;
1420         DEFINE_WAIT(wait);
1421         struct reclaim_state reclaim_state = {
1422                 .reclaimed_slab = 0,
1423         };
1424         cpumask_t cpumask;
1425
1426         cpumask = node_to_cpumask(pgdat->node_id);
1427         if (!cpus_empty(cpumask))
1428                 set_cpus_allowed(tsk, cpumask);
1429         current->reclaim_state = &reclaim_state;
1430
1431         /*
1432          * Tell the memory management that we're a "memory allocator",
1433          * and that if we need more memory we should get access to it
1434          * regardless (see "__alloc_pages()"). "kswapd" should
1435          * never get caught in the normal page freeing logic.
1436          *
1437          * (Kswapd normally doesn't need memory anyway, but sometimes
1438          * you need a small amount of memory in order to be able to
1439          * page out something else, and this flag essentially protects
1440          * us from recursively trying to free more memory as we're
1441          * trying to free the first piece of memory in the first place).
1442          */
1443         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1444
1445         order = 0;
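        /*
         * Main loop: sleep on kswapd_wait until wakeup_kswapd() requests
         * work, then rebalance this node for the largest order seen so far.
         */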
1446         for ( ; ; ) {
1447                 unsigned long new_order;
1448
1449                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1450                 new_order = pgdat->kswapd_max_order;
1451                 pgdat->kswapd_max_order = 0;
1452                 if (order < new_order) {
1453                         /*
1454                          * Don't sleep if someone wants a larger 'order'
1455                          * allocation
1456                          */
1457                         order = new_order;
1458                 } else {
1459                         if (!freezing(current))
1460                                 schedule();
1461
1462                         order = pgdat->kswapd_max_order;
1463                 }
1464                 finish_wait(&pgdat->kswapd_wait, &wait);
1465
1466                 if (!try_to_freeze()) {
1467                         /* We can speed up thawing tasks if we don't call
1468                          * balance_pgdat after returning from the refrigerator
1469                          */
1470                         balance_pgdat(pgdat, order);
1471                 }
1472         }
1473         return 0;
1474 }
1475
1476 /*
1477  * A zone is low on free memory, so wake its kswapd task to service it.
1478  */
1479 void wakeup_kswapd(struct zone *zone, int order)
1480 {
1481         pg_data_t *pgdat;
1482
1483         if (!populated_zone(zone))
1484                 return;
1485
1486         pgdat = zone->zone_pgdat;
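        /* Nothing to do if the zone is already above its low watermark */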
1487         if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1488                 return;
1489         if (pgdat->kswapd_max_order < order)
1490                 pgdat->kswapd_max_order = order;
1491         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1492                 return;
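        /* No need to wake kswapd if it is not sleeping on the waitqueue */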
1493         if (!waitqueue_active(&pgdat->kswapd_wait))
1494                 return;
1495         wake_up_interruptible(&pgdat->kswapd_wait);
1496 }
1497
1498 #ifdef CONFIG_PM
1499 /*
1500  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
1501  * from LRU lists system-wide, for the given pass and priority, and returns the
1502  * number of reclaimed pages.
1503  *
1504  * For pass > 3 we also try to shrink the LRU lists that contain only a few pages
1505  */
1506 static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
1507                                       int pass, struct scan_control *sc)
1508 {
1509         struct zone *zone;
1510         unsigned long nr_to_scan, ret = 0;
1511
1512         for_each_zone(zone) {
1513
1514                 if (!populated_zone(zone))
1515                         continue;
1516
1517                 if (zone->all_unreclaimable && prio != DEF_PRIORITY)
1518                         continue;
1519
1520                 /* For pass = 0 we don't shrink the active list */
1521                 if (pass > 0) {
1522                         zone->nr_scan_active +=
1523                                 (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
1524                         if (zone->nr_scan_active >= nr_pages || pass > 3) {
1525                                 zone->nr_scan_active = 0;
1526                                 nr_to_scan = min(nr_pages,
1527                                         zone_page_state(zone, NR_ACTIVE));
1528                                 shrink_active_list(nr_to_scan, zone, sc, prio);
1529                         }
1530                 }
1531
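                /*
                 * The inactive list is scanned on every pass.  Scan work is
                 * accumulated in nr_scan_inactive and the list is shrunk once
                 * the accumulated count reaches nr_pages (or unconditionally
                 * for pass > 3).
                 */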
1532                 zone->nr_scan_inactive +=
1533                         (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
1534                 if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
1535                         zone->nr_scan_inactive = 0;
1536                         nr_to_scan = min(nr_pages,
1537                                 zone_page_state(zone, NR_INACTIVE));
1538                         ret += shrink_inactive_list(nr_to_scan, zone, sc);
1539                         if (ret >= nr_pages)
1540                                 return ret;
1541                 }
1542         }
1543
1544         return ret;
1545 }
1546
1547 static unsigned long count_lru_pages(void)
1548 {
1549         return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
1550 }
1551
1552 /*
1553  * Try to free `nr_pages' of memory, system-wide, and return the number of
1554  * freed pages.
1555  *
1556  * Rather than trying to age LRUs, the aim is to preserve the overall
1557  * LRU order by reclaiming preferentially:
1558  * inactive > active > active referenced > active mapped
1559  */
1560 unsigned long shrink_all_memory(unsigned long nr_pages)
1561 {
1562         unsigned long lru_pages, nr_slab;
1563         unsigned long ret = 0;
1564         int pass;
1565         struct reclaim_state reclaim_state;
1566         struct scan_control sc = {
1567                 .gfp_mask = GFP_KERNEL,
1568                 .may_swap = 0,
1569                 .swap_cluster_max = nr_pages,
1570                 .may_writepage = 1,
1571                 .swappiness = vm_swappiness,
1572         };
1573
1574         current->reclaim_state = &reclaim_state;
1575
1576         lru_pages = count_lru_pages();
1577         nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
1578         /* If slab caches are huge, it's better to hit them first */
1579         while (nr_slab >= lru_pages) {
1580                 reclaim_state.reclaimed_slab = 0;
1581                 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
1582                 if (!reclaim_state.reclaimed_slab)
1583                         break;
1584
1585                 ret += reclaim_state.reclaimed_slab;
1586                 if (ret >= nr_pages)
1587                         goto out;
1588
1589                 nr_slab -= reclaim_state.reclaimed_slab;
1590         }
1591
1592         /*
1593          * We try to shrink LRUs in 5 passes:
1594          * 0 = Reclaim from inactive_list only
1595          * 1 = Reclaim from active list but don't reclaim mapped
1596          * 2 = 2nd pass of type 1
1597          * 3 = Reclaim mapped (normal reclaim)
1598          * 4 = 2nd pass of type 3
1599          */
1600         for (pass = 0; pass < 5; pass++) {
1601                 int prio;
1602
1603                 /* Force reclaiming mapped pages in passes 3 and 4 */
1604                 if (pass > 2) {
1605                         sc.may_swap = 1;
1606                         sc.swappiness = 100;
1607                 }
1608
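                /*
                 * Within each pass, sweep the priorities from DEF_PRIORITY
                 * down to 0 so that scanning intensity increases until enough
                 * pages have been reclaimed.
                 */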
1609                 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
1610                         unsigned long nr_to_scan = nr_pages - ret;
1611
1612                         sc.nr_scanned = 0;
1613                         ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
1614                         if (ret >= nr_pages)
1615                                 goto out;
1616
1617                         reclaim_state.reclaimed_slab = 0;
1618                         shrink_slab(sc.nr_scanned, sc.gfp_mask,
1619                                         count_lru_pages());
1620                         ret += reclaim_state.reclaimed_slab;
1621                         if (ret >= nr_pages)
1622                                 goto out;
1623
1624                         if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
1625                                 congestion_wait(WRITE, HZ / 10);
1626                 }
1627         }
1628
1629         /*
1630          * If ret == 0 we could not shrink the LRUs, but there may still be
1631          * something reclaimable in the slab caches.
1632          */
1633         if (!ret) {
1634                 do {
1635                         reclaim_state.reclaimed_slab = 0;
1636                         shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
1637                         ret += reclaim_state.reclaimed_slab;
1638                 } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
1639         }
1640
1641 out:
1642         current->reclaim_state = NULL;
1643
1644         return ret;
1645 }
1646 #endif
1647
1648 /* It's optimal to keep kswapds on the same CPUs as their memory, but
1649    not required for correctness.  So if the last cpu in a node goes
1650    away, we get changed to run anywhere: as the first one comes back,
1651    restore their cpu bindings. */
1652 static int __devinit cpu_callback(struct notifier_block *nfb,
1653                                   unsigned long action, void *hcpu)
1654 {
1655         pg_data_t *pgdat;
1656         cpumask_t mask;
1657
1658         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
1659                 for_each_online_pgdat(pgdat) {
1660                         mask = node_to_cpumask(pgdat->node_id);
1661                         if (any_online_cpu(mask) != NR_CPUS)
1662                                 /* One of our CPUs online: restore mask */
1663                                 set_cpus_allowed(pgdat->kswapd, mask);
1664                 }
1665         }
1666         return NOTIFY_OK;
1667 }
1668
1669 /*
1670  * This kswapd start function will be called by init and node-hot-add.
1671  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
1672  */
1673 int kswapd_run(int nid)
1674 {
1675         pg_data_t *pgdat = NODE_DATA(nid);
1676         int ret = 0;
1677
1678         if (pgdat->kswapd)
1679                 return 0;
1680
1681         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
1682         if (IS_ERR(pgdat->kswapd)) {
1683                 /* failure at boot is fatal */
1684                 BUG_ON(system_state == SYSTEM_BOOTING);
1685                 printk("Failed to start kswapd on node %d\n", nid);
1686                 ret = -1;
1687         }
1688         return ret;
1689 }
1690
1691 static int __init kswapd_init(void)
1692 {
1693         int nid;
1694
1695         swap_setup();
1696         for_each_online_node(nid)
1697                 kswapd_run(nid);
1698         hotcpu_notifier(cpu_callback, 0);
1699         return 0;
1700 }
1701
1702 module_init(kswapd_init)
1703
1704 #ifdef CONFIG_NUMA
1705 /*
1706  * Zone reclaim mode
1707  *
1708  * If non-zero call zone_reclaim when the number of free pages falls below
1709  * the watermarks.
1710  */
1711 int zone_reclaim_mode __read_mostly;
1712
1713 #define RECLAIM_OFF 0
1714 #define RECLAIM_ZONE (1<<0)     /* Run shrink_cache on the zone */
1715 #define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
1716 #define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */
1717
1718 /*
1719  * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
1720  * pages that is considered for each zone_reclaim run; a priority of 4
1721  * scans 1/16th of the zone.
1722  */
1723 #define ZONE_RECLAIM_PRIORITY 4
1724
1725 /*
1726  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
1727  * occur.
1728  */
1729 int sysctl_min_unmapped_ratio = 1;
1730
1731 /*
1732  * If the number of slab pages in a zone grows beyond this percentage then
1733  * slab reclaim needs to occur.
1734  */
1735 int sysctl_min_slab_ratio = 5;
1736
1737 /*
1738  * Try to free up some pages from this zone through reclaim.
1739  */
1740 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1741 {
1742         /* Minimum pages needed in order to stay on node */
1743         const unsigned long nr_pages = 1 << order;
1744         struct task_struct *p = current;
1745         struct reclaim_state reclaim_state;
1746         int priority;
1747         unsigned long nr_reclaimed = 0;
1748         struct scan_control sc = {
1749                 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1750                 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
1751                 .swap_cluster_max = max_t(unsigned long, nr_pages,
1752                                         SWAP_CLUSTER_MAX),
1753                 .gfp_mask = gfp_mask,
1754                 .swappiness = vm_swappiness,
1755         };
1756         unsigned long slab_reclaimable;
1757
1758         disable_swap_token();
1759         cond_resched();
1760         /*
1761          * We need to be able to allocate from the reserves for RECLAIM_SWAP
1762          * and we also need to be able to write out pages for RECLAIM_WRITE
1763          * and RECLAIM_SWAP.
1764          */
1765         p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
1766         reclaim_state.reclaimed_slab = 0;
1767         p->reclaim_state = &reclaim_state;
1768
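        /*
         * Reclaim unmapped file backed pages only if this zone holds more of
         * them than the min_unmapped_pages threshold allows.
         */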
1769         if (zone_page_state(zone, NR_FILE_PAGES) -
1770                 zone_page_state(zone, NR_FILE_MAPPED) >
1771                 zone->min_unmapped_pages) {
1772                 /*
1773                  * Free memory by calling shrink zone with increasing
1774                  * priorities until we have enough memory freed.
1775                  */
1776                 priority = ZONE_RECLAIM_PRIORITY;
1777                 do {
1778                         note_zone_scanning_priority(zone, priority);
1779                         nr_reclaimed += shrink_zone(priority, zone, &sc);
1780                         priority--;
1781                 } while (priority >= 0 && nr_reclaimed < nr_pages);
1782         }
1783
1784         slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1785         if (slab_reclaimable > zone->min_slab_pages) {
1786                 /*
1787                  * shrink_slab() does not currently allow us to determine how
1788                  * many pages were freed in this zone. So we take the current
1789                  * number of slab pages and shake the slab until it is reduced
1790                  * by the same nr_pages that we used for reclaiming unmapped
1791                  * pages.
1792                  *
1793                  * Note that shrink_slab will free memory on all zones and may
1794                  * take a long time.
1795                  */
1796                 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
1797                         zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
1798                                 slab_reclaimable - nr_pages)
1799                         ;
1800
1801                 /*
1802                  * Update nr_reclaimed by the number of slab pages we
1803                  * reclaimed from this zone.
1804                  */
1805                 nr_reclaimed += slab_reclaimable -
1806                         zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1807         }
1808
1809         p->reclaim_state = NULL;
1810         current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
1811         return nr_reclaimed >= nr_pages;
1812 }
1813
1814 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1815 {
1816         cpumask_t mask;
1817         int node_id;
1818
1819         /*
1820          * Zone reclaim reclaims unmapped file backed pages and
1821          * slab pages if we are over the defined limits.
1822          *
1823          * A small portion of unmapped file backed pages is needed for
1824          * file I/O otherwise pages read by file I/O will be immediately
1825          * thrown out if the zone is overallocated. So we do not reclaim
1826          * if less than a specified percentage of the zone is used by
1827          * unmapped file backed pages.
1828          */
1829         if (zone_page_state(zone, NR_FILE_PAGES) -
1830             zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
1831             && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
1832                         <= zone->min_slab_pages)
1833                 return 0;
1834
1835         /*
1836          * Do not scan if the allocation cannot wait (no __GFP_WAIT), the
1837          * zone has been marked all_unreclaimable, another reclaim is already
1838          * in progress on this zone, or we are ourselves reclaiming (PF_MEMALLOC).
1839          */
1840         if (!(gfp_mask & __GFP_WAIT) ||
1841                 zone->all_unreclaimable ||
1842                 atomic_read(&zone->reclaim_in_progress) > 0 ||
1843                 (current->flags & PF_MEMALLOC))
1844                         return 0;
1845
1846         /*
1847          * Only run zone reclaim on the local zone or on zones that do not
1848          * have associated processors. This will favor the local processor
1849          * over remote processors and spread off node memory allocations
1850          * as wide as possible.
1851          */
1852         node_id = zone_to_nid(zone);
1853         mask = node_to_cpumask(node_id);
1854         if (!cpus_empty(mask) && node_id != numa_node_id())
1855                 return 0;
1856         return __zone_reclaim(zone, gfp_mask, order);
1857 }
1858 #endif