vmscan: free swap space on swap-in/activation
1 /*
2  *  linux/mm/vmscan.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  *
6  *  Swap reorganised 29.12.95, Stephen Tweedie.
7  *  kswapd added: 7.1.96  sct
8  *  Removed kswapd_ctl limits, and swap out as many pages as needed
9  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11  *  Multiqueue VM started 5.8.00, Rik van Riel.
12  */
13
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/init.h>
21 #include <linux/highmem.h>
22 #include <linux/vmstat.h>
23 #include <linux/file.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/buffer_head.h>  /* for try_to_release_page(),
27                                         buffer_heads_over_limit */
28 #include <linux/mm_inline.h>
29 #include <linux/pagevec.h>
30 #include <linux/backing-dev.h>
31 #include <linux/rmap.h>
32 #include <linux/topology.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/notifier.h>
36 #include <linux/rwsem.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/freezer.h>
40 #include <linux/memcontrol.h>
41 #include <linux/delayacct.h>
42
43 #include <asm/tlbflush.h>
44 #include <asm/div64.h>
45
46 #include <linux/swapops.h>
47
48 #include "internal.h"
49
50 struct scan_control {
51         /* Incremented by the number of inactive pages that were scanned */
52         unsigned long nr_scanned;
53
54         /* This context's GFP mask */
55         gfp_t gfp_mask;
56
57         int may_writepage;
58
59         /* Can pages be swapped as part of reclaim? */
60         int may_swap;
61
62         /* This context's SWAP_CLUSTER_MAX. If freeing memory for
63          * suspend, we effectively ignore SWAP_CLUSTER_MAX.
64          * In this context, it doesn't matter that we scan the
65          * whole list at once. */
66         int swap_cluster_max;
67
68         int swappiness;
69
70         int all_unreclaimable;
71
72         int order;
73
74         /* Which cgroup do we reclaim from */
75         struct mem_cgroup *mem_cgroup;
76
77         /* Pluggable isolate pages callback */
78         unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
79                         unsigned long *scanned, int order, int mode,
80                         struct zone *z, struct mem_cgroup *mem_cont,
81                         int active);
82 };
83
84 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
85
86 #ifdef ARCH_HAS_PREFETCH
87 #define prefetch_prev_lru_page(_page, _base, _field)                    \
88         do {                                                            \
89                 if ((_page)->lru.prev != _base) {                       \
90                         struct page *prev;                              \
91                                                                         \
92                         prev = lru_to_page(&(_page->lru));              \
93                         prefetch(&prev->_field);                        \
94                 }                                                       \
95         } while (0)
96 #else
97 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
98 #endif
99
100 #ifdef ARCH_HAS_PREFETCHW
101 #define prefetchw_prev_lru_page(_page, _base, _field)                   \
102         do {                                                            \
103                 if ((_page)->lru.prev != _base) {                       \
104                         struct page *prev;                              \
105                                                                         \
106                         prev = lru_to_page(&(_page->lru));              \
107                         prefetchw(&prev->_field);                       \
108                 }                                                       \
109         } while (0)
110 #else
111 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
112 #endif
113
114 /*
115  * From 0 .. 100.  Higher means more swappy.
116  */
117 int vm_swappiness = 60;
118 long vm_total_pages;    /* The total number of pages which the VM controls */
119
120 static LIST_HEAD(shrinker_list);
121 static DECLARE_RWSEM(shrinker_rwsem);
122
123 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
124 #define scan_global_lru(sc)     (!(sc)->mem_cgroup)
125 #else
126 #define scan_global_lru(sc)     (1)
127 #endif
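/*
 * scan_global_lru() is true when reclaim is done on behalf of the whole
 * system rather than a memory cgroup, i.e. when sc->mem_cgroup is NULL.
 */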
128
129 /*
130  * Add a shrinker callback to be called from the vm
131  */
132 void register_shrinker(struct shrinker *shrinker)
133 {
134         shrinker->nr = 0;
135         down_write(&shrinker_rwsem);
136         list_add_tail(&shrinker->list, &shrinker_list);
137         up_write(&shrinker_rwsem);
138 }
139 EXPORT_SYMBOL(register_shrinker);
140
141 /*
142  * Remove one
143  */
144 void unregister_shrinker(struct shrinker *shrinker)
145 {
146         down_write(&shrinker_rwsem);
147         list_del(&shrinker->list);
148         up_write(&shrinker_rwsem);
149 }
150 EXPORT_SYMBOL(unregister_shrinker);
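/*
 * Example: a minimal sketch of how a cache might hook into this interface.
 * prune_my_cache() and my_cache_count() are hypothetical helpers standing
 * in for the cache's own pruning and counting code.
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan) {
 *			if (!(gfp_mask & __GFP_FS))
 *				return -1;	-- cannot prune in this context
 *			prune_my_cache(nr_to_scan);
 *		}
 *		return my_cache_count();	-- freeable objects remaining
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.shrink	= my_cache_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *
 * register_shrinker(&my_shrinker) is then called when the cache is set up
 * and unregister_shrinker(&my_shrinker) when it is torn down.
 */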
151
152 #define SHRINK_BATCH 128
153 /*
154  * Call the shrink functions to age shrinkable caches
155  *
156  * Here we assume it costs one seek to replace a lru page and that it also
157  * takes a seek to recreate a cache object.  With this in mind we age equal
158  * percentages of the lru and ageable caches.  This should balance the seeks
159  * generated by these structures.
160  *
161  * If the vm encountered mapped pages on the LRU it increases the pressure on
162  * slab to avoid swapping.
163  *
164  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
165  *
166  * `lru_pages' represents the number of on-LRU pages in all the zones which
167  * are eligible for the caller's allocation attempt.  It is used for balancing
168  * slab reclaim versus page reclaim.
169  *
170  * Returns the number of slab objects which we shrunk.
171  */
172 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
173                         unsigned long lru_pages)
174 {
175         struct shrinker *shrinker;
176         unsigned long ret = 0;
177
178         if (scanned == 0)
179                 scanned = SWAP_CLUSTER_MAX;
180
181         if (!down_read_trylock(&shrinker_rwsem))
182                 return 1;       /* Assume we'll be able to shrink next time */
183
184         list_for_each_entry(shrinker, &shrinker_list, list) {
185                 unsigned long long delta;
186                 unsigned long total_scan;
187                 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
188
189                 delta = (4 * scanned) / shrinker->seeks;
190                 delta *= max_pass;
191                 do_div(delta, lru_pages + 1);
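                /*
                 * Example with illustrative numbers: scanned = 1000,
                 * seeks = DEFAULT_SEEKS (2), max_pass = 10000 cache objects
                 * and lru_pages = 199999 give delta = (4*1000/2) * 10000 /
                 * 200000 = 100, i.e. roughly 100 slab objects are queued
                 * for scanning per 1000 LRU pages scanned.
                 */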
192                 shrinker->nr += delta;
193                 if (shrinker->nr < 0) {
194                         printk(KERN_ERR "%s: nr=%ld\n",
195                                         __func__, shrinker->nr);
196                         shrinker->nr = max_pass;
197                 }
198
199                 /*
200                  * Avoid risking looping forever due to too large nr value:
201                  * never try to free more than twice the estimated number of
202                  * freeable entries.
203                  */
204                 if (shrinker->nr > max_pass * 2)
205                         shrinker->nr = max_pass * 2;
206
207                 total_scan = shrinker->nr;
208                 shrinker->nr = 0;
209
210                 while (total_scan >= SHRINK_BATCH) {
211                         long this_scan = SHRINK_BATCH;
212                         int shrink_ret;
213                         int nr_before;
214
215                         nr_before = (*shrinker->shrink)(0, gfp_mask);
216                         shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
217                         if (shrink_ret == -1)
218                                 break;
219                         if (shrink_ret < nr_before)
220                                 ret += nr_before - shrink_ret;
221                         count_vm_events(SLABS_SCANNED, this_scan);
222                         total_scan -= this_scan;
223
224                         cond_resched();
225                 }
226
227                 shrinker->nr += total_scan;
228         }
229         up_read(&shrinker_rwsem);
230         return ret;
231 }
232
233 /* Called without a lock on whether the page is mapped, so the answer is unstable */
234 static inline int page_mapping_inuse(struct page *page)
235 {
236         struct address_space *mapping;
237
238         /* Page is in somebody's page tables. */
239         if (page_mapped(page))
240                 return 1;
241
242         /* Be more reluctant to reclaim swapcache than pagecache */
243         if (PageSwapCache(page))
244                 return 1;
245
246         mapping = page_mapping(page);
247         if (!mapping)
248                 return 0;
249
250         /* File is mmap'd by somebody? */
251         return mapping_mapped(mapping);
252 }
253
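/*
 * A count of 2 means the only references left are the page cache's and the
 * caller's; buffers attached to the page (PagePrivate) hold one further
 * reference, which is discounted below.
 */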
254 static inline int is_page_cache_freeable(struct page *page)
255 {
256         return page_count(page) - !!PagePrivate(page) == 2;
257 }
258
259 static int may_write_to_queue(struct backing_dev_info *bdi)
260 {
261         if (current->flags & PF_SWAPWRITE)
262                 return 1;
263         if (!bdi_write_congested(bdi))
264                 return 1;
265         if (bdi == current->backing_dev_info)
266                 return 1;
267         return 0;
268 }
269
270 /*
271  * We detected a synchronous write error writing a page out.  Probably
272  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
273  * fsync(), msync() or close().
274  *
275  * The tricky part is that after writepage we cannot touch the mapping: nothing
276  * prevents it from being freed up.  But we have a ref on the page and once
277  * that page is locked, the mapping is pinned.
278  *
279  * We're allowed to run sleeping lock_page() here because we know the caller has
280  * __GFP_FS.
281  */
282 static void handle_write_error(struct address_space *mapping,
283                                 struct page *page, int error)
284 {
285         lock_page(page);
286         if (page_mapping(page) == mapping)
287                 mapping_set_error(mapping, error);
288         unlock_page(page);
289 }
290
291 /* Request mode for pageout(): asynchronous or synchronous. */
292 enum pageout_io {
293         PAGEOUT_IO_ASYNC,
294         PAGEOUT_IO_SYNC,
295 };
296
297 /* possible outcome of pageout() */
298 typedef enum {
299         /* failed to write page out, page is locked */
300         PAGE_KEEP,
301         /* move page to the active list, page is locked */
302         PAGE_ACTIVATE,
303         /* page has been sent to the disk successfully, page is unlocked */
304         PAGE_SUCCESS,
305         /* page is clean and locked */
306         PAGE_CLEAN,
307 } pageout_t;
308
309 /*
310  * pageout is called by shrink_page_list() for each dirty page.
311  * Calls ->writepage().
312  */
313 static pageout_t pageout(struct page *page, struct address_space *mapping,
314                                                 enum pageout_io sync_writeback)
315 {
316         /*
317          * If the page is dirty, only perform writeback if that write
318          * will be non-blocking, to prevent this allocation from being
319          * stalled by pagecache activity.  But note that there may be
320          * stalls if we need to run get_block().  We could test
321          * PagePrivate for that.
322          *
323          * If this process is currently in generic_file_write() against
324          * this page's queue, we can perform writeback even if that
325          * will block.
326          *
327          * If the page is swapcache, write it back even if that would
328          * block, for some throttling. This happens by accident, because
329          * swap_backing_dev_info is bust: it doesn't reflect the
330          * congestion state of the swapdevs.  Easy to fix, if needed.
331          * See swapfile.c:page_queue_congested().
332          */
333         if (!is_page_cache_freeable(page))
334                 return PAGE_KEEP;
335         if (!mapping) {
336                 /*
337                  * Some data-journaling filesystems leave orphaned pages that
338                  * have page->mapping == NULL while being dirty with clean buffers.
339                  */
340                 if (PagePrivate(page)) {
341                         if (try_to_free_buffers(page)) {
342                                 ClearPageDirty(page);
343                                 printk("%s: orphaned page\n", __func__);
344                                 return PAGE_CLEAN;
345                         }
346                 }
347                 return PAGE_KEEP;
348         }
349         if (mapping->a_ops->writepage == NULL)
350                 return PAGE_ACTIVATE;
351         if (!may_write_to_queue(mapping->backing_dev_info))
352                 return PAGE_KEEP;
353
354         if (clear_page_dirty_for_io(page)) {
355                 int res;
356                 struct writeback_control wbc = {
357                         .sync_mode = WB_SYNC_NONE,
358                         .nr_to_write = SWAP_CLUSTER_MAX,
359                         .range_start = 0,
360                         .range_end = LLONG_MAX,
361                         .nonblocking = 1,
362                         .for_reclaim = 1,
363                 };
364
365                 SetPageReclaim(page);
366                 res = mapping->a_ops->writepage(page, &wbc);
367                 if (res < 0)
368                         handle_write_error(mapping, page, res);
369                 if (res == AOP_WRITEPAGE_ACTIVATE) {
370                         ClearPageReclaim(page);
371                         return PAGE_ACTIVATE;
372                 }
373
374                 /*
375                  * Wait on writeback if requested to. This happens when
376                  * direct reclaiming a large contiguous area and the
377                  * first attempt to free a range of pages fails.
378                  */
379                 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
380                         wait_on_page_writeback(page);
381
382                 if (!PageWriteback(page)) {
383                         /* synchronous write or broken a_ops? */
384                         ClearPageReclaim(page);
385                 }
386                 inc_zone_page_state(page, NR_VMSCAN_WRITE);
387                 return PAGE_SUCCESS;
388         }
389
390         return PAGE_CLEAN;
391 }
392
393 /*
394  * Same as remove_mapping, but if the page is removed from the mapping, it
395  * gets returned with a refcount of 0.
396  */
397 static int __remove_mapping(struct address_space *mapping, struct page *page)
398 {
399         BUG_ON(!PageLocked(page));
400         BUG_ON(mapping != page_mapping(page));
401
402         spin_lock_irq(&mapping->tree_lock);
403         /*
404          * The non-racy check for a busy page.
405          *
406          * Must be careful with the order of the tests. When someone has
407          * a ref to the page, it may be possible that they dirty it then
408          * drop the reference. So if PageDirty is tested before page_count
409          * here, then the following race may occur:
410          *
411          * get_user_pages(&page);
412          * [user mapping goes away]
413          * write_to(page);
414          *                              !PageDirty(page)    [good]
415          * SetPageDirty(page);
416          * put_page(page);
417          *                              !page_count(page)   [good, discard it]
418          *
419          * [oops, our write_to data is lost]
420          *
421          * Reversing the order of the tests ensures such a situation cannot
422          * escape unnoticed. The smp_rmb is needed to ensure the page->flags
423          * load is not satisfied before that of page->_count.
424          *
425          * Note that if SetPageDirty is always performed via set_page_dirty,
426          * and thus under tree_lock, then this ordering is not required.
427          */
428         if (!page_freeze_refs(page, 2))
429                 goto cannot_free;
430         /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
431         if (unlikely(PageDirty(page))) {
432                 page_unfreeze_refs(page, 2);
433                 goto cannot_free;
434         }
435
436         if (PageSwapCache(page)) {
437                 swp_entry_t swap = { .val = page_private(page) };
438                 __delete_from_swap_cache(page);
439                 spin_unlock_irq(&mapping->tree_lock);
440                 swap_free(swap);
441         } else {
442                 __remove_from_page_cache(page);
443                 spin_unlock_irq(&mapping->tree_lock);
444         }
445
446         return 1;
447
448 cannot_free:
449         spin_unlock_irq(&mapping->tree_lock);
450         return 0;
451 }
452
453 /*
454  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
455  * someone else has a ref on the page, abort and return 0.  If it was
456  * successfully detached, return 1.  Assumes the caller has a single ref on
457  * this page.
458  */
459 int remove_mapping(struct address_space *mapping, struct page *page)
460 {
461         if (__remove_mapping(mapping, page)) {
462                 /*
463                  * Unfreezing the refcount with 1 rather than 2 effectively
464                  * drops the pagecache ref for us without requiring another
465                  * atomic operation.
466                  */
467                 page_unfreeze_refs(page, 1);
468                 return 1;
469         }
470         return 0;
471 }
472
473 /*
474  * shrink_page_list() returns the number of reclaimed pages
475  */
476 static unsigned long shrink_page_list(struct list_head *page_list,
477                                         struct scan_control *sc,
478                                         enum pageout_io sync_writeback)
479 {
480         LIST_HEAD(ret_pages);
481         struct pagevec freed_pvec;
482         int pgactivate = 0;
483         unsigned long nr_reclaimed = 0;
484
485         cond_resched();
486
487         pagevec_init(&freed_pvec, 1);
488         while (!list_empty(page_list)) {
489                 struct address_space *mapping;
490                 struct page *page;
491                 int may_enter_fs;
492                 int referenced;
493
494                 cond_resched();
495
496                 page = lru_to_page(page_list);
497                 list_del(&page->lru);
498
499                 if (!trylock_page(page))
500                         goto keep;
501
502                 VM_BUG_ON(PageActive(page));
503
504                 sc->nr_scanned++;
505
506                 if (!sc->may_swap && page_mapped(page))
507                         goto keep_locked;
508
509                 /* Double the slab pressure for mapped and swapcache pages */
510                 if (page_mapped(page) || PageSwapCache(page))
511                         sc->nr_scanned++;
512
513                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
514                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
515
516                 if (PageWriteback(page)) {
517                         /*
518                          * Synchronous reclaim is performed in two passes,
519                          * first an asynchronous pass over the list to
520                          * start parallel writeback, and a second synchronous
521                          * pass to wait for the IO to complete.  Wait here
522                          * for any page for which writeback has already
523                          * started.
524                          */
525                         if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
526                                 wait_on_page_writeback(page);
527                         else
528                                 goto keep_locked;
529                 }
530
531                 referenced = page_referenced(page, 1, sc->mem_cgroup);
532                 /* In active use or really unfreeable?  Activate it. */
533                 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
534                                         referenced && page_mapping_inuse(page))
535                         goto activate_locked;
536
537 #ifdef CONFIG_SWAP
538                 /*
539                  * Anonymous process memory has backing store?
540                  * Try to allocate it some swap space here.
541                  */
542                 if (PageAnon(page) && !PageSwapCache(page))
543                         if (!add_to_swap(page, GFP_ATOMIC))
544                                 goto activate_locked;
545 #endif /* CONFIG_SWAP */
546
547                 mapping = page_mapping(page);
548
549                 /*
550                  * The page is mapped into the page tables of one or more
551                  * processes. Try to unmap it here.
552                  */
553                 if (page_mapped(page) && mapping) {
554                         switch (try_to_unmap(page, 0)) {
555                         case SWAP_FAIL:
556                                 goto activate_locked;
557                         case SWAP_AGAIN:
558                                 goto keep_locked;
559                         case SWAP_SUCCESS:
560                                 ; /* try to free the page below */
561                         }
562                 }
563
564                 if (PageDirty(page)) {
565                         if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
566                                 goto keep_locked;
567                         if (!may_enter_fs)
568                                 goto keep_locked;
569                         if (!sc->may_writepage)
570                                 goto keep_locked;
571
572                         /* Page is dirty, try to write it out here */
573                         switch (pageout(page, mapping, sync_writeback)) {
574                         case PAGE_KEEP:
575                                 goto keep_locked;
576                         case PAGE_ACTIVATE:
577                                 goto activate_locked;
578                         case PAGE_SUCCESS:
579                                 if (PageWriteback(page) || PageDirty(page))
580                                         goto keep;
581                                 /*
582                                  * A synchronous write - probably a ramdisk.  Go
583                                  * ahead and try to reclaim the page.
584                                  */
585                                 if (!trylock_page(page))
586                                         goto keep;
587                                 if (PageDirty(page) || PageWriteback(page))
588                                         goto keep_locked;
589                                 mapping = page_mapping(page);
590                         case PAGE_CLEAN:
591                                 ; /* try to free the page below */
592                         }
593                 }
594
595                 /*
596                  * If the page has buffers, try to free the buffer mappings
597                  * associated with this page. If we succeed we try to free
598                  * the page as well.
599                  *
600                  * We do this even if the page is PageDirty().
601                  * try_to_release_page() does not perform I/O, but it is
602                  * possible for a page to have PageDirty set while actually being
603                  * clean (all its buffers are clean).  This happens if the
604                  * buffers were written out directly, with submit_bh(). ext3
605                  * will do this, as well as the blockdev mapping. 
606                  * try_to_release_page() will discover that cleanness and will
607                  * drop the buffers and mark the page clean - it can be freed.
608                  *
609                  * Rarely, pages can have buffers and no ->mapping.  These are
610                  * the pages which were not successfully invalidated in
611                  * truncate_complete_page().  We try to drop those buffers here
612                  * and if that worked, and the page is no longer mapped into
613                  * process address space (page_count == 1) it can be freed.
614                  * Otherwise, leave the page on the LRU so it is swappable.
615                  */
616                 if (PagePrivate(page)) {
617                         if (!try_to_release_page(page, sc->gfp_mask))
618                                 goto activate_locked;
619                         if (!mapping && page_count(page) == 1) {
620                                 unlock_page(page);
621                                 if (put_page_testzero(page))
622                                         goto free_it;
623                                 else {
624                                         /*
625                                          * rare race with speculative reference.
626                                          * the speculative reference will free
627                                          * this page shortly, so we may
628                                          * increment nr_reclaimed here (and
629                                          * leave it off the LRU).
630                                          */
631                                         nr_reclaimed++;
632                                         continue;
633                                 }
634                         }
635                 }
636
637                 if (!mapping || !__remove_mapping(mapping, page))
638                         goto keep_locked;
639
640                 unlock_page(page);
641 free_it:
642                 nr_reclaimed++;
643                 if (!pagevec_add(&freed_pvec, page)) {
644                         __pagevec_free(&freed_pvec);
645                         pagevec_reinit(&freed_pvec);
646                 }
647                 continue;
648
649 activate_locked:
650                 /* Not a candidate for swapping, so reclaim swap space. */
651                 if (PageSwapCache(page) && vm_swap_full())
652                         remove_exclusive_swap_page_ref(page);
653                 SetPageActive(page);
654                 pgactivate++;
655 keep_locked:
656                 unlock_page(page);
657 keep:
658                 list_add(&page->lru, &ret_pages);
659                 VM_BUG_ON(PageLRU(page));
660         }
661         list_splice(&ret_pages, page_list);
662         if (pagevec_count(&freed_pvec))
663                 __pagevec_free(&freed_pvec);
664         count_vm_events(PGACTIVATE, pgactivate);
665         return nr_reclaimed;
666 }
667
668 /* LRU Isolation modes. */
669 #define ISOLATE_INACTIVE 0      /* Isolate inactive pages. */
670 #define ISOLATE_ACTIVE 1        /* Isolate active pages. */
671 #define ISOLATE_BOTH 2          /* Isolate both active and inactive pages. */
672
673 /*
674  * Attempt to remove the specified page from its LRU.  Only take this page
675  * if it is of the appropriate PageActive status.  Pages which are being
676  * freed elsewhere are also ignored.
677  *
678  * page:        page to consider
679  * mode:        one of the LRU isolation modes defined above
680  *
681  * returns 0 on success, -ve errno on failure.
682  */
683 int __isolate_lru_page(struct page *page, int mode)
684 {
685         int ret = -EINVAL;
686
687         /* Only take pages on the LRU. */
688         if (!PageLRU(page))
689                 return ret;
690
691         /*
692          * When checking the active state, we need to be sure we are
693          * dealing with comparable boolean values.  Take the logical not
694          * of each.
695          */
696         if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
697                 return ret;
698
699         ret = -EBUSY;
700         if (likely(get_page_unless_zero(page))) {
701                 /*
702                  * Be careful not to clear PageLRU until after we're
703                  * sure the page is not being freed elsewhere -- the
704                  * page release code relies on it.
705                  */
706                 ClearPageLRU(page);
707                 ret = 0;
708         }
709
710         return ret;
711 }
712
713 /*
714  * zone->lru_lock is heavily contended.  Some of the functions that
715  * shrink the lists perform better by taking out a batch of pages
716  * and working on them outside the LRU lock.
717  *
718  * For pagecache intensive workloads, this function is the hottest
719  * spot in the kernel (apart from copy_*_user functions).
720  *
721  * Appropriate locks must be held before calling this function.
722  *
723  * @nr_to_scan: The number of pages to look through on the list.
724  * @src:        The LRU list to pull pages off.
725  * @dst:        The temp list to put pages on to.
726  * @scanned:    The number of pages that were scanned.
727  * @order:      The caller's attempted allocation order
728  * @mode:       One of the LRU isolation modes
729  *
730  * returns how many pages were moved onto *@dst.
731  */
732 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
733                 struct list_head *src, struct list_head *dst,
734                 unsigned long *scanned, int order, int mode)
735 {
736         unsigned long nr_taken = 0;
737         unsigned long scan;
738
739         for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
740                 struct page *page;
741                 unsigned long pfn;
742                 unsigned long end_pfn;
743                 unsigned long page_pfn;
744                 int zone_id;
745
746                 page = lru_to_page(src);
747                 prefetchw_prev_lru_page(page, src, flags);
748
749                 VM_BUG_ON(!PageLRU(page));
750
751                 switch (__isolate_lru_page(page, mode)) {
752                 case 0:
753                         list_move(&page->lru, dst);
754                         nr_taken++;
755                         break;
756
757                 case -EBUSY:
758                         /* else it is being freed elsewhere */
759                         list_move(&page->lru, src);
760                         continue;
761
762                 default:
763                         BUG();
764                 }
765
766                 if (!order)
767                         continue;
768
769                 /*
770                  * Attempt to take all pages in the order aligned region
771                  * surrounding the tag page.  Only take those pages of
772                  * the same active state as that tag page.  We may safely
773                  * round the target page pfn down to the requested order
774                  * as the mem_map is guaranteed valid out to MAX_ORDER.
775                  * Where a page in the block is in a different zone we will
776                  * detect it from its zone id and abort this block scan.
777                  */
778                 zone_id = page_zone_id(page);
779                 page_pfn = page_to_pfn(page);
780                 pfn = page_pfn & ~((1 << order) - 1);
781                 end_pfn = pfn + (1 << order);
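                /*
                 * Example: with order = 3 and page_pfn = 0x1235 the aligned
                 * block is pfn 0x1230 .. 0x1237 (end_pfn = 0x1238).
                 */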
782                 for (; pfn < end_pfn; pfn++) {
783                         struct page *cursor_page;
784
785                         /* The target page is in the block, ignore it. */
786                         if (unlikely(pfn == page_pfn))
787                                 continue;
788
789                         /* Avoid holes within the zone. */
790                         if (unlikely(!pfn_valid_within(pfn)))
791                                 break;
792
793                         cursor_page = pfn_to_page(pfn);
794                         /* Check that we have not crossed a zone boundary. */
795                         if (unlikely(page_zone_id(cursor_page) != zone_id))
796                                 continue;
797                         switch (__isolate_lru_page(cursor_page, mode)) {
798                         case 0:
799                                 list_move(&cursor_page->lru, dst);
800                                 nr_taken++;
801                                 scan++;
802                                 break;
803
804                         case -EBUSY:
805                                 /* else it is being freed elsewhere */
806                                 list_move(&cursor_page->lru, src);
807                         default:
808                                 break;
809                         }
810                 }
811         }
812
813         *scanned = scan;
814         return nr_taken;
815 }
816
817 static unsigned long isolate_pages_global(unsigned long nr,
818                                         struct list_head *dst,
819                                         unsigned long *scanned, int order,
820                                         int mode, struct zone *z,
821                                         struct mem_cgroup *mem_cont,
822                                         int active)
823 {
824         if (active)
825                 return isolate_lru_pages(nr, &z->lru[LRU_ACTIVE].list, dst,
826                                                 scanned, order, mode);
827         else
828                 return isolate_lru_pages(nr, &z->lru[LRU_INACTIVE].list, dst,
829                                                 scanned, order, mode);
830 }
831
832 /*
833  * clear_active_flags() is a helper for shrink_active_list(), clearing
834  * any active bits from the pages in the list.
835  */
836 static unsigned long clear_active_flags(struct list_head *page_list)
837 {
838         int nr_active = 0;
839         struct page *page;
840
841         list_for_each_entry(page, page_list, lru)
842                 if (PageActive(page)) {
843                         ClearPageActive(page);
844                         nr_active++;
845                 }
846
847         return nr_active;
848 }
849
850 /**
851  * isolate_lru_page - tries to isolate a page from its LRU list
852  * @page: page to isolate from its LRU list
853  *
854  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
855  * vmstat statistic corresponding to whatever LRU list the page was on.
856  *
857  * Returns 0 if the page was removed from an LRU list.
858  * Returns -EBUSY if the page was not on an LRU list.
859  *
860  * The returned page will have PageLRU() cleared.  If it was found on
861  * the active list, it will have PageActive set.  That flag may need
862  * to be cleared by the caller before letting the page go.
863  *
864  * The vmstat statistic corresponding to the list on which the page was
865  * found will be decremented.
866  *
867  * Restrictions:
868  * (1) Must be called with an elevated refcount on the page. This is a
869  *     fundamental difference from isolate_lru_pages (which is called
870  *     without a stable reference).
871  * (2) the lru_lock must not be held.
872  * (3) interrupts must be enabled.
873  */
874 int isolate_lru_page(struct page *page)
875 {
876         int ret = -EBUSY;
877
878         if (PageLRU(page)) {
879                 struct zone *zone = page_zone(page);
880
881                 spin_lock_irq(&zone->lru_lock);
882                 if (PageLRU(page) && get_page_unless_zero(page)) {
883                         ret = 0;
884                         ClearPageLRU(page);
885                         if (PageActive(page))
886                                 del_page_from_active_list(zone, page);
887                         else
888                                 del_page_from_inactive_list(zone, page);
889                 }
890                 spin_unlock_irq(&zone->lru_lock);
891         }
892         return ret;
893 }
894
895 /*
896  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
897  * of reclaimed pages
898  */
899 static unsigned long shrink_inactive_list(unsigned long max_scan,
900                                 struct zone *zone, struct scan_control *sc)
901 {
902         LIST_HEAD(page_list);
903         struct pagevec pvec;
904         unsigned long nr_scanned = 0;
905         unsigned long nr_reclaimed = 0;
906
907         pagevec_init(&pvec, 1);
908
909         lru_add_drain();
910         spin_lock_irq(&zone->lru_lock);
911         do {
912                 struct page *page;
913                 unsigned long nr_taken;
914                 unsigned long nr_scan;
915                 unsigned long nr_freed;
916                 unsigned long nr_active;
917
918                 nr_taken = sc->isolate_pages(sc->swap_cluster_max,
919                              &page_list, &nr_scan, sc->order,
920                              (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
921                                              ISOLATE_BOTH : ISOLATE_INACTIVE,
922                                 zone, sc->mem_cgroup, 0);
923                 nr_active = clear_active_flags(&page_list);
924                 __count_vm_events(PGDEACTIVATE, nr_active);
925
926                 __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
927                 __mod_zone_page_state(zone, NR_INACTIVE,
928                                                 -(nr_taken - nr_active));
929                 if (scan_global_lru(sc))
930                         zone->pages_scanned += nr_scan;
931                 spin_unlock_irq(&zone->lru_lock);
932
933                 nr_scanned += nr_scan;
934                 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
935
936                 /*
937                  * If we are direct reclaiming for contiguous pages and we do
938                  * not reclaim everything in the list, try again and wait
939                  * for IO to complete. This will stall high-order allocations
940                  * but that should be acceptable to the caller
941                  */
942                 if (nr_freed < nr_taken && !current_is_kswapd() &&
943                                         sc->order > PAGE_ALLOC_COSTLY_ORDER) {
944                         congestion_wait(WRITE, HZ/10);
945
946                         /*
947                          * The attempt at page out may have made some
948                          * of the pages active, mark them inactive again.
949                          */
950                         nr_active = clear_active_flags(&page_list);
951                         count_vm_events(PGDEACTIVATE, nr_active);
952
953                         nr_freed += shrink_page_list(&page_list, sc,
954                                                         PAGEOUT_IO_SYNC);
955                 }
956
957                 nr_reclaimed += nr_freed;
958                 local_irq_disable();
959                 if (current_is_kswapd()) {
960                         __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
961                         __count_vm_events(KSWAPD_STEAL, nr_freed);
962                 } else if (scan_global_lru(sc))
963                         __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
964
965                 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
966
967                 if (nr_taken == 0)
968                         goto done;
969
970                 spin_lock(&zone->lru_lock);
971                 /*
972                  * Put back any unfreeable pages.
973                  */
974                 while (!list_empty(&page_list)) {
975                         page = lru_to_page(&page_list);
976                         VM_BUG_ON(PageLRU(page));
977                         SetPageLRU(page);
978                         list_del(&page->lru);
979                         add_page_to_lru_list(zone, page, page_lru(page));
980                         if (!pagevec_add(&pvec, page)) {
981                                 spin_unlock_irq(&zone->lru_lock);
982                                 __pagevec_release(&pvec);
983                                 spin_lock_irq(&zone->lru_lock);
984                         }
985                 }
986         } while (nr_scanned < max_scan);
987         spin_unlock(&zone->lru_lock);
988 done:
989         local_irq_enable();
990         pagevec_release(&pvec);
991         return nr_reclaimed;
992 }
993
994 /*
995  * We are about to scan this zone at a certain priority level.  If that priority
996  * level is smaller (ie: more urgent) than the previous priority, then note
997  * that priority level within the zone.  This is done so that when the next
998  * process comes in to scan this zone, it will immediately start out at this
999  * priority level rather than having to build up its own scanning priority.
1000  * Here, this priority affects only the reclaim-mapped threshold.
1001  */
1002 static inline void note_zone_scanning_priority(struct zone *zone, int priority)
1003 {
1004         if (priority < zone->prev_priority)
1005                 zone->prev_priority = priority;
1006 }
1007
1008 static inline int zone_is_near_oom(struct zone *zone)
1009 {
1010         return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
1011                                 + zone_page_state(zone, NR_INACTIVE))*3;
1012 }
1013
1014 /*
1015  * Determine whether we should try to reclaim mapped pages.
1016  * This is called only when sc->mem_cgroup is NULL.
1017  */
1018 static int calc_reclaim_mapped(struct scan_control *sc, struct zone *zone,
1019                                 int priority)
1020 {
1021         long mapped_ratio;
1022         long distress;
1023         long swap_tendency;
1024         long imbalance;
1025         int reclaim_mapped = 0;
1026         int prev_priority;
1027
1028         if (scan_global_lru(sc) && zone_is_near_oom(zone))
1029                 return 1;
1030         /*
1031          * `distress' is a measure of how much trouble we're having
1032          * reclaiming pages.  0 -> no problems.  100 -> great trouble.
1033          */
1034         if (scan_global_lru(sc))
1035                 prev_priority = zone->prev_priority;
1036         else
1037                 prev_priority = mem_cgroup_get_reclaim_priority(sc->mem_cgroup);
1038
1039         distress = 100 >> min(prev_priority, priority);
1040
1041         /*
1042          * The point of this algorithm is to decide when to start
1043          * reclaiming mapped memory instead of just pagecache.  Work out
1044          * how much memory
1045          * is mapped.
1046          */
1047         if (scan_global_lru(sc))
1048                 mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
1049                                 global_page_state(NR_ANON_PAGES)) * 100) /
1050                                         vm_total_pages;
1051         else
1052                 mapped_ratio = mem_cgroup_calc_mapped_ratio(sc->mem_cgroup);
1053
1054         /*
1055          * Now decide how strongly we really want to unmap pages.  The
1056          * mapped ratio is downgraded - just because there's a lot of
1057          * mapped memory doesn't necessarily mean that page reclaim
1058          * isn't succeeding.
1059          *
1060          * The distress ratio is important - we don't want to start
1061          * going oom.
1062          *
1063          * A 100% value of vm_swappiness overrides this algorithm
1064          * altogether.
1065          */
1066         swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
1067
1068         /*
1069          * If there's a huge imbalance between active and inactive
1070          * (think active 100 times larger than inactive) we should
1071          * become more permissive, or the system will take too much
1072          * cpu before it starts swapping during memory pressure.
1073          * Distress is about avoiding early-oom, this is about
1074          * making swappiness graceful despite setting it to low
1075          * values.
1076          *
1077          * Avoid div by zero with nr_inactive+1, and max resulting
1078          * value is vm_total_pages.
1079          */
1080         if (scan_global_lru(sc)) {
1081                 imbalance  = zone_page_state(zone, NR_ACTIVE);
1082                 imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
1083         } else
1084                 imbalance = mem_cgroup_reclaim_imbalance(sc->mem_cgroup);
1085
1086         /*
1087          * Reduce the effect of imbalance if swappiness is low:
1088          * with a very low swappiness, the imbalance
1089          * must be much higher than 100 for this logic to make
1090          * a difference.
1091          *
1092          * Max temporary value is vm_total_pages*100.
1093          */
1094         imbalance *= (vm_swappiness + 1);
1095         imbalance /= 100;
1096
1097         /*
1098          * If not much of the ram is mapped, make the imbalance
1099          * less relevant: refilling the inactive list with mapped
1100          * pages is only a priority in the presence of a high ratio
1101          * of mapped pages.
1102          *
1103          * Max temporary value is vm_total_pages*100.
1104          */
1105         imbalance *= mapped_ratio;
1106         imbalance /= 100;
1107
1108         /* apply imbalance feedback to swap_tendency */
1109         swap_tendency += imbalance;
1110
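        /*
         * Example with illustrative numbers: mapped_ratio = 50,
         * swappiness = 60, prev_priority = priority = DEF_PRIORITY (12) and
         * a negligible imbalance give distress = 100 >> 12 = 0 and
         * swap_tendency = 25 + 0 + 60 = 85, so mapped pages are left alone.
         * Once reclaim has struggled down to priority 1, distress = 50 and
         * the same workload reaches 135, crossing the threshold below.
         */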
1111         /*
1112          * Now use this metric to decide whether to start moving mapped
1113          * memory onto the inactive list.
1114          */
1115         if (swap_tendency >= 100)
1116                 reclaim_mapped = 1;
1117
1118         return reclaim_mapped;
1119 }
1120
1121 /*
1122  * This moves pages from the active list to the inactive list.
1123  *
1124  * We move them the other way if the page is referenced by one or more
1125  * processes, from rmap.
1126  *
1127  * If the pages are mostly unmapped, the processing is fast and it is
1128  * appropriate to hold zone->lru_lock across the whole operation.  But if
1129  * the pages are mapped, the processing is slow (page_referenced()) so we
1130  * should drop zone->lru_lock around each page.  It's impossible to balance
1131  * this, so instead we remove the pages from the LRU while processing them.
1132  * It is safe to rely on PG_active against the non-LRU pages in here because
1133  * nobody will play with that bit on a non-LRU page.
1134  *
1135  * The downside is that we have to touch page->_count against each page.
1136  * But we had to alter page->flags anyway.
1137  */
1138
1139
1140 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1141                                 struct scan_control *sc, int priority)
1142 {
1143         unsigned long pgmoved;
1144         int pgdeactivate = 0;
1145         unsigned long pgscanned;
1146         LIST_HEAD(l_hold);      /* The pages which were snipped off */
1147         LIST_HEAD(l_active);
1148         LIST_HEAD(l_inactive);
1149         struct page *page;
1150         struct pagevec pvec;
1151         int reclaim_mapped = 0;
1152
1153         if (sc->may_swap)
1154                 reclaim_mapped = calc_reclaim_mapped(sc, zone, priority);
1155
1156         lru_add_drain();
1157         spin_lock_irq(&zone->lru_lock);
1158         pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
1159                                         ISOLATE_ACTIVE, zone,
1160                                         sc->mem_cgroup, 1);
1161         /*
1162          * zone->pages_scanned is used to detect a zone's oom state;
1163          * mem_cgroup remembers nr_scan by itself.
1164          */
1165         if (scan_global_lru(sc))
1166                 zone->pages_scanned += pgscanned;
1167
1168         __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
1169         spin_unlock_irq(&zone->lru_lock);
1170
1171         while (!list_empty(&l_hold)) {
1172                 cond_resched();
1173                 page = lru_to_page(&l_hold);
1174                 list_del(&page->lru);
1175                 if (page_mapped(page)) {
1176                         if (!reclaim_mapped ||
1177                             (total_swap_pages == 0 && PageAnon(page)) ||
1178                             page_referenced(page, 0, sc->mem_cgroup)) {
1179                                 list_add(&page->lru, &l_active);
1180                                 continue;
1181                         }
1182                 }
1183                 list_add(&page->lru, &l_inactive);
1184         }
1185
1186         pagevec_init(&pvec, 1);
1187         pgmoved = 0;
1188         spin_lock_irq(&zone->lru_lock);
1189         while (!list_empty(&l_inactive)) {
1190                 page = lru_to_page(&l_inactive);
1191                 prefetchw_prev_lru_page(page, &l_inactive, flags);
1192                 VM_BUG_ON(PageLRU(page));
1193                 SetPageLRU(page);
1194                 VM_BUG_ON(!PageActive(page));
1195                 ClearPageActive(page);
1196
1197                 list_move(&page->lru, &zone->lru[LRU_INACTIVE].list);
1198                 mem_cgroup_move_lists(page, false);
1199                 pgmoved++;
1200                 if (!pagevec_add(&pvec, page)) {
1201                         __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
1202                         spin_unlock_irq(&zone->lru_lock);
1203                         pgdeactivate += pgmoved;
1204                         pgmoved = 0;
1205                         if (buffer_heads_over_limit)
1206                                 pagevec_strip(&pvec);
1207                         __pagevec_release(&pvec);
1208                         spin_lock_irq(&zone->lru_lock);
1209                 }
1210         }
1211         __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
1212         pgdeactivate += pgmoved;
1213         if (buffer_heads_over_limit) {
1214                 spin_unlock_irq(&zone->lru_lock);
1215                 pagevec_strip(&pvec);
1216                 spin_lock_irq(&zone->lru_lock);
1217         }
1218
1219         pgmoved = 0;
1220         while (!list_empty(&l_active)) {
1221                 page = lru_to_page(&l_active);
1222                 prefetchw_prev_lru_page(page, &l_active, flags);
1223                 VM_BUG_ON(PageLRU(page));
1224                 SetPageLRU(page);
1225                 VM_BUG_ON(!PageActive(page));
1226
1227                 list_move(&page->lru, &zone->lru[LRU_ACTIVE].list);
1228                 mem_cgroup_move_lists(page, true);
1229                 pgmoved++;
1230                 if (!pagevec_add(&pvec, page)) {
1231                         __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
1232                         pgmoved = 0;
1233                         spin_unlock_irq(&zone->lru_lock);
1234                         if (vm_swap_full())
1235                                 pagevec_swap_free(&pvec);
1236                         __pagevec_release(&pvec);
1237                         spin_lock_irq(&zone->lru_lock);
1238                 }
1239         }
1240         __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
1241
1242         __count_zone_vm_events(PGREFILL, zone, pgscanned);
1243         __count_vm_events(PGDEACTIVATE, pgdeactivate);
1244         spin_unlock_irq(&zone->lru_lock);
1245         if (vm_swap_full())
1246                 pagevec_swap_free(&pvec);
1247
1248         pagevec_release(&pvec);
1249 }
1250
1251 static unsigned long shrink_list(enum lru_list l, unsigned long nr_to_scan,
1252         struct zone *zone, struct scan_control *sc, int priority)
1253 {
1254         if (l == LRU_ACTIVE) {
1255                 shrink_active_list(nr_to_scan, zone, sc, priority);
1256                 return 0;
1257         }
1258         return shrink_inactive_list(nr_to_scan, zone, sc);
1259 }
1260
1261 /*
1262  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1263  */
1264 static unsigned long shrink_zone(int priority, struct zone *zone,
1265                                 struct scan_control *sc)
1266 {
1267         unsigned long nr[NR_LRU_LISTS];
1268         unsigned long nr_to_scan;
1269         unsigned long nr_reclaimed = 0;
1270         enum lru_list l;
1271
1272         if (scan_global_lru(sc)) {
1273                 /*
1274                  * Add one to nr_to_scan just to make sure that the kernel
1275                  * will slowly sift through the active list.
1276                  */
1277                 for_each_lru(l) {
1278                         zone->lru[l].nr_scan += (zone_page_state(zone,
1279                                         NR_LRU_BASE + l)  >> priority) + 1;
1280                         nr[l] = zone->lru[l].nr_scan;
1281                         if (nr[l] >= sc->swap_cluster_max)
1282                                 zone->lru[l].nr_scan = 0;
1283                         else
1284                                 nr[l] = 0;
1285                 }
1286         } else {
1287                 /*
1288                  * This reclaim occurs not because of a zone memory shortage
1289                  * but because the memory controller has hit its limit.
1290                  * So don't modify zone reclaim related data.
1291                  */
1292                 nr[LRU_ACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
1293                                         zone, priority, LRU_ACTIVE);
1294
1295                 nr[LRU_INACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
1296                                         zone, priority, LRU_INACTIVE);
1297         }
1298
1299         while (nr[LRU_ACTIVE] || nr[LRU_INACTIVE]) {
1300                 for_each_lru(l) {
1301                         if (nr[l]) {
1302                                 nr_to_scan = min(nr[l],
1303                                         (unsigned long)sc->swap_cluster_max);
1304                                 nr[l] -= nr_to_scan;
1305
1306                                 nr_reclaimed += shrink_list(l, nr_to_scan,
1307                                                         zone, sc, priority);
1308                         }
1309                 }
1310         }
1311
1312         throttle_vm_writeout(sc->gfp_mask);
1313         return nr_reclaimed;
1314 }
1315
1316 /*
1317  * This is the direct reclaim path, for page-allocating processes.  We only
1318  * try to reclaim pages from zones which will satisfy the caller's allocation
1319  * request.
1320  *
1321  * We reclaim from a zone even if that zone is over pages_high.  Because:
1322  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1323  *    allocation or
1324  * b) The zones may be over pages_high but they must go *over* pages_high to
1325  *    satisfy the `incremental min' zone defense algorithm.
1326  *
1327  * Returns the number of reclaimed pages.
1328  *
1329  * If a zone is deemed to be full of pinned pages then just give it a light
1330  * scan then give up on it.
1331  */
1332 static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
1333                                         struct scan_control *sc)
1334 {
1335         enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1336         unsigned long nr_reclaimed = 0;
1337         struct zoneref *z;
1338         struct zone *zone;
1339
1340         sc->all_unreclaimable = 1;
1341         for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1342                 if (!populated_zone(zone))
1343                         continue;
1344                 /*
1345                  * Take care that memory controller reclaiming has little
1346                  * influence on the global LRU.
1347                  */
1348                 if (scan_global_lru(sc)) {
1349                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1350                                 continue;
1351                         note_zone_scanning_priority(zone, priority);
1352
1353                         if (zone_is_all_unreclaimable(zone) &&
1354                                                 priority != DEF_PRIORITY)
1355                                 continue;       /* Let kswapd poll it */
1356                         sc->all_unreclaimable = 0;
1357                 } else {
1358                         /*
1359                          * Ignore cpuset limitations here. We just want to reduce
1360                          * the number of pages we use, regardless of memory shortage.
1361                          */
1362                         sc->all_unreclaimable = 0;
1363                         mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
1364                                                         priority);
1365                 }
1366
1367                 nr_reclaimed += shrink_zone(priority, zone, sc);
1368         }
1369
1370         return nr_reclaimed;
1371 }
1372  
1373 /*
1374  * This is the main entry point to direct page reclaim.
1375  *
1376  * If a full scan of the inactive list fails to free enough memory then we
1377  * are "out of memory" and something needs to be killed.
1378  *
1379  * If the caller is !__GFP_FS then the probability of a failure is reasonably
1380  * high - the zone may be full of dirty or under-writeback pages, which this
1381  * caller can't do much about.  We kick pdflush and take explicit naps in the
1382  * hope that some of these pages can be written.  But if the allocating task
1383  * holds filesystem locks which prevent writeout this might not work, and the
1384  * allocation attempt will fail.
1385  *
1386  * returns:     0, if no pages reclaimed
1387  *              else, the number of pages reclaimed
1388  */
1389 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1390                                         struct scan_control *sc)
1391 {
1392         int priority;
1393         unsigned long ret = 0;
1394         unsigned long total_scanned = 0;
1395         unsigned long nr_reclaimed = 0;
1396         struct reclaim_state *reclaim_state = current->reclaim_state;
1397         unsigned long lru_pages = 0;
1398         struct zoneref *z;
1399         struct zone *zone;
1400         enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1401
1402         delayacct_freepages_start();
1403
1404         if (scan_global_lru(sc))
1405                 count_vm_event(ALLOCSTALL);
1406         /*
1407          * mem_cgroup will not do shrink_slab.
1408          */
1409         if (scan_global_lru(sc)) {
1410                 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1411
1412                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1413                                 continue;
1414
1415                         lru_pages += zone_page_state(zone, NR_ACTIVE)
1416                                         + zone_page_state(zone, NR_INACTIVE);
1417                 }
1418         }
1419
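        /*
         * A note on the priority loop below: DEF_PRIORITY is 12, so the first
         * pass asks shrink_zone() to consider only about lru_size >> 12
         * (roughly 1/4096th) of each list; the scan window doubles each time
         * the priority drops, until priority 0 covers the whole list.
         */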
1420         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1421                 sc->nr_scanned = 0;
1422                 if (!priority)
1423                         disable_swap_token();
1424                 nr_reclaimed += shrink_zones(priority, zonelist, sc);
1425                 /*
1426                  * Don't shrink slabs when reclaiming memory from
1427                  * over limit cgroups
1428                  * over-limit cgroups.
1429                 if (scan_global_lru(sc)) {
1430                         shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1431                         if (reclaim_state) {
1432                                 nr_reclaimed += reclaim_state->reclaimed_slab;
1433                                 reclaim_state->reclaimed_slab = 0;
1434                         }
1435                 }
1436                 total_scanned += sc->nr_scanned;
1437                 if (nr_reclaimed >= sc->swap_cluster_max) {
1438                         ret = nr_reclaimed;
1439                         goto out;
1440                 }
1441
1442                 /*
1443                  * Try to write back as many pages as we just scanned.  This
1444                  * tends to cause slow streaming writers to write data to the
1445                  * disk smoothly, at the dirtying rate, which is nice.   But
1446                  * that's undesirable in laptop mode, where we *want* lumpy
1447                  * writeout.  So in laptop mode, write out the whole world.
1448                  */
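                /*
                 * The threshold below is swap_cluster_max plus half again,
                 * i.e. 1.5 * swap_cluster_max, or 48 pages with the usual
                 * SWAP_CLUSTER_MAX of 32, before pdflush is woken.
                 */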
1449                 if (total_scanned > sc->swap_cluster_max +
1450                                         sc->swap_cluster_max / 2) {
1451                         wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1452                         sc->may_writepage = 1;
1453                 }
1454
1455                 /* Take a nap, wait for some writeback to complete */
1456                 if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
1457                         congestion_wait(WRITE, HZ/10);
1458         }
1459         /* top priority shrink_zones still had more to do? don't OOM, then */
1460         if (!sc->all_unreclaimable && scan_global_lru(sc))
1461                 ret = nr_reclaimed;
1462 out:
1463         /*
1464          * Now that we've scanned all the zones at this priority level, note
1465          * that level within the zone so that the next thread which performs
1466          * scanning of this zone will immediately start out at this priority
1467          * level.  This affects only the decision whether or not to bring
1468          * mapped pages onto the inactive list.
1469          */
1470         if (priority < 0)
1471                 priority = 0;
1472
1473         if (scan_global_lru(sc)) {
1474                 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1475
1476                         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1477                                 continue;
1478
1479                         zone->prev_priority = priority;
1480                 }
1481         } else
1482                 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1483
1484         delayacct_freepages_end();
1485
1486         return ret;
1487 }
1488
1489 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1490                                                                 gfp_t gfp_mask)
1491 {
1492         struct scan_control sc = {
1493                 .gfp_mask = gfp_mask,
1494                 .may_writepage = !laptop_mode,
1495                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1496                 .may_swap = 1,
1497                 .swappiness = vm_swappiness,
1498                 .order = order,
1499                 .mem_cgroup = NULL,
1500                 .isolate_pages = isolate_pages_global,
1501         };
1502
1503         return do_try_to_free_pages(zonelist, &sc);
1504 }
1505
1506 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
1507
1508 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1509                                                 gfp_t gfp_mask)
1510 {
1511         struct scan_control sc = {
1512                 .may_writepage = !laptop_mode,
1513                 .may_swap = 1,
1514                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1515                 .swappiness = vm_swappiness,
1516                 .order = 0,
1517                 .mem_cgroup = mem_cont,
1518                 .isolate_pages = mem_cgroup_isolate_pages,
1519         };
1520         struct zonelist *zonelist;
1521
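        /*
         * Keep the caller's reclaim-behaviour bits but take the zone
         * placement bits from GFP_HIGHUSER_MOVABLE, which covers the zones
         * that user (and hence memcg-charged) pages can live in.
         */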
1522         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1523                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1524         zonelist = NODE_DATA(numa_node_id())->node_zonelists;
1525         return do_try_to_free_pages(zonelist, &sc);
1526 }
1527 #endif
1528
1529 /*
1530  * For kswapd, balance_pgdat() will work across all this node's zones until
1531  * they are all at pages_high.
1532  *
1533  * Returns the number of pages which were actually freed.
1534  *
1535  * There is special handling here for zones which are full of pinned pages.
1536  * This can happen if the pages are all mlocked, or if they are all used by
1537  * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
1538  * What we do is to detect the case where all pages in the zone have been
1539  * scanned twice and there has been zero successful reclaim.  Mark the zone as
1540  * dead and from now on, only perform a short scan.  Basically we're polling
1541  * the zone for when the problem goes away.
1542  *
1543  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
1544  * zones which have free_pages > pages_high, but once a zone is found to have
1545  * free_pages <= pages_high, we scan that zone and the lower zones regardless
1546  * of the number of free pages in the lower zones.  This interoperates with
1547  * the page allocator fallback scheme to ensure that aging of pages is balanced
1548  * across the zones.
1549  */
1550 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1551 {
1552         int all_zones_ok;
1553         int priority;
1554         int i;
1555         unsigned long total_scanned;
1556         unsigned long nr_reclaimed;
1557         struct reclaim_state *reclaim_state = current->reclaim_state;
1558         struct scan_control sc = {
1559                 .gfp_mask = GFP_KERNEL,
1560                 .may_swap = 1,
1561                 .swap_cluster_max = SWAP_CLUSTER_MAX,
1562                 .swappiness = vm_swappiness,
1563                 .order = order,
1564                 .mem_cgroup = NULL,
1565                 .isolate_pages = isolate_pages_global,
1566         };
1567         /*
1568          * temp_priority is used to remember the scanning priority at which
1569          * this zone was successfully refilled to free_pages == pages_high.
1570          */
1571         int temp_priority[MAX_NR_ZONES];
1572
1573 loop_again:
1574         total_scanned = 0;
1575         nr_reclaimed = 0;
1576         sc.may_writepage = !laptop_mode;
1577         count_vm_event(PAGEOUTRUN);
1578
1579         for (i = 0; i < pgdat->nr_zones; i++)
1580                 temp_priority[i] = DEF_PRIORITY;
1581
1582         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1583                 int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
1584                 unsigned long lru_pages = 0;
1585
1586                 /* The swap token gets in the way of swapout... */
1587                 if (!priority)
1588                         disable_swap_token();
1589
1590                 all_zones_ok = 1;
1591
1592                 /*
1593                  * Scan in the highmem->dma direction for the highest
1594                  * zone which needs scanning
1595                  */
1596                 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1597                         struct zone *zone = pgdat->node_zones + i;
1598
1599                         if (!populated_zone(zone))
1600                                 continue;
1601
1602                         if (zone_is_all_unreclaimable(zone) &&
1603                             priority != DEF_PRIORITY)
1604                                 continue;
1605
1606                         if (!zone_watermark_ok(zone, order, zone->pages_high,
1607                                                0, 0)) {
1608                                 end_zone = i;
1609                                 break;
1610                         }
1611                 }
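                /*
                 * i < 0 here means no zone was found below pages_high:
                 * kswapd has nothing more to do.
                 */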
1612                 if (i < 0)
1613                         goto out;
1614
1615                 for (i = 0; i <= end_zone; i++) {
1616                         struct zone *zone = pgdat->node_zones + i;
1617
1618                         lru_pages += zone_page_state(zone, NR_ACTIVE)
1619                                         + zone_page_state(zone, NR_INACTIVE);
1620                 }
1621
1622                 /*
1623                  * Now scan the zone in the dma->highmem direction, stopping
1624                  * at the last zone which needs scanning.
1625                  *
1626                  * We do this because the page allocator works in the opposite
1627                  * direction.  This prevents the page allocator from allocating
1628                  * pages behind kswapd's direction of progress, which would
1629                  * cause too much scanning of the lower zones.
1630                  */
1631                 for (i = 0; i <= end_zone; i++) {
1632                         struct zone *zone = pgdat->node_zones + i;
1633                         int nr_slab;
1634
1635                         if (!populated_zone(zone))
1636                                 continue;
1637
1638                         if (zone_is_all_unreclaimable(zone) &&
1639                                         priority != DEF_PRIORITY)
1640                                 continue;
1641
1642                         if (!zone_watermark_ok(zone, order, zone->pages_high,
1643                                                end_zone, 0))
1644                                 all_zones_ok = 0;
1645                         temp_priority[i] = priority;
1646                         sc.nr_scanned = 0;
1647                         note_zone_scanning_priority(zone, priority);
1648                         /*
1649                          * We put equal pressure on every zone, unless one
1650                          * zone has way too many pages free already.
1651                          */
1652                         if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
1653                                                 end_zone, 0))
1654                                 nr_reclaimed += shrink_zone(priority, zone, &sc);
1655                         reclaim_state->reclaimed_slab = 0;
1656                         nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1657                                                 lru_pages);
1658                         nr_reclaimed += reclaim_state->reclaimed_slab;
1659                         total_scanned += sc.nr_scanned;
1660                         if (zone_is_all_unreclaimable(zone))
1661                                 continue;
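                        /*
                         * shrink_slab() freed nothing and roughly six times the
                         * zone's LRU size has been scanned since a page was last
                         * freed here: write the zone off as unreclaimable.
                         */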
1662                         if (nr_slab == 0 && zone->pages_scanned >=
1663                                 (zone_page_state(zone, NR_ACTIVE)
1664                                 + zone_page_state(zone, NR_INACTIVE)) * 6)
1665                                         zone_set_flag(zone,
1666                                                       ZONE_ALL_UNRECLAIMABLE);
1667                         /*
1668                          * If we've done a decent amount of scanning and
1669                          * the reclaim ratio is low, start doing writepage
1670                          * even in laptop mode
1671                          */
1672                         if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1673                             total_scanned > nr_reclaimed + nr_reclaimed / 2)
1674                                 sc.may_writepage = 1;
1675                 }
1676                 if (all_zones_ok)
1677                         break;          /* kswapd: all done */
1678                 /*
1679                  * OK, kswapd is getting into trouble.  Take a nap, then take
1680                  * another pass across the zones.
1681                  */
1682                 if (total_scanned && priority < DEF_PRIORITY - 2)
1683                         congestion_wait(WRITE, HZ/10);
1684
1685                 /*
1686                  * We do this so kswapd doesn't build up large priorities for
1687                  * example when it is freeing in parallel with allocators. It
1688                  * matches the direct reclaim path behaviour in terms of impact
1689                  * on zone->*_priority.
1690                  */
1691                 if (nr_reclaimed >= SWAP_CLUSTER_MAX)
1692                         break;
1693         }
1694 out:
1695         /*
1696          * Note within each zone the priority level at which this zone was
1697          * brought into a happy state.  So that the next thread which scans this
1698          * zone will start out at that priority level.
1699          */
1700         for (i = 0; i < pgdat->nr_zones; i++) {
1701                 struct zone *zone = pgdat->node_zones + i;
1702
1703                 zone->prev_priority = temp_priority[i];
1704         }
1705         if (!all_zones_ok) {
1706                 cond_resched();
1707
1708                 try_to_freeze();
1709
1710                 goto loop_again;
1711         }
1712
1713         return nr_reclaimed;
1714 }
1715
1716 /*
1717  * The background pageout daemon, started as a kernel thread
1718  * from the init process. 
1719  *
1720  * This basically trickles out pages so that we have _some_
1721  * free memory available even if there is no other activity
1722  * that frees anything up. This is needed for things like routing
1723  * etc, where we otherwise might have all activity going on in
1724  * asynchronous contexts that cannot page things out.
1725  *
1726  * If there are applications that are active memory-allocators
1727  * (most normal use), this basically shouldn't matter.
1728  */
1729 static int kswapd(void *p)
1730 {
1731         unsigned long order;
1732         pg_data_t *pgdat = (pg_data_t*)p;
1733         struct task_struct *tsk = current;
1734         DEFINE_WAIT(wait);
1735         struct reclaim_state reclaim_state = {
1736                 .reclaimed_slab = 0,
1737         };
1738         node_to_cpumask_ptr(cpumask, pgdat->node_id);
1739
1740         if (!cpus_empty(*cpumask))
1741                 set_cpus_allowed_ptr(tsk, cpumask);
1742         current->reclaim_state = &reclaim_state;
1743
1744         /*
1745          * Tell the memory management that we're a "memory allocator",
1746          * and that if we need more memory we should get access to it
1747          * regardless (see "__alloc_pages()"). "kswapd" should
1748          * never get caught in the normal page freeing logic.
1749          *
1750          * (Kswapd normally doesn't need memory anyway, but sometimes
1751          * you need a small amount of memory in order to be able to
1752          * page out something else, and this flag essentially protects
1753          * us from recursively trying to free more memory as we're
1754          * trying to free the first piece of memory in the first place).
1755          */
1756         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1757         set_freezable();
1758
1759         order = 0;
1760         for ( ; ; ) {
1761                 unsigned long new_order;
1762
1763                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1764                 new_order = pgdat->kswapd_max_order;
1765                 pgdat->kswapd_max_order = 0;
1766                 if (order < new_order) {
1767                         /*
1768                          * Don't sleep if someone wants a larger 'order'
1769                          * allocation
1770                          */
1771                         order = new_order;
1772                 } else {
1773                         if (!freezing(current))
1774                                 schedule();
1775
1776                         order = pgdat->kswapd_max_order;
1777                 }
1778                 finish_wait(&pgdat->kswapd_wait, &wait);
1779
1780                 if (!try_to_freeze()) {
1781                         /* We can speed up thawing tasks if we don't call
1782                          * balance_pgdat after returning from the refrigerator
1783                          */
1784                         balance_pgdat(pgdat, order);
1785                 }
1786         }
1787         return 0;
1788 }
1789
1790 /*
1791  * A zone is low on free memory, so wake its kswapd task to service it.
1792  */
1793 void wakeup_kswapd(struct zone *zone, int order)
1794 {
1795         pg_data_t *pgdat;
1796
1797         if (!populated_zone(zone))
1798                 return;
1799
1800         pgdat = zone->zone_pgdat;
1801         if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1802                 return;
1803         if (pgdat->kswapd_max_order < order)
1804                 pgdat->kswapd_max_order = order;
1805         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1806                 return;
1807         if (!waitqueue_active(&pgdat->kswapd_wait))
1808                 return;
1809         wake_up_interruptible(&pgdat->kswapd_wait);
1810 }
1811
1812 #ifdef CONFIG_PM
1813 /*
1814  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
1815  * from LRU lists system-wide, for the given pass and priority, and returns
1816  * the number of reclaimed pages.
1817  *
1818  * For pass > 3 we also try to shrink LRU lists that contain only a few pages.
1819  */
1820 static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
1821                                       int pass, struct scan_control *sc)
1822 {
1823         struct zone *zone;
1824         unsigned long nr_to_scan, ret = 0;
1825         enum lru_list l;
1826
1827         for_each_zone(zone) {
1828
1829                 if (!populated_zone(zone))
1830                         continue;
1831
1832                 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
1833                         continue;
1834
1835                 for_each_lru(l) {
1836                         /* For pass = 0 we don't shrink the active list */
1837                         if (pass == 0 && l == LRU_ACTIVE)
1838                                 continue;
1839
1840                         zone->lru[l].nr_scan +=
1841                                 (zone_page_state(zone, NR_LRU_BASE + l)
1842                                                                 >> prio) + 1;
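                        /*
                         * nr_scan accumulates across calls; a real scan happens
                         * only once enough credit has built up, or on the late
                         * passes, which also pick over the small lists.
                         */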
1843                         if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
1844                                 zone->lru[l].nr_scan = 0;
1845                                 nr_to_scan = min(nr_pages,
1846                                         zone_page_state(zone,
1847                                                         NR_LRU_BASE + l));
1848                                 ret += shrink_list(l, nr_to_scan, zone,
1849                                                                 sc, prio);
1850                                 if (ret >= nr_pages)
1851                                         return ret;
1852                         }
1853                 }
1854         }
1855
1856         return ret;
1857 }
1858
1859 static unsigned long count_lru_pages(void)
1860 {
1861         return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
1862 }
1863
1864 /*
1865  * Try to free `nr_pages' of memory, system-wide, and return the number of
1866  * freed pages.
1867  *
1868  * Rather than trying to age LRUs, the aim is to preserve the overall
1869  * LRU order by reclaiming preferentially:
1870  * inactive > active > active referenced > active mapped
1871  */
1872 unsigned long shrink_all_memory(unsigned long nr_pages)
1873 {
1874         unsigned long lru_pages, nr_slab;
1875         unsigned long ret = 0;
1876         int pass;
1877         struct reclaim_state reclaim_state;
1878         struct scan_control sc = {
1879                 .gfp_mask = GFP_KERNEL,
1880                 .may_swap = 0,
1881                 .swap_cluster_max = nr_pages,
1882                 .may_writepage = 1,
1883                 .swappiness = vm_swappiness,
1884                 .isolate_pages = isolate_pages_global,
1885         };
1886
1887         current->reclaim_state = &reclaim_state;
1888
1889         lru_pages = count_lru_pages();
1890         nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
1891         /* If slab caches are huge, it's better to hit them first */
1892         while (nr_slab >= lru_pages) {
1893                 reclaim_state.reclaimed_slab = 0;
1894                 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
1895                 if (!reclaim_state.reclaimed_slab)
1896                         break;
1897
1898                 ret += reclaim_state.reclaimed_slab;
1899                 if (ret >= nr_pages)
1900                         goto out;
1901
1902                 nr_slab -= reclaim_state.reclaimed_slab;
1903         }
1904
1905         /*
1906          * We try to shrink LRUs in 5 passes:
1907          * 0 = Reclaim from inactive_list only
1908          * 1 = Reclaim from active list but don't reclaim mapped
1909          * 2 = 2nd pass of type 1
1910          * 3 = Reclaim mapped (normal reclaim)
1911          * 4 = 2nd pass of type 3
1912          */
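        /*
         * "Reclaim mapped" in passes 3 and 4 simply means that may_swap is
         * turned on and swappiness is forced to 100 in the pass > 2 branch
         * below.
         */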
1913         for (pass = 0; pass < 5; pass++) {
1914                 int prio;
1915
1916                 /* Force reclaiming mapped pages in the passes #3 and #4 */
1917                 if (pass > 2) {
1918                         sc.may_swap = 1;
1919                         sc.swappiness = 100;
1920                 }
1921
1922                 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
1923                         unsigned long nr_to_scan = nr_pages - ret;
1924
1925                         sc.nr_scanned = 0;
1926                         ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
1927                         if (ret >= nr_pages)
1928                                 goto out;
1929
1930                         reclaim_state.reclaimed_slab = 0;
1931                         shrink_slab(sc.nr_scanned, sc.gfp_mask,
1932                                         count_lru_pages());
1933                         ret += reclaim_state.reclaimed_slab;
1934                         if (ret >= nr_pages)
1935                                 goto out;
1936
1937                         if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
1938                                 congestion_wait(WRITE, HZ / 10);
1939                 }
1940         }
1941
1942         /*
1943          * If ret = 0, we could not shrink LRUs, but there may be something
1944          * in slab caches
1945          */
1946         if (!ret) {
1947                 do {
1948                         reclaim_state.reclaimed_slab = 0;
1949                         shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
1950                         ret += reclaim_state.reclaimed_slab;
1951                 } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
1952         }
1953
1954 out:
1955         current->reclaim_state = NULL;
1956
1957         return ret;
1958 }
1959 #endif
1960
1961 /* It's optimal to keep kswapds on the same CPUs as their memory, but
1962    not required for correctness.  So if the last cpu in a node goes
1963    away, we get changed to run anywhere: as the first one comes back,
1964    restore their cpu bindings. */
1965 static int __devinit cpu_callback(struct notifier_block *nfb,
1966                                   unsigned long action, void *hcpu)
1967 {
1968         int nid;
1969
1970         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
1971                 for_each_node_state(nid, N_HIGH_MEMORY) {
1972                         pg_data_t *pgdat = NODE_DATA(nid);
1973                         node_to_cpumask_ptr(mask, pgdat->node_id);
1974
1975                         if (any_online_cpu(*mask) < nr_cpu_ids)
1976                                 /* One of our CPUs online: restore mask */
1977                                 set_cpus_allowed_ptr(pgdat->kswapd, mask);
1978                 }
1979         }
1980         return NOTIFY_OK;
1981 }
1982
1983 /*
1984  * This kswapd start function will be called by init and node-hot-add.
1985  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
1986  */
1987 int kswapd_run(int nid)
1988 {
1989         pg_data_t *pgdat = NODE_DATA(nid);
1990         int ret = 0;
1991
1992         if (pgdat->kswapd)
1993                 return 0;
1994
1995         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
1996         if (IS_ERR(pgdat->kswapd)) {
1997                 /* failure at boot is fatal */
1998                 BUG_ON(system_state == SYSTEM_BOOTING);
1999                 printk("Failed to start kswapd on node %d\n", nid);
2000                 ret = -1;
2001         }
2002         return ret;
2003 }
2004
2005 static int __init kswapd_init(void)
2006 {
2007         int nid;
2008
2009         swap_setup();
2010         for_each_node_state(nid, N_HIGH_MEMORY)
2011                 kswapd_run(nid);
2012         hotcpu_notifier(cpu_callback, 0);
2013         return 0;
2014 }
2015
2016 module_init(kswapd_init)
2017
2018 #ifdef CONFIG_NUMA
2019 /*
2020  * Zone reclaim mode
2021  *
2022  * If non-zero call zone_reclaim when the number of free pages falls below
2023  * the watermarks.
2024  */
2025 int zone_reclaim_mode __read_mostly;
2026
2027 #define RECLAIM_OFF 0
2028 #define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
2029 #define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
2030 #define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */
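/*
 * These bits are combined through the vm.zone_reclaim_mode sysctl: for
 * example, 1 enables plain zone reclaim, 3 also allows writing out dirty
 * pages, and 7 additionally allows swapping during reclaim.
 */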
2031
2032 /*
2033  * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
2034  * pages considered by each zone_reclaim call: the scan size is roughly
2035  * the LRU size >> priority, so priority 4 scans 1/16th of a zone.
2036  */
2037 #define ZONE_RECLAIM_PRIORITY 4
2038
2039 /*
2040  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2041  * occur.
2042  */
2043 int sysctl_min_unmapped_ratio = 1;
2044
2045 /*
2046  * If the number of slab pages in a zone grows beyond this percentage then
2047  * slab reclaim needs to occur.
2048  */
2049 int sysctl_min_slab_ratio = 5;
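/*
 * Both ratios are turned into the per-zone thresholds zone->min_unmapped_pages
 * and zone->min_slab_pages, which are what zone_reclaim() and __zone_reclaim()
 * below actually compare against.
 */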
2050
2051 /*
2052  * Try to free up some pages from this zone through reclaim.
2053  */
2054 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2055 {
2056         /* Minimum pages needed in order to stay on node */
2057         const unsigned long nr_pages = 1 << order;
2058         struct task_struct *p = current;
2059         struct reclaim_state reclaim_state;
2060         int priority;
2061         unsigned long nr_reclaimed = 0;
2062         struct scan_control sc = {
2063                 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2064                 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2065                 .swap_cluster_max = max_t(unsigned long, nr_pages,
2066                                         SWAP_CLUSTER_MAX),
2067                 .gfp_mask = gfp_mask,
2068                 .swappiness = vm_swappiness,
2069                 .isolate_pages = isolate_pages_global,
2070         };
2071         unsigned long slab_reclaimable;
2072
2073         disable_swap_token();
2074         cond_resched();
2075         /*
2076          * We need to be able to allocate from the reserves for RECLAIM_SWAP
2077          * and we also need to be able to write out pages for RECLAIM_WRITE
2078          * and RECLAIM_SWAP.
2079          */
2080         p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2081         reclaim_state.reclaimed_slab = 0;
2082         p->reclaim_state = &reclaim_state;
2083
2084         if (zone_page_state(zone, NR_FILE_PAGES) -
2085                 zone_page_state(zone, NR_FILE_MAPPED) >
2086                 zone->min_unmapped_pages) {
2087                 /*
2088                  * Free memory by calling shrink zone with increasing
2089                  * priorities until we have enough memory freed.
2090                  */
2091                 priority = ZONE_RECLAIM_PRIORITY;
2092                 do {
2093                         note_zone_scanning_priority(zone, priority);
2094                         nr_reclaimed += shrink_zone(priority, zone, &sc);
2095                         priority--;
2096                 } while (priority >= 0 && nr_reclaimed < nr_pages);
2097         }
2098
2099         slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2100         if (slab_reclaimable > zone->min_slab_pages) {
2101                 /*
2102                  * shrink_slab() does not currently allow us to determine how
2103                  * many pages were freed in this zone. So we take the current
2104                  * number of slab pages and shake the slab until it is reduced
2105                  * by the same nr_pages that we used for reclaiming unmapped
2106                  * pages.
2107                  *
2108                  * Note that shrink_slab will free memory on all zones and may
2109                  * take a long time.
2110                  */
2111                 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
2112                         zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
2113                                 slab_reclaimable - nr_pages)
2114                         ;
2115
2116                 /*
2117                  * Update nr_reclaimed by the number of slab pages we
2118                  * reclaimed from this zone.
2119                  */
2120                 nr_reclaimed += slab_reclaimable -
2121                         zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2122         }
2123
2124         p->reclaim_state = NULL;
2125         current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2126         return nr_reclaimed >= nr_pages;
2127 }
2128
2129 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2130 {
2131         int node_id;
2132         int ret;
2133
2134         /*
2135          * Zone reclaim reclaims unmapped file backed pages and
2136          * slab pages if we are over the defined limits.
2137          *
2138          * A small portion of unmapped file backed pages is needed for
2139          * file I/O otherwise pages read by file I/O will be immediately
2140          * thrown out if the zone is overallocated. So we do not reclaim
2141          * if less than a specified percentage of the zone is used by
2142          * unmapped file backed pages.
2143          */
2144         if (zone_page_state(zone, NR_FILE_PAGES) -
2145             zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
2146             && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
2147                         <= zone->min_slab_pages)
2148                 return 0;
2149
2150         if (zone_is_all_unreclaimable(zone))
2151                 return 0;
2152
2153         /*
2154          * Do not scan if the allocation should not be delayed.
2155          */
2156         if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2157                         return 0;
2158
2159         /*
2160          * Only run zone reclaim on the local zone or on zones that do not
2161          * have associated processors. This will favor the local processor
2162                  * over remote processors and spread off-node memory allocations
2163                  * as widely as possible.
2164          */
2165         node_id = zone_to_nid(zone);
2166         if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2167                 return 0;
2168
2169         if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2170                 return 0;
2171         ret = __zone_reclaim(zone, gfp_mask, order);
2172         zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2173
2174         return ret;
2175 }
2176 #endif