synchronous lumpy reclaim: ensure we count pages transitioning inactive via clear_act...
/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>  /* for try_to_release_page(),
                                        buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;

        /* This context's GFP mask */
        gfp_t gfp_mask;

        int may_writepage;

        /* Can pages be swapped as part of reclaim? */
        int may_swap;

        /* This context's SWAP_CLUSTER_MAX. If freeing memory for
         * suspend, we effectively ignore SWAP_CLUSTER_MAX.
         * In this context, it doesn't matter that we scan the
         * whole list at once. */
        int swap_cluster_max;

        int swappiness;

        int all_unreclaimable;

        int order;
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)                    \
        do {                                                            \
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
                        prev = lru_to_page(&(_page->lru));              \
                        prefetch(&prev->_field);                        \
                }                                                       \
        } while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)                   \
        do {                                                            \
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
                        prev = lru_to_page(&(_page->lru));              \
                        prefetchw(&prev->_field);                       \
                }                                                       \
        } while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;    /* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
        shrinker->nr = 0;
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
        up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);
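
/*
 * Illustrative sketch (not part of this file): how a cache might register
 * a shrinker with the API above.  The callback signature matches the calls
 * made by shrink_slab() below -- shrink(nr_to_scan, gfp_mask) returns the
 * number of objects remaining in the cache, or -1 to abort.  The names
 * my_cache_prune() and my_cache_count() are hypothetical helpers, and
 * DEFAULT_SEEKS is assumed here, hence the #if 0.
 */
#if 0
static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
        if (nr_to_scan)
                my_cache_prune(nr_to_scan);     /* hypothetical helper */
        return my_cache_count();                /* hypothetical helper */
}

static struct shrinker my_cache_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
};

/* register_shrinker(&my_cache_shrinker) at init time,
 * unregister_shrinker(&my_cache_shrinker) at teardown. */
#endif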

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU, it increases the pressure
 * on slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        unsigned long lru_pages)
{
        struct shrinker *shrinker;
        unsigned long ret = 0;

        if (scanned == 0)
                scanned = SWAP_CLUSTER_MAX;

        if (!down_read_trylock(&shrinker_rwsem))
                return 1;       /* Assume we'll be able to shrink next time */

        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
                unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);

                delta = (4 * scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
                if (shrinker->nr < 0) {
                        printk(KERN_ERR "%s: nr=%ld\n",
                                        __FUNCTION__, shrinker->nr);
                        shrinker->nr = max_pass;
                }

                /*
                 * Avoid risking looping forever due to too large nr value:
                 * never try to free more than twice the estimated number of
                 * freeable entries.
                 */
                if (shrinker->nr > max_pass * 2)
                        shrinker->nr = max_pass * 2;

                total_scan = shrinker->nr;
                shrinker->nr = 0;

                while (total_scan >= SHRINK_BATCH) {
                        long this_scan = SHRINK_BATCH;
                        int shrink_ret;
                        int nr_before;

                        nr_before = (*shrinker->shrink)(0, gfp_mask);
                        shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
                                ret += nr_before - shrink_ret;
                        count_vm_events(SLABS_SCANNED, this_scan);
                        total_scan -= this_scan;

                        cond_resched();
                }

                shrinker->nr += total_scan;
        }
        up_read(&shrinker_rwsem);
        return ret;
}
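
/*
 * Worked example of the ageing arithmetic above, with made-up numbers:
 * assume scanned = 1024 LRU pages, lru_pages = 100000, and a shrinker
 * with seeks = 2 reporting max_pass = 5000 objects.  Then
 *
 *      delta = (4 * 1024) / 2 = 2048
 *      delta = 2048 * 5000   = 10240000
 *      delta = 10240000 / 100001 ~= 102
 *
 * i.e. the cache is asked to scan roughly the same proportion of its
 * objects (102/5000 ~= 2%) as the fraction of the LRU just scanned
 * (1024/100000 ~= 1%), doubled because each object costs a seek to
 * recreate.  Since 102 < SHRINK_BATCH, the work is banked in
 * shrinker->nr and carried over to the next call.
 */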

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
        struct address_space *mapping;

        /* Page is in somebody's page tables. */
        if (page_mapped(page))
                return 1;

        /* Be more reluctant to reclaim swapcache than pagecache */
        if (PageSwapCache(page))
                return 1;

        mapping = page_mapping(page);
        if (!mapping)
                return 0;

        /* File is mmap'd by somebody? */
        return mapping_mapped(mapping);
}

static inline int is_page_cache_freeable(struct page *page)
{
        return page_count(page) - !!PagePrivate(page) == 2;
}
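
/*
 * Clarifying note (not from the original file): the magic "2" above counts
 * one reference held by the page cache and one held by our caller, which
 * has isolated the page.  A page with buffers carries one extra reference
 * taken when the buffer_heads were attached, which the !!PagePrivate(page)
 * term subtracts out.  Any count beyond that means somebody else is using
 * the page and it cannot be freed.
 */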

static int may_write_to_queue(struct backing_dev_info *bdi)
{
        if (current->flags & PF_SWAPWRITE)
                return 1;
        if (!bdi_write_congested(bdi))
                return 1;
        if (bdi == current->backing_dev_info)
                return 1;
        return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
{
        lock_page(page);
        if (page_mapping(page) == mapping)
                mapping_set_error(mapping, error);
        unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
        /* failed to write page out, page is locked */
        PAGE_KEEP,
        /* move page to the active list, page is locked */
        PAGE_ACTIVATE,
        /* page has been sent to the disk successfully, page is unlocked */
        PAGE_SUCCESS,
        /* page is clean and locked */
        PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
        /*
         * If the page is dirty, only perform writeback if that write
         * will be non-blocking.  To prevent this allocation from being
         * stalled by pagecache activity.  But note that there may be
         * stalls if we need to run get_block().  We could test
         * PagePrivate for that.
         *
         * If this process is currently in generic_file_write() against
         * this page's queue, we can perform writeback even if that
         * will block.
         *
         * If the page is swapcache, write it back even if that would
         * block, for some throttling. This happens by accident, because
         * swap_backing_dev_info is bust: it doesn't reflect the
         * congestion state of the swapdevs.  Easy to fix, if needed.
         * See swapfile.c:page_queue_congested().
         */
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
        if (!mapping) {
                /*
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
                if (PagePrivate(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
                                printk("%s: orphaned page\n", __FUNCTION__);
                                return PAGE_CLEAN;
                        }
                }
                return PAGE_KEEP;
        }
        if (mapping->a_ops->writepage == NULL)
                return PAGE_ACTIVATE;
        if (!may_write_to_queue(mapping->backing_dev_info))
                return PAGE_KEEP;

        if (clear_page_dirty_for_io(page)) {
                int res;
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_NONE,
                        .nr_to_write = SWAP_CLUSTER_MAX,
                        .range_start = 0,
                        .range_end = LLONG_MAX,
                        .nonblocking = 1,
                        .for_reclaim = 1,
                };

                SetPageReclaim(page);
                res = mapping->a_ops->writepage(page, &wbc);
                if (res < 0)
                        handle_write_error(mapping, page, res);
                if (res == AOP_WRITEPAGE_ACTIVATE) {
                        ClearPageReclaim(page);
                        return PAGE_ACTIVATE;
                }
                if (!PageWriteback(page)) {
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
                inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }

        return PAGE_CLEAN;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));

        write_lock_irq(&mapping->tree_lock);
        /*
         * The non-racy check for a busy page.
         *
         * Must be careful with the order of the tests. When someone has
         * a ref to the page, it may be possible that they dirty it then
         * drop the reference. So if PageDirty is tested before page_count
         * here, then the following race may occur:
         *
         * get_user_pages(&page);
         * [user mapping goes away]
         * write_to(page);
         *                              !PageDirty(page)    [good]
         * SetPageDirty(page);
         * put_page(page);
         *                              !page_count(page)   [good, discard it]
         *
         * [oops, our write_to data is lost]
         *
         * Reversing the order of the tests ensures such a situation cannot
         * escape unnoticed. The smp_rmb is needed to ensure the page->flags
         * load is not satisfied before that of page->_count.
         *
         * Note that if SetPageDirty is always performed via set_page_dirty,
         * and thus under tree_lock, then this ordering is not required.
         */
        if (unlikely(page_count(page) != 2))
                goto cannot_free;
        smp_rmb();
        if (unlikely(PageDirty(page)))
                goto cannot_free;

        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                __delete_from_swap_cache(page);
                write_unlock_irq(&mapping->tree_lock);
                swap_free(swap);
                __put_page(page);       /* The pagecache ref */
                return 1;
        }

        __remove_from_page_cache(page);
        write_unlock_irq(&mapping->tree_lock);
        __put_page(page);
        return 1;

cannot_free:
        write_unlock_irq(&mapping->tree_lock);
        return 0;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
                                        struct scan_control *sc)
{
        LIST_HEAD(ret_pages);
        struct pagevec freed_pvec;
        int pgactivate = 0;
        unsigned long nr_reclaimed = 0;

        cond_resched();

        pagevec_init(&freed_pvec, 1);
        while (!list_empty(page_list)) {
                struct address_space *mapping;
                struct page *page;
                int may_enter_fs;
                int referenced;

                cond_resched();

                page = lru_to_page(page_list);
                list_del(&page->lru);

                if (TestSetPageLocked(page))
                        goto keep;

                VM_BUG_ON(PageActive(page));

                sc->nr_scanned++;

                if (!sc->may_swap && page_mapped(page))
                        goto keep_locked;

                /* Double the slab pressure for mapped and swapcache pages */
                if (page_mapped(page) || PageSwapCache(page))
                        sc->nr_scanned++;

                if (PageWriteback(page))
                        goto keep_locked;

                referenced = page_referenced(page, 1);
                /* In active use or really unfreeable?  Activate it. */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
                                        referenced && page_mapping_inuse(page))
                        goto activate_locked;

#ifdef CONFIG_SWAP
                /*
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
                 */
                if (PageAnon(page) && !PageSwapCache(page))
                        if (!add_to_swap(page, GFP_ATOMIC))
                                goto activate_locked;
#endif /* CONFIG_SWAP */

                mapping = page_mapping(page);
                may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

                /*
                 * The page is mapped into the page tables of one or more
                 * processes. Try to unmap it here.
                 */
                if (page_mapped(page) && mapping) {
                        switch (try_to_unmap(page, 0)) {
                        case SWAP_FAIL:
                                goto activate_locked;
                        case SWAP_AGAIN:
                                goto keep_locked;
                        case SWAP_SUCCESS:
                                ; /* try to free the page below */
                        }
                }

                if (PageDirty(page)) {
                        if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
                                goto keep_locked;
                        if (!may_enter_fs)
                                goto keep_locked;
                        if (!sc->may_writepage)
                                goto keep_locked;

                        /* Page is dirty, try to write it out here */
                        switch(pageout(page, mapping)) {
                        case PAGE_KEEP:
                                goto keep_locked;
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
                                if (PageWriteback(page) || PageDirty(page))
                                        goto keep;
                                /*
                                 * A synchronous write - probably a ramdisk.  Go
                                 * ahead and try to reclaim the page.
                                 */
                                if (TestSetPageLocked(page))
                                        goto keep;
                                if (PageDirty(page) || PageWriteback(page))
                                        goto keep_locked;
                                mapping = page_mapping(page);
                        case PAGE_CLEAN:
                                ; /* try to free the page below */
                        }
                }

                /*
                 * If the page has buffers, try to free the buffer mappings
                 * associated with this page. If we succeed we try to free
                 * the page as well.
                 *
                 * We do this even if the page is PageDirty().
                 * try_to_release_page() does not perform I/O, but it is
                 * possible for a page to have PageDirty set while it is
                 * actually clean (all its buffers are clean).  This happens
                 * if the buffers were written out directly, with submit_bh().
                 * ext3 will do this, as well as the blockdev mapping.
                 * try_to_release_page() will discover that cleanness and will
                 * drop the buffers and mark the page clean - it can be freed.
                 *
                 * Rarely, pages can have buffers and no ->mapping.  These are
                 * the pages which were not successfully invalidated in
                 * truncate_complete_page().  We try to drop those buffers here
                 * and if that worked, and the page is no longer mapped into
                 * process address space (page_count == 1) it can be freed.
                 * Otherwise, leave the page on the LRU so it is swappable.
                 */
                if (PagePrivate(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
                        if (!mapping && page_count(page) == 1)
                                goto free_it;
                }

                if (!mapping || !remove_mapping(mapping, page))
                        goto keep_locked;

free_it:
                unlock_page(page);
                nr_reclaimed++;
                if (!pagevec_add(&freed_pvec, page))
                        __pagevec_release_nonlru(&freed_pvec);
                continue;

activate_locked:
                SetPageActive(page);
                pgactivate++;
keep_locked:
                unlock_page(page);
keep:
                list_add(&page->lru, &ret_pages);
                VM_BUG_ON(PageLRU(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
                __pagevec_release_nonlru(&freed_pvec);
        count_vm_events(PGACTIVATE, pgactivate);
        return nr_reclaimed;
}

/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0      /* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1        /* Isolate active pages. */
#define ISOLATE_BOTH 2          /* Isolate both active and inactive pages. */

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:        page to consider
 * mode:        one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
static int __isolate_lru_page(struct page *page, int mode)
{
        int ret = -EINVAL;

        /* Only take pages on the LRU. */
        if (!PageLRU(page))
                return ret;

        /*
         * When checking the active state, we need to be sure we are
         * dealing with comparable boolean values.  Take the logical not
         * of each.
         */
        if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
                return ret;

        ret = -EBUSY;
        if (likely(get_page_unless_zero(page))) {
                /*
                 * Be careful not to clear PageLRU until after we're
                 * sure the page is not being freed elsewhere -- the
                 * page release code relies on it.
                 */
                ClearPageLRU(page);
                ret = 0;
        }

        return ret;
}
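
/*
 * Clarifying note on the (!PageActive(page) != !mode) test above.  Since
 * ISOLATE_INACTIVE == 0 and ISOLATE_ACTIVE == 1:
 *
 *      mode                 PageActive   !PageActive != !mode   result
 *      ISOLATE_INACTIVE         0              1 != 1           take
 *      ISOLATE_INACTIVE         1              0 != 1           skip
 *      ISOLATE_ACTIVE           0              1 != 0           skip
 *      ISOLATE_ACTIVE           1              0 != 0           take
 *
 * i.e. a page is taken only when its active state matches the mode.
 */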

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan: The number of pages to look through on the list.
 * @src:        The LRU list to pull pages off.
 * @dst:        The temp list to put pages on to.
 * @scanned:    The number of pages that were scanned.
 * @order:      The caller's attempted allocation order
 * @mode:       One of the LRU isolation modes
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct list_head *src, struct list_head *dst,
                unsigned long *scanned, int order, int mode)
{
        unsigned long nr_taken = 0;
        unsigned long scan;

        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
                struct page *page;
                unsigned long pfn;
                unsigned long end_pfn;
                unsigned long page_pfn;
                int zone_id;

                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);

                VM_BUG_ON(!PageLRU(page));

                switch (__isolate_lru_page(page, mode)) {
                case 0:
                        list_move(&page->lru, dst);
                        nr_taken++;
                        break;

                case -EBUSY:
                        /* else it is being freed elsewhere */
                        list_move(&page->lru, src);
                        continue;

                default:
                        BUG();
                }

                if (!order)
                        continue;

                /*
                 * Attempt to take all pages in the order aligned region
                 * surrounding the tag page.  Only take those pages of
                 * the same active state as that tag page.  We may safely
                 * round the target page pfn down to the requested order
                 * as the mem_map is guaranteed valid out to MAX_ORDER;
                 * where a page is in a different zone we will detect it
                 * from its zone id and abort this block scan.
                 */
                zone_id = page_zone_id(page);
                page_pfn = page_to_pfn(page);
                pfn = page_pfn & ~((1 << order) - 1);
                end_pfn = pfn + (1 << order);
                for (; pfn < end_pfn; pfn++) {
                        struct page *cursor_page;

                        /* The target page is in the block, ignore it. */
                        if (unlikely(pfn == page_pfn))
                                continue;

                        /* Avoid holes within the zone. */
                        if (unlikely(!pfn_valid_within(pfn)))
                                break;

                        cursor_page = pfn_to_page(pfn);
                        /* Check that we have not crossed a zone boundary. */
                        if (unlikely(page_zone_id(cursor_page) != zone_id))
                                continue;
                        switch (__isolate_lru_page(cursor_page, mode)) {
                        case 0:
                                list_move(&cursor_page->lru, dst);
                                nr_taken++;
                                scan++;
                                break;

                        case -EBUSY:
                                /* else it is being freed elsewhere */
                                list_move(&cursor_page->lru, src);
                        default:
                                break;
                        }
                }
        }

        *scanned = scan;
        return nr_taken;
}
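
/*
 * Worked example of the lumpy-reclaim rounding above, with made-up
 * numbers: for order = 2 and a tag page at page_pfn = 1029,
 *
 *      pfn     = 1029 & ~((1 << 2) - 1) = 1029 & ~3 = 1028
 *      end_pfn = 1028 + (1 << 2)        = 1032
 *
 * so pfns 1028..1031 (the order-2 aligned block containing the tag page)
 * are scanned, and every LRU page in the block with the right active
 * state is pulled onto *dst along with the tag page.
 */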

/*
 * clear_active_flags() is a helper for shrink_active_list(), clearing
 * any active bits from the pages in the list.
 */
static unsigned long clear_active_flags(struct list_head *page_list)
{
        int nr_active = 0;
        struct page *page;

        list_for_each_entry(page, page_list, lru)
                if (PageActive(page)) {
                        ClearPageActive(page);
                        nr_active++;
                }

        return nr_active;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
                                struct zone *zone, struct scan_control *sc)
{
        LIST_HEAD(page_list);
        struct pagevec pvec;
        unsigned long nr_scanned = 0;
        unsigned long nr_reclaimed = 0;

        pagevec_init(&pvec, 1);

        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
        do {
                struct page *page;
                unsigned long nr_taken;
                unsigned long nr_scan;
                unsigned long nr_freed;
                unsigned long nr_active;

                nr_taken = isolate_lru_pages(sc->swap_cluster_max,
                             &zone->inactive_list,
                             &page_list, &nr_scan, sc->order,
                             (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
                                             ISOLATE_BOTH : ISOLATE_INACTIVE);
                nr_active = clear_active_flags(&page_list);
                __count_vm_events(PGDEACTIVATE, nr_active);

                __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
                __mod_zone_page_state(zone, NR_INACTIVE,
                                                -(nr_taken - nr_active));
                zone->pages_scanned += nr_scan;
                spin_unlock_irq(&zone->lru_lock);

                nr_scanned += nr_scan;
                nr_freed = shrink_page_list(&page_list, sc);
                nr_reclaimed += nr_freed;
                local_irq_disable();
                if (current_is_kswapd()) {
                        __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
                } else
                        __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
                __count_zone_vm_events(PGSTEAL, zone, nr_freed);

                if (nr_taken == 0)
                        goto done;

                spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
                 */
                while (!list_empty(&page_list)) {
                        page = lru_to_page(&page_list);
                        VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
                        list_del(&page->lru);
                        if (PageActive(page))
                                add_page_to_active_list(zone, page);
                        else
                                add_page_to_inactive_list(zone, page);
                        if (!pagevec_add(&pvec, page)) {
                                spin_unlock_irq(&zone->lru_lock);
                                __pagevec_release(&pvec);
                                spin_lock_irq(&zone->lru_lock);
                        }
                }
        } while (nr_scanned < max_scan);
        spin_unlock(&zone->lru_lock);
done:
        local_irq_enable();
        pagevec_release(&pvec);
        return nr_reclaimed;
}

/*
 * We are about to scan this zone at a certain priority level.  If that priority
 * level is smaller (ie: more urgent) than the previous priority, then note
 * that priority level within the zone.  This is done so that when the next
 * process comes in to scan this zone, it will immediately start out at this
 * priority level rather than having to build up its own scanning priority.
 * Here, this priority affects only the reclaim-mapped threshold.
 */
static inline void note_zone_scanning_priority(struct zone *zone, int priority)
{
        if (priority < zone->prev_priority)
                zone->prev_priority = priority;
}

static inline int zone_is_near_oom(struct zone *zone)
{
        return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
                                + zone_page_state(zone, NR_INACTIVE))*3;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                                struct scan_control *sc, int priority)
{
        unsigned long pgmoved;
        int pgdeactivate = 0;
        unsigned long pgscanned;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
        LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
        LIST_HEAD(l_active);    /* Pages to go onto the active_list */
        struct page *page;
        struct pagevec pvec;
        int reclaim_mapped = 0;

        if (sc->may_swap) {
                long mapped_ratio;
                long distress;
                long swap_tendency;

                if (zone_is_near_oom(zone))
                        goto force_reclaim_mapped;

                /*
                 * `distress' is a measure of how much trouble we're having
                 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
                 */
                distress = 100 >> min(zone->prev_priority, priority);

                /*
                 * The point of this algorithm is to decide when to start
                 * reclaiming mapped memory instead of just pagecache.  Work
                 * out how much memory is mapped.
                 */
                mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
                                global_page_state(NR_ANON_PAGES)) * 100) /
                                        vm_total_pages;

                /*
                 * Now decide how much we really want to unmap some pages.  The
                 * mapped ratio is downgraded - just because there's a lot of
                 * mapped memory doesn't necessarily mean that page reclaim
                 * isn't succeeding.
                 *
                 * The distress ratio is important - we don't want to start
                 * going oom.
                 *
                 * A 100% value of vm_swappiness overrides this algorithm
                 * altogether.
                 */
                swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;

                /*
                 * Now use this metric to decide whether to start moving mapped
                 * memory onto the inactive list.
                 */
                if (swap_tendency >= 100)
force_reclaim_mapped:
                        reclaim_mapped = 1;
        }
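
        /*
         * Worked example of the heuristic above, with made-up numbers:
         * with 40% of memory mapped, no reclaim trouble (prev_priority
         * and priority both at DEF_PRIORITY, assumed to be 12, so
         * distress = 100 >> 12 = 0) and the default swappiness of 60:
         *
         *      swap_tendency = 40/2 + 0 + 60 = 80 < 100
         *
         * so mapped pages are left alone.  Under heavy pressure (say
         * prev_priority = 1, distress = 50) the same system gives
         * 20 + 50 + 60 = 130 >= 100, and mapped pages start being
         * deactivated too.
         */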

        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
        pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
                            &l_hold, &pgscanned, sc->order, ISOLATE_ACTIVE);
        zone->pages_scanned += pgscanned;
        __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
        spin_unlock_irq(&zone->lru_lock);

        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
                if (page_mapped(page)) {
                        if (!reclaim_mapped ||
                            (total_swap_pages == 0 && PageAnon(page)) ||
                            page_referenced(page, 0)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
                }
                list_add(&page->lru, &l_inactive);
        }

        pagevec_init(&pvec, 1);
        pgmoved = 0;
        spin_lock_irq(&zone->lru_lock);
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
                VM_BUG_ON(!PageActive(page));
                ClearPageActive(page);

                list_move(&page->lru, &zone->inactive_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
                        spin_unlock_irq(&zone->lru_lock);
                        pgdeactivate += pgmoved;
                        pgmoved = 0;
                        if (buffer_heads_over_limit)
                                pagevec_strip(&pvec);
                        __pagevec_release(&pvec);
                        spin_lock_irq(&zone->lru_lock);
                }
        }
        __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
        pgdeactivate += pgmoved;
        if (buffer_heads_over_limit) {
                spin_unlock_irq(&zone->lru_lock);
                pagevec_strip(&pvec);
                spin_lock_irq(&zone->lru_lock);
        }

        pgmoved = 0;
        while (!list_empty(&l_active)) {
                page = lru_to_page(&l_active);
                prefetchw_prev_lru_page(page, &l_active, flags);
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
                VM_BUG_ON(!PageActive(page));
                list_move(&page->lru, &zone->active_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
                        pgmoved = 0;
                        spin_unlock_irq(&zone->lru_lock);
                        __pagevec_release(&pvec);
                        spin_lock_irq(&zone->lru_lock);
                }
        }
        __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);

        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        __count_vm_events(PGDEACTIVATE, pgdeactivate);
        spin_unlock_irq(&zone->lru_lock);

        pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static unsigned long shrink_zone(int priority, struct zone *zone,
                                struct scan_control *sc)
{
        unsigned long nr_active;
        unsigned long nr_inactive;
        unsigned long nr_to_scan;
        unsigned long nr_reclaimed = 0;

        atomic_inc(&zone->reclaim_in_progress);

        /*
         * Add one to `nr_to_scan' just to make sure that the kernel will
         * slowly sift through the active list.
         */
        zone->nr_scan_active +=
                (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
        nr_active = zone->nr_scan_active;
        if (nr_active >= sc->swap_cluster_max)
                zone->nr_scan_active = 0;
        else
                nr_active = 0;

        zone->nr_scan_inactive +=
                (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
        nr_inactive = zone->nr_scan_inactive;
        if (nr_inactive >= sc->swap_cluster_max)
                zone->nr_scan_inactive = 0;
        else
                nr_inactive = 0;

        while (nr_active || nr_inactive) {
                if (nr_active) {
                        nr_to_scan = min(nr_active,
                                        (unsigned long)sc->swap_cluster_max);
                        nr_active -= nr_to_scan;
                        shrink_active_list(nr_to_scan, zone, sc, priority);
                }

                if (nr_inactive) {
                        nr_to_scan = min(nr_inactive,
                                        (unsigned long)sc->swap_cluster_max);
                        nr_inactive -= nr_to_scan;
                        nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
                                                                sc);
                }
        }

        throttle_vm_writeout(sc->gfp_mask);

        atomic_dec(&zone->reclaim_in_progress);
        return nr_reclaimed;
}
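
/*
 * Worked example of the scan batching above, with made-up numbers: at
 * priority == DEF_PRIORITY (assumed to be 12), a zone with NR_ACTIVE =
 * 40960 pages adds (40960 >> 12) + 1 = 11 to nr_scan_active per call.
 * With swap_cluster_max = 32, nothing is scanned until the third call,
 * when the accumulated 33 >= 32 is handed to shrink_active_list() in
 * swap_cluster_max sized chunks.  As priority drops, the >> priority
 * term grows quickly, so more urgent passes scan much larger batches.
 */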

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static unsigned long shrink_zones(int priority, struct zone **zones,
                                        struct scan_control *sc)
{
        unsigned long nr_reclaimed = 0;
        int i;

        sc->all_unreclaimable = 1;
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];

                if (!populated_zone(zone))
                        continue;

                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;

                note_zone_scanning_priority(zone, priority);

                if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */

                sc->all_unreclaimable = 0;

                nr_reclaimed += shrink_zone(priority, zone, sc);
        }
        return nr_reclaimed;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
{
        int priority;
        int ret = 0;
        unsigned long total_scanned = 0;
        unsigned long nr_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long lru_pages = 0;
        int i;
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .may_swap = 1,
                .swappiness = vm_swappiness,
                .order = order,
        };

        count_vm_event(ALLOCSTALL);

        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];

                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;

                lru_pages += zone_page_state(zone, NR_ACTIVE)
                                + zone_page_state(zone, NR_INACTIVE);
        }

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc.nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
                nr_reclaimed += shrink_zones(priority, zones, &sc);
                shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
                if (reclaim_state) {
                        nr_reclaimed += reclaim_state->reclaimed_slab;
                        reclaim_state->reclaimed_slab = 0;
                }
                total_scanned += sc.nr_scanned;
                if (nr_reclaimed >= sc.swap_cluster_max) {
                        ret = 1;
                        goto out;
                }

                /*
                 * Try to write back as many pages as we just scanned.  This
                 * tends to cause slow streaming writers to write data to the
                 * disk smoothly, at the dirtying rate, which is nice.  But
                 * that's undesirable in laptop mode, where we *want* lumpy
                 * writeout.  So in laptop mode, write out the whole world.
                 */
                if (total_scanned > sc.swap_cluster_max +
                                        sc.swap_cluster_max / 2) {
                        wakeup_pdflush(laptop_mode ? 0 : total_scanned);
                        sc.may_writepage = 1;
                }

                /* Take a nap, wait for some writeback to complete */
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
                        congestion_wait(WRITE, HZ/10);
        }
        /* top priority shrink_caches still had more to do? don't OOM, then */
        if (!sc.all_unreclaimable)
                ret = 1;
out:
        /*
         * Now that we've scanned all the zones at this priority level, note
         * that level within the zone so that the next thread which performs
         * scanning of this zone will immediately start out at this priority
         * level.  This affects only the decision whether or not to bring
         * mapped pages onto the inactive list.
         */
        if (priority < 0)
                priority = 0;
        for (i = 0; zones[i] != 0; i++) {
                struct zone *zone = zones[i];

                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;

                zone->prev_priority = priority;
        }
        return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
        int all_zones_ok;
        int priority;
        int i;
        unsigned long total_scanned;
        unsigned long nr_reclaimed;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
                .order = order,
        };
        /*
         * temp_priority is used to remember the scanning priority at which
         * this zone was successfully refilled to free_pages == pages_high.
         */
        int temp_priority[MAX_NR_ZONES];

loop_again:
        total_scanned = 0;
        nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);

        for (i = 0; i < pgdat->nr_zones; i++)
                temp_priority[i] = DEF_PRIORITY;

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;

                /* The swap token gets in the way of swapout... */
                if (!priority)
                        disable_swap_token();

                all_zones_ok = 1;

                /*
                 * Scan in the highmem->dma direction for the highest
                 * zone which needs scanning
                 */
                for (i = pgdat->nr_zones - 1; i >= 0; i--) {
                        struct zone *zone = pgdat->node_zones + i;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;

                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               0, 0)) {
                                end_zone = i;
                                break;
                        }
                }
                if (i < 0)
                        goto out;

                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;

                        lru_pages += zone_page_state(zone, NR_ACTIVE)
                                        + zone_page_state(zone, NR_INACTIVE);
                }

                /*
                 * Now scan the zone in the dma->highmem direction, stopping
                 * at the last zone which needs scanning.
                 *
                 * We do this because the page allocator works in the opposite
                 * direction.  This prevents the page allocator from allocating
                 * pages behind kswapd's direction of progress, which would
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;

                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               end_zone, 0))
                                all_zones_ok = 0;
                        temp_priority[i] = priority;
                        sc.nr_scanned = 0;
                        note_zone_scanning_priority(zone, priority);
                        nr_reclaimed += shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                lru_pages);
                        nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
                                (zone_page_state(zone, NR_ACTIVE)
                                + zone_page_state(zone, NR_INACTIVE)) * 6)
                                        zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
                         * even in laptop mode
                         */
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
                            total_scanned > nr_reclaimed + nr_reclaimed / 2)
                                sc.may_writepage = 1;
                }
                if (all_zones_ok)
                        break;          /* kswapd: all done */
                /*
                 * OK, kswapd is getting into trouble.  Take a nap, then take
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
                        congestion_wait(WRITE, HZ/10);

                /*
                 * We do this so kswapd doesn't build up large priorities for
                 * example when it is freeing in parallel with allocators. It
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
                if (nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        }
1361 out:
1362         /*
1363          * Record in each zone the priority level at which this zone was
1364          * brought into a happy state, so that the next thread which scans
1365          * this zone will start out at that priority level.
1366          */
1367         for (i = 0; i < pgdat->nr_zones; i++) {
1368                 struct zone *zone = pgdat->node_zones + i;
1369
1370                 zone->prev_priority = temp_priority[i];
1371         }
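        /*
         * Some zone is still below pages_high: yield the CPU and honour a
         * pending freeze request before restarting the full balancing run.
         */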
1372         if (!all_zones_ok) {
1373                 cond_resched();
1374
1375                 try_to_freeze();
1376
1377                 goto loop_again;
1378         }
1379
1380         return nr_reclaimed;
1381 }
1382
1383 /*
1384  * The background pageout daemon, started as a kernel thread
1385  * from the init process. 
1386  *
1387  * This basically trickles out pages so that we have _some_
1388  * free memory available even if there is no other activity
1389  * that frees anything up. This is needed for things like routing
1390  * etc, where we otherwise might have all activity going on in
1391  * asynchronous contexts that cannot page things out.
1392  *
1393  * If there are applications that are active memory-allocators
1394  * (most normal use), this basically shouldn't matter.
1395  */
1396 static int kswapd(void *p)
1397 {
1398         unsigned long order;
1399         pg_data_t *pgdat = (pg_data_t *)p;
1400         struct task_struct *tsk = current;
1401         DEFINE_WAIT(wait);
1402         struct reclaim_state reclaim_state = {
1403                 .reclaimed_slab = 0,
1404         };
1405         cpumask_t cpumask;
1406
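        /*
         * Bind kswapd to the CPUs of its own node so the pages it touches
         * stay node-local; the cpu hotplug callback below re-establishes
         * this binding when a node's CPUs go away and come back.
         */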
1407         cpumask = node_to_cpumask(pgdat->node_id);
1408         if (!cpus_empty(cpumask))
1409                 set_cpus_allowed(tsk, cpumask);
1410         current->reclaim_state = &reclaim_state;
1411
1412         /*
1413          * Tell the memory management that we're a "memory allocator",
1414          * and that if we need more memory we should get access to it
1415          * regardless (see "__alloc_pages()"). "kswapd" should
1416          * never get caught in the normal page freeing logic.
1417          *
1418          * (Kswapd normally doesn't need memory anyway, but sometimes
1419          * you need a small amount of memory in order to be able to
1420          * page out something else, and this flag essentially protects
1421          * us from recursively trying to free more memory as we're
1422          * trying to free the first piece of memory in the first place).
1423          */
1424         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1425         set_freezable();
1426
1427         order = 0;
1428         for ( ; ; ) {
1429                 unsigned long new_order;
1430
1431                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1432                 new_order = pgdat->kswapd_max_order;
1433                 pgdat->kswapd_max_order = 0;
1434                 if (order < new_order) {
1435                         /*
1436                          * Don't sleep if someone wants a larger 'order'
1437                          * allocation
1438                          */
1439                         order = new_order;
1440                 } else {
1441                         if (!freezing(current))
1442                                 schedule();
1443
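                        /*
                         * Re-read the order after sleeping: a waker may have
                         * raised kswapd_max_order while we slept.
                         */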
1444                         order = pgdat->kswapd_max_order;
1445                 }
1446                 finish_wait(&pgdat->kswapd_wait, &wait);
1447
1448                 if (!try_to_freeze()) {
1449                         /* We can speed up thawing tasks if we don't call
1450                          * balance_pgdat after returning from the refrigerator
1451                          */
1452                         balance_pgdat(pgdat, order);
1453                 }
1454         }
1455         return 0;
1456 }
1457
1458 /*
1459  * A zone is low on free memory, so wake its kswapd task to service it.
1460  */
1461 void wakeup_kswapd(struct zone *zone, int order)
1462 {
1463         pg_data_t *pgdat;
1464
1465         if (!populated_zone(zone))
1466                 return;
1467
1468         pgdat = zone->zone_pgdat;
1469         if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1470                 return;
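        /*
         * Record the largest order any recent caller has asked for; kswapd
         * reads and clears kswapd_max_order when it next wakes.
         */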
1471         if (pgdat->kswapd_max_order < order)
1472                 pgdat->kswapd_max_order = order;
1473         if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1474                 return;
1475         if (!waitqueue_active(&pgdat->kswapd_wait))
1476                 return;
1477         wake_up_interruptible(&pgdat->kswapd_wait);
1478 }
1479
1480 #ifdef CONFIG_PM
1481 /*
1482  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
1483  * from LRU lists system-wide, for the given pass and priority, and returns
1484  * the number of reclaimed pages.
1485  *
1486  * For pass > 3 we also try to shrink LRU lists that contain only a few pages.
1487  */
1488 static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
1489                                       int pass, struct scan_control *sc)
1490 {
1491         struct zone *zone;
1492         unsigned long nr_to_scan, ret = 0;
1493
1494         for_each_zone(zone) {
1495
1496                 if (!populated_zone(zone))
1497                         continue;
1498
1499                 if (zone->all_unreclaimable && prio != DEF_PRIORITY)
1500                         continue;
1501
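                /*
                 * Accumulate scan credit in proportion to list size and
                 * priority; once it covers the request (or on the late
                 * passes), scan a batch from the list.
                 */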
1502                 /* For pass = 0 we don't shrink the active list */
1503                 if (pass > 0) {
1504                         zone->nr_scan_active +=
1505                                 (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
1506                         if (zone->nr_scan_active >= nr_pages || pass > 3) {
1507                                 zone->nr_scan_active = 0;
1508                                 nr_to_scan = min(nr_pages,
1509                                         zone_page_state(zone, NR_ACTIVE));
1510                                 shrink_active_list(nr_to_scan, zone, sc, prio);
1511                         }
1512                 }
1513
1514                 zone->nr_scan_inactive +=
1515                         (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
1516                 if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
1517                         zone->nr_scan_inactive = 0;
1518                         nr_to_scan = min(nr_pages,
1519                                 zone_page_state(zone, NR_INACTIVE));
1520                         ret += shrink_inactive_list(nr_to_scan, zone, sc);
1521                         if (ret >= nr_pages)
1522                                 return ret;
1523                 }
1524         }
1525
1526         return ret;
1527 }
1528
1529 static unsigned long count_lru_pages(void)
1530 {
1531         return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
1532 }
1533
1534 /*
1535  * Try to free `nr_pages' of memory, system-wide, and return the number of
1536  * freed pages.
1537  *
1538  * Rather than trying to age LRUs, the aim is to preserve the overall
1539  * LRU order by reclaiming preferentially:
1540  * inactive > active > active referenced > active mapped
1541  */
1542 unsigned long shrink_all_memory(unsigned long nr_pages)
1543 {
1544         unsigned long lru_pages, nr_slab;
1545         unsigned long ret = 0;
1546         int pass;
1547         struct reclaim_state reclaim_state;
1548         struct scan_control sc = {
1549                 .gfp_mask = GFP_KERNEL,
1550                 .may_swap = 0,
1551                 .swap_cluster_max = nr_pages,
1552                 .may_writepage = 1,
1553                 .swappiness = vm_swappiness,
1554         };
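        /*
         * With swap_cluster_max set to the whole request, each shrink
         * pass may work in one large batch rather than in
         * SWAP_CLUSTER_MAX-sized chunks.
         */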
1555
1556         current->reclaim_state = &reclaim_state;
1557
1558         lru_pages = count_lru_pages();
1559         nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
1560         /* If slab caches are huge, it's better to hit them first */
1561         while (nr_slab >= lru_pages) {
1562                 reclaim_state.reclaimed_slab = 0;
1563                 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
1564                 if (!reclaim_state.reclaimed_slab)
1565                         break;
1566
1567                 ret += reclaim_state.reclaimed_slab;
1568                 if (ret >= nr_pages)
1569                         goto out;
1570
1571                 nr_slab -= reclaim_state.reclaimed_slab;
1572         }
1573
1574         /*
1575          * We try to shrink LRUs in 5 passes:
1576          * 0 = Reclaim from inactive_list only
1577          * 1 = Reclaim from active list but don't reclaim mapped
1578          * 2 = 2nd pass of type 1
1579          * 3 = Reclaim mapped (normal reclaim)
1580          * 4 = 2nd pass of type 3
1581          */
1582         for (pass = 0; pass < 5; pass++) {
1583                 int prio;
1584
1585                 /* Force reclaiming mapped pages in passes 3 and 4 */
1586                 if (pass > 2) {
1587                         sc.may_swap = 1;
1588                         sc.swappiness = 100;
1589                 }
1590
1591                 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
1592                         unsigned long nr_to_scan = nr_pages - ret;
1593
1594                         sc.nr_scanned = 0;
1595                         ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
1596                         if (ret >= nr_pages)
1597                                 goto out;
1598
1599                         reclaim_state.reclaimed_slab = 0;
1600                         shrink_slab(sc.nr_scanned, sc.gfp_mask,
1601                                         count_lru_pages());
1602                         ret += reclaim_state.reclaimed_slab;
1603                         if (ret >= nr_pages)
1604                                 goto out;
1605
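                        /*
                         * If we are still scanning after raising the
                         * pressure a couple of priority steps, wait for
                         * write congestion to ease before the next try.
                         */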
1606                         if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
1607                                 congestion_wait(WRITE, HZ / 10);
1608                 }
1609         }
1610
1611         /*
1612          * If ret = 0, we could not shrink LRUs, but there may be something
1613          * in slab caches.
1614          */
1615         if (!ret) {
1616                 do {
1617                         reclaim_state.reclaimed_slab = 0;
1618                         shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
1619                         ret += reclaim_state.reclaimed_slab;
1620                 } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
1621         }
1622
1623 out:
1624         current->reclaim_state = NULL;
1625
1626         return ret;
1627 }
1628 #endif
1629
1630 /* It's optimal to keep kswapds on the same CPUs as their memory, but
1631    not required for correctness.  So if the last cpu in a node goes
1632    away, kswapd is allowed to run anywhere; when a cpu of that node
1633    comes back online, its cpu binding is restored. */
1634 static int __devinit cpu_callback(struct notifier_block *nfb,
1635                                   unsigned long action, void *hcpu)
1636 {
1637         pg_data_t *pgdat;
1638         cpumask_t mask;
1639
1640         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
1641                 for_each_online_pgdat(pgdat) {
1642                         mask = node_to_cpumask(pgdat->node_id);
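                        /* any_online_cpu() returns NR_CPUS for an all-offline mask */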
1643                         if (any_online_cpu(mask) != NR_CPUS)
1644                                 /* One of our CPUs online: restore mask */
1645                                 set_cpus_allowed(pgdat->kswapd, mask);
1646                 }
1647         }
1648         return NOTIFY_OK;
1649 }
1650
1651 /*
1652  * This kswapd start function will be called by init and node-hot-add.
1653  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
1654  */
1655 int kswapd_run(int nid)
1656 {
1657         pg_data_t *pgdat = NODE_DATA(nid);
1658         int ret = 0;
1659
1660         if (pgdat->kswapd)
1661                 return 0;
1662
1663         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
1664         if (IS_ERR(pgdat->kswapd)) {
1665                 /* failure at boot is fatal */
1666                 BUG_ON(system_state == SYSTEM_BOOTING);
1667                 printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
1668                 ret = -1;
1669         }
1670         return ret;
1671 }
1672
1673 static int __init kswapd_init(void)
1674 {
1675         int nid;
1676
1677         swap_setup();
1678         for_each_online_node(nid)
1679                 kswapd_run(nid);
1680         hotcpu_notifier(cpu_callback, 0);
1681         return 0;
1682 }
1683
1684 module_init(kswapd_init)
1685
1686 #ifdef CONFIG_NUMA
1687 /*
1688  * Zone reclaim mode
1689  *
1690  * If non-zero, call zone_reclaim() when the number of free pages falls below
1691  * the watermarks.
1692  */
1693 int zone_reclaim_mode __read_mostly;
1694
1695 #define RECLAIM_OFF 0
1696 #define RECLAIM_ZONE (1<<0)     /* Run shrink_cache on the zone */
1697 #define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
1698 #define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */
1699
1700 /*
1701  * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
1702  * pages scanned in each zone_reclaim run: a pass scans roughly
1703  * zone_size >> priority pages, so a priority of 4 scans 1/16th of a zone.
1704  */
1705 #define ZONE_RECLAIM_PRIORITY 4
1706
1707 /*
1708  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
1709  * occur.
1710  */
1711 int sysctl_min_unmapped_ratio = 1;
1712
1713 /*
1714  * If the number of slab pages in a zone grows beyond this percentage then
1715  * slab reclaim needs to occur.
1716  */
1717 int sysctl_min_slab_ratio = 5;
1718
1719 /*
1720  * Try to free up some pages from this zone through reclaim.
1721  */
1722 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1723 {
1724         /* Minimum pages needed in order to stay on node */
1725         const unsigned long nr_pages = 1 << order;
1726         struct task_struct *p = current;
1727         struct reclaim_state reclaim_state;
1728         int priority;
1729         unsigned long nr_reclaimed = 0;
1730         struct scan_control sc = {
1731                 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1732                 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
1733                 .swap_cluster_max = max_t(unsigned long, nr_pages,
1734                                         SWAP_CLUSTER_MAX),
1735                 .gfp_mask = gfp_mask,
1736                 .swappiness = vm_swappiness,
1737         };
1738         unsigned long slab_reclaimable;
1739
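        /*
         * Drop the swap token so that no single mm's pages are shielded
         * from this reclaim pass by token protection.
         */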
1740         disable_swap_token();
1741         cond_resched();
1742         /*
1743          * We need to be able to allocate from the reserves for RECLAIM_SWAP
1744          * and we also need to be able to write out pages for RECLAIM_WRITE
1745          * and RECLAIM_SWAP.
1746          */
1747         p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
1748         reclaim_state.reclaimed_slab = 0;
1749         p->reclaim_state = &reclaim_state;
1750
1751         if (zone_page_state(zone, NR_FILE_PAGES) -
1752                 zone_page_state(zone, NR_FILE_MAPPED) >
1753                 zone->min_unmapped_pages) {
1754                 /*
1755                  * Free memory by calling shrink_zone() with increasing
1756                  * pressure (decreasing priority) until enough memory is freed.
1757                  */
1758                 priority = ZONE_RECLAIM_PRIORITY;
1759                 do {
1760                         note_zone_scanning_priority(zone, priority);
1761                         nr_reclaimed += shrink_zone(priority, zone, &sc);
1762                         priority--;
1763                 } while (priority >= 0 && nr_reclaimed < nr_pages);
1764         }
1765
1766         slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1767         if (slab_reclaimable > zone->min_slab_pages) {
1768                 /*
1769                  * shrink_slab() does not currently allow us to determine how
1770                  * many pages were freed in this zone. So we take the current
1771                  * number of slab pages and shake the slab until it is reduced
1772                  * by the same nr_pages that we used for reclaiming unmapped
1773                  * pages.
1774                  *
1775                  * Note that shrink_slab will free memory on all zones and may
1776                  * take a long time.
1777                  */
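                /*
                 * Note: 'order' is passed here where shrink_slab() expects
                 * a count of LRU pages to balance slab pressure against;
                 * for the small orders seen here this applies close to
                 * maximal slab pressure.
                 */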
1778                 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
1779                         zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
1780                                 slab_reclaimable - nr_pages)
1781                         ;
1782
1783                 /*
1784                  * Update nr_reclaimed by the number of slab pages we
1785                  * reclaimed from this zone.
1786                  */
1787                 nr_reclaimed += slab_reclaimable -
1788                         zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1789         }
1790
1791         p->reclaim_state = NULL;
1792         current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
1793         return nr_reclaimed >= nr_pages;
1794 }
1795
1796 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1797 {
1798         cpumask_t mask;
1799         int node_id;
1800
1801         /*
1802          * Zone reclaim reclaims unmapped file backed pages and
1803          * slab pages if we are over the defined limits.
1804          *
1805          * A small portion of unmapped file backed pages is needed for
1806          * file I/O, otherwise pages read by file I/O will be immediately
1807          * thrown out if the zone is overallocated. So we do not reclaim
1808          * if less than a specified percentage of the zone is used by
1809          * unmapped file backed pages.
1810          */
1811         if (zone_page_state(zone, NR_FILE_PAGES) -
1812             zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
1813             && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
1814                         <= zone->min_slab_pages)
1815                 return 0;
1816
1817         /*
1818          * Do not scan if the allocation cannot block, if the zone is
1819          * unreclaimable, if a reclaim of this zone is already under way,
1820          * or if we are ourselves reclaiming (PF_MEMALLOC).
1821          */
1822         if (!(gfp_mask & __GFP_WAIT) ||
1823                 zone->all_unreclaimable ||
1824                 atomic_read(&zone->reclaim_in_progress) > 0 ||
1825                 (current->flags & PF_MEMALLOC))
1826                         return 0;
1827
1828         /*
1829          * Only run zone reclaim on the local zone or on zones that do not
1830          * have associated processors. This will favor the local processor
1831          * over remote processors and spread off-node memory allocations
1832          * as widely as possible.
1833          */
1834         node_id = zone_to_nid(zone);
1835         mask = node_to_cpumask(node_id);
1836         if (!cpus_empty(mask) && node_id != numa_node_id())
1837                 return 0;
1838         return __zone_reclaim(zone, gfp_mask, order);
1839 }
1840 #endif