/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>  /* for try_to_release_page(),
                                        buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/* possible outcome of pageout() */
typedef enum {
        /* failed to write page out, page is locked */
        PAGE_KEEP,
        /* move page to the active list, page is locked */
        PAGE_ACTIVATE,
        /* page has been sent to the disk successfully, page is unlocked */
        PAGE_SUCCESS,
        /* page is clean and locked */
        PAGE_CLEAN,
} pageout_t;

struct scan_control {
        /* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
        unsigned long nr_to_scan;

        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;

        /* Incremented by the number of pages reclaimed */
        unsigned long nr_reclaimed;

        unsigned long nr_mapped;        /* From page_state */

        /* Ask shrink_caches, or shrink_zone to scan at this priority */
        unsigned int priority;

        /* This context's GFP mask */
        gfp_t gfp_mask;

        int may_writepage;

        /* This context's SWAP_CLUSTER_MAX. If freeing memory for
         * suspend, we effectively ignore SWAP_CLUSTER_MAX.
         * In this context, it doesn't matter that we scan the
         * whole list at once. */
        int swap_cluster_max;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
        shrinker_t              shrinker;
        struct list_head        list;
        int                     seeks;  /* seeks to recreate an obj */
        long                    nr;     /* objs pending delete */
};

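/*
 * Pages enter the LRU lists at the head, so the coldest page sits at the
 * tail: lru_to_page() therefore follows ->prev from the list head.
 */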
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)                    \
        do {                                                            \
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
                        prev = lru_to_page(&(_page->lru));              \
                        prefetch(&prev->_field);                        \
                }                                                       \
        } while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)                   \
        do {                                                            \
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
                        prev = lru_to_page(&(_page->lru));              \
                        prefetchw(&prev->_field);                       \
                }                                                       \
        } while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
        struct shrinker *shrinker;

        shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
        if (shrinker) {
                shrinker->shrinker = theshrinker;
                shrinker->seeks = seeks;
                shrinker->nr = 0;
                down_write(&shrinker_rwsem);
                list_add_tail(&shrinker->list, &shrinker_list);
                up_write(&shrinker_rwsem);
        }
        return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
        kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        unsigned long lru_pages)
{
        struct shrinker *shrinker;
        int ret = 0;

        if (scanned == 0)
                scanned = SWAP_CLUSTER_MAX;

        if (!down_read_trylock(&shrinker_rwsem))
                return 1;       /* Assume we'll be able to shrink next time */

        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
                unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

                delta = (4 * scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
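                /*
                 * Illustrative numbers only (not from any real workload):
                 * with scanned = 128 pages, seeks = 2, max_pass = 10000
                 * objects and lru_pages = 100000, delta works out to
                 * (4*128/2) * 10000 / 100001 ~= 25 objects for this pass.
                 */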
                shrinker->nr += delta;
                if (shrinker->nr < 0) {
                        printk(KERN_ERR "%s: nr=%ld\n",
                                        __FUNCTION__, shrinker->nr);
                        shrinker->nr = max_pass;
                }

                /*
                 * Avoid the risk of looping forever due to a too-large nr
                 * value: never try to free more than twice the estimated
                 * number of freeable entries.
                 */
                if (shrinker->nr > max_pass * 2)
                        shrinker->nr = max_pass * 2;

                total_scan = shrinker->nr;
                shrinker->nr = 0;

                while (total_scan >= SHRINK_BATCH) {
                        long this_scan = SHRINK_BATCH;
                        int shrink_ret;
                        int nr_before;

                        nr_before = (*shrinker->shrinker)(0, gfp_mask);
                        shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
                                ret += nr_before - shrink_ret;
                        mod_page_state(slabs_scanned, this_scan);
                        total_scan -= this_scan;

                        cond_resched();
                }

                shrinker->nr += total_scan;
        }
        up_read(&shrinker_rwsem);
        return ret;
}

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
        struct address_space *mapping;

        /* Page is in somebody's page tables. */
        if (page_mapped(page))
                return 1;

        /* Be more reluctant to reclaim swapcache than pagecache */
        if (PageSwapCache(page))
                return 1;

        mapping = page_mapping(page);
        if (!mapping)
                return 0;

        /* File is mmap'd by somebody? */
        return mapping_mapped(mapping);
}

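/*
 * A freeable pagecache page holds exactly two references: the pagecache
 * one and the one taken by the caller who isolated it from the LRU.
 * PagePrivate() accounts for the extra reference pinned by buffer heads.
 */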
static inline int is_page_cache_freeable(struct page *page)
{
        return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
        if (current_is_kswapd())
                return 1;
        if (current_is_pdflush())       /* This is unlikely, but why not... */
                return 1;
        if (!bdi_write_congested(bdi))
                return 1;
        if (bdi == current->backing_dev_info)
                return 1;
        return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
{
        lock_page(page);
        if (page_mapping(page) == mapping) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
        unlock_page(page);
}

/*
 * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
        /*
         * If the page is dirty, only perform writeback if that write
         * will be non-blocking, to prevent this allocation from being
         * stalled by pagecache activity.  But note that there may be
         * stalls if we need to run get_block().  We could test
         * PagePrivate for that.
         *
         * If this process is currently in generic_file_write() against
         * this page's queue, we can perform writeback even if that
         * will block.
         *
         * If the page is swapcache, write it back even if that would
         * block, for some throttling. This happens by accident, because
         * swap_backing_dev_info is bust: it doesn't reflect the
         * congestion state of the swapdevs.  Easy to fix, if needed.
         * See swapfile.c:page_queue_congested().
         */
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
        if (!mapping) {
                /*
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
                if (PagePrivate(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
                                printk("%s: orphaned page\n", __FUNCTION__);
                                return PAGE_CLEAN;
                        }
                }
                return PAGE_KEEP;
        }
        if (mapping->a_ops->writepage == NULL)
                return PAGE_ACTIVATE;
        if (!may_write_to_queue(mapping->backing_dev_info))
                return PAGE_KEEP;

        if (clear_page_dirty_for_io(page)) {
                int res;
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_NONE,
                        .nr_to_write = SWAP_CLUSTER_MAX,
                        .nonblocking = 1,
                        .for_reclaim = 1,
                };

                SetPageReclaim(page);
                res = mapping->a_ops->writepage(page, &wbc);
                if (res < 0)
                        handle_write_error(mapping, page, res);
                if (res == AOP_WRITEPAGE_ACTIVATE) {
                        ClearPageReclaim(page);
                        return PAGE_ACTIVATE;
                }
                if (!PageWriteback(page)) {
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }

                return PAGE_SUCCESS;
        }

        return PAGE_CLEAN;
}

/*
 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
 */
static int shrink_list(struct list_head *page_list, struct scan_control *sc)
{
        LIST_HEAD(ret_pages);
        struct pagevec freed_pvec;
        int pgactivate = 0;
        int reclaimed = 0;

        cond_resched();

        pagevec_init(&freed_pvec, 1);
        while (!list_empty(page_list)) {
                struct address_space *mapping;
                struct page *page;
                int may_enter_fs;
                int referenced;

                cond_resched();

                page = lru_to_page(page_list);
                list_del(&page->lru);

                if (TestSetPageLocked(page))
                        goto keep;

                BUG_ON(PageActive(page));

                sc->nr_scanned++;
                /* Double the slab pressure for mapped and swapcache pages */
                if (page_mapped(page) || PageSwapCache(page))
                        sc->nr_scanned++;

                if (PageWriteback(page))
                        goto keep_locked;

                referenced = page_referenced(page, 1);
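                /*
                 * The second argument tells page_referenced() that the
                 * page is already locked by us (is_locked).
                 */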
                /* In active use or really unfreeable?  Activate it. */
                if (referenced && page_mapping_inuse(page))
                        goto activate_locked;

#ifdef CONFIG_SWAP
                /*
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
                 */
                if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!add_to_swap(page))
                                goto activate_locked;
                }
#endif /* CONFIG_SWAP */

                mapping = page_mapping(page);
                may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

                /*
                 * The page is mapped into the page tables of one or more
                 * processes. Try to unmap it here.
                 */
                if (page_mapped(page) && mapping) {
                        switch (try_to_unmap(page)) {
                        case SWAP_FAIL:
                                goto activate_locked;
                        case SWAP_AGAIN:
                                goto keep_locked;
                        case SWAP_SUCCESS:
                                ; /* try to free the page below */
                        }
                }

                if (PageDirty(page)) {
                        if (referenced)
                                goto keep_locked;
                        if (!may_enter_fs)
                                goto keep_locked;
                        if (laptop_mode && !sc->may_writepage)
                                goto keep_locked;

                        /* Page is dirty, try to write it out here */
                        switch (pageout(page, mapping)) {
                        case PAGE_KEEP:
                                goto keep_locked;
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
                                if (PageWriteback(page) || PageDirty(page))
                                        goto keep;
                                /*
                                 * A synchronous write - probably a ramdisk.  Go
                                 * ahead and try to reclaim the page.
                                 */
                                if (TestSetPageLocked(page))
                                        goto keep;
                                if (PageDirty(page) || PageWriteback(page))
                                        goto keep_locked;
                                mapping = page_mapping(page);
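                                /* fall through to PAGE_CLEAN: the write
                                 * already completed, so the page is clean */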
                        case PAGE_CLEAN:
                                ; /* try to free the page below */
                        }
                }

                /*
                 * If the page has buffers, try to free the buffer mappings
                 * associated with this page. If we succeed we try to free
                 * the page as well.
                 *
                 * We do this even if the page is PageDirty().
                 * try_to_release_page() does not perform I/O, but it is
                 * possible for a page to have PageDirty set, but it is actually
                 * clean (all its buffers are clean).  This happens if the
                 * buffers were written out directly, with submit_bh(). ext3
                 * will do this, as well as the blockdev mapping.
                 * try_to_release_page() will discover that cleanness and will
                 * drop the buffers and mark the page clean - it can be freed.
                 *
                 * Rarely, pages can have buffers and no ->mapping.  These are
                 * the pages which were not successfully invalidated in
                 * truncate_complete_page().  We try to drop those buffers here
                 * and if that worked, and the page is no longer mapped into
                 * process address space (page_count == 1) it can be freed.
                 * Otherwise, leave the page on the LRU so it is swappable.
                 */
                if (PagePrivate(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
                        if (!mapping && page_count(page) == 1)
                                goto free_it;
                }

                if (!mapping)
                        goto keep_locked;       /* truncate got there first */

                write_lock_irq(&mapping->tree_lock);

                /*
                 * The non-racy check for busy page.  It is critical to check
                 * PageDirty _after_ making sure that the page is freeable and
                 * not in use by anybody.       (pagecache + us == 2)
                 */
                if (unlikely(page_count(page) != 2))
                        goto cannot_free;
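                /* keep the PageDirty() test from being reordered
                 * before the page_count() check above */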
                smp_rmb();
                if (unlikely(PageDirty(page)))
                        goto cannot_free;

#ifdef CONFIG_SWAP
                if (PageSwapCache(page)) {
                        swp_entry_t swap = { .val = page_private(page) };
                        __delete_from_swap_cache(page);
                        write_unlock_irq(&mapping->tree_lock);
                        swap_free(swap);
                        __put_page(page);       /* The pagecache ref */
                        goto free_it;
                }
#endif /* CONFIG_SWAP */

                __remove_from_page_cache(page);
                write_unlock_irq(&mapping->tree_lock);
                __put_page(page);

free_it:
                unlock_page(page);
                reclaimed++;
                if (!pagevec_add(&freed_pvec, page))
                        __pagevec_release_nonlru(&freed_pvec);
                continue;

cannot_free:
                write_unlock_irq(&mapping->tree_lock);
                goto keep_locked;

activate_locked:
                SetPageActive(page);
                pgactivate++;
keep_locked:
                unlock_page(page);
keep:
                list_add(&page->lru, &ret_pages);
                BUG_ON(PageLRU(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
                __pagevec_release_nonlru(&freed_pvec);
        mod_page_state(pgactivate, pgactivate);
        sc->nr_reclaimed += reclaimed;
        return reclaimed;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan: The number of pages to look through on the list.
 * @src:        The LRU list to pull pages off.
 * @dst:        The temp list to put pages on to.
 * @scanned:    The number of pages that were scanned.
 *
 * Returns how many pages were moved onto *@dst.
 */
static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
                             struct list_head *dst, int *scanned)
{
        int nr_taken = 0;
        struct page *page;
        int scan = 0;

        while (scan++ < nr_to_scan && !list_empty(src)) {
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);

                if (!TestClearPageLRU(page))
                        BUG();
                list_del(&page->lru);
                if (get_page_testone(page)) {
                        /*
                         * It is being freed elsewhere
                         */
                        __put_page(page);
                        SetPageLRU(page);
                        list_add(&page->lru, src);
                        continue;
                } else {
                        list_add(&page->lru, dst);
                        nr_taken++;
                }
        }

        *scanned = scan;
        return nr_taken;
}

/*
 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
 */
static void shrink_cache(struct zone *zone, struct scan_control *sc)
{
        LIST_HEAD(page_list);
        struct pagevec pvec;
        int max_scan = sc->nr_to_scan;

        pagevec_init(&pvec, 1);

        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
        while (max_scan > 0) {
                struct page *page;
                int nr_taken;
                int nr_scan;
                int nr_freed;

                nr_taken = isolate_lru_pages(sc->swap_cluster_max,
                                             &zone->inactive_list,
                                             &page_list, &nr_scan);
                zone->nr_inactive -= nr_taken;
                zone->pages_scanned += nr_scan;
                spin_unlock_irq(&zone->lru_lock);

                if (nr_taken == 0)
                        goto done;

                max_scan -= nr_scan;
                nr_freed = shrink_list(&page_list, sc);

                local_irq_disable();
                if (current_is_kswapd()) {
                        __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
                        __mod_page_state(kswapd_steal, nr_freed);
                } else
                        __mod_page_state_zone(zone, pgscan_direct, nr_scan);
                __mod_page_state_zone(zone, pgsteal, nr_freed);

                spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
                 */
                while (!list_empty(&page_list)) {
                        page = lru_to_page(&page_list);
                        if (TestSetPageLRU(page))
                                BUG();
                        list_del(&page->lru);
                        if (PageActive(page))
                                add_page_to_active_list(zone, page);
                        else
                                add_page_to_inactive_list(zone, page);
                        if (!pagevec_add(&pvec, page)) {
                                spin_unlock_irq(&zone->lru_lock);
                                __pagevec_release(&pvec);
                                spin_lock_irq(&zone->lru_lock);
                        }
                }
        }
        spin_unlock_irq(&zone->lru_lock);
done:
        pagevec_release(&pvec);
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, struct scan_control *sc)
{
        int pgmoved;
        int pgdeactivate = 0;
        int pgscanned;
        int nr_pages = sc->nr_to_scan;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
        LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
        LIST_HEAD(l_active);    /* Pages to go onto the active_list */
        struct page *page;
        struct pagevec pvec;
        int reclaim_mapped = 0;
        long mapped_ratio;
        long distress;
        long swap_tendency;

        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
        pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
                                    &l_hold, &pgscanned);
        zone->pages_scanned += pgscanned;
        zone->nr_active -= pgmoved;
        spin_unlock_irq(&zone->lru_lock);

        /*
         * `distress' is a measure of how much trouble we're having reclaiming
         * pages.  0 -> no problems.  100 -> great trouble.
         */
        distress = 100 >> zone->prev_priority;

        /*
         * The point of this algorithm is to decide when to start reclaiming
         * mapped memory instead of just pagecache.  Work out how much memory
         * is mapped.
         */
        mapped_ratio = (sc->nr_mapped * 100) / total_memory;

        /*
         * Now decide how much we really want to unmap some pages.  The mapped
         * ratio is downgraded - just because there's a lot of mapped memory
         * doesn't necessarily mean that page reclaim isn't succeeding.
         *
         * The distress ratio is important - we don't want to start going oom.
         *
         * A 100% value of vm_swappiness overrides this algorithm altogether.
         */
        swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
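        /*
         * Illustrative example (assuming DEF_PRIORITY == 12): with reclaim
         * going smoothly (prev_priority == DEF_PRIORITY), distress is
         * 100 >> 12 == 0, so mapped_ratio == 40 and the default
         * vm_swappiness of 60 give 20 + 0 + 60 == 80 < 100 and mapped
         * pages are left alone.  Under heavy pressure (prev_priority == 0)
         * distress is 100 and mapped pages are always eligible.
         */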

        /*
         * Now use this metric to decide whether to start moving mapped memory
         * onto the inactive list.
         */
        if (swap_tendency >= 100)
                reclaim_mapped = 1;

        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
                if (page_mapped(page)) {
                        if (!reclaim_mapped ||
                            (total_swap_pages == 0 && PageAnon(page)) ||
                            page_referenced(page, 0)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
                }
                list_add(&page->lru, &l_inactive);
        }

        pagevec_init(&pvec, 1);
        pgmoved = 0;
        spin_lock_irq(&zone->lru_lock);
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
                if (TestSetPageLRU(page))
                        BUG();
                if (!TestClearPageActive(page))
                        BUG();
                list_move(&page->lru, &zone->inactive_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        zone->nr_inactive += pgmoved;
                        spin_unlock_irq(&zone->lru_lock);
                        pgdeactivate += pgmoved;
                        pgmoved = 0;
                        if (buffer_heads_over_limit)
                                pagevec_strip(&pvec);
                        __pagevec_release(&pvec);
                        spin_lock_irq(&zone->lru_lock);
                }
        }
        zone->nr_inactive += pgmoved;
        pgdeactivate += pgmoved;
        if (buffer_heads_over_limit) {
                spin_unlock_irq(&zone->lru_lock);
                pagevec_strip(&pvec);
                spin_lock_irq(&zone->lru_lock);
        }

        pgmoved = 0;
        while (!list_empty(&l_active)) {
                page = lru_to_page(&l_active);
                prefetchw_prev_lru_page(page, &l_active, flags);
                if (TestSetPageLRU(page))
                        BUG();
                BUG_ON(!PageActive(page));
                list_move(&page->lru, &zone->active_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        zone->nr_active += pgmoved;
                        pgmoved = 0;
                        spin_unlock_irq(&zone->lru_lock);
                        __pagevec_release(&pvec);
                        spin_lock_irq(&zone->lru_lock);
                }
        }
        zone->nr_active += pgmoved;
        spin_unlock(&zone->lru_lock);

        __mod_page_state_zone(zone, pgrefill, pgscanned);
        __mod_page_state(pgdeactivate, pgdeactivate);
        local_irq_enable();

        pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void
shrink_zone(struct zone *zone, struct scan_control *sc)
{
        unsigned long nr_active;
        unsigned long nr_inactive;

        atomic_inc(&zone->reclaim_in_progress);

        /*
         * Add one to `nr_to_scan' just to make sure that the kernel will
         * slowly sift through the active list.
         */
        zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
        nr_active = zone->nr_scan_active;
        if (nr_active >= sc->swap_cluster_max)
                zone->nr_scan_active = 0;
        else
                nr_active = 0;
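        /*
         * The shift batches the work: each call accumulates
         * nr_active >> priority (+1) in nr_scan_active and nothing is
         * scanned until the total reaches swap_cluster_max.  As an
         * illustration only: at priority 12 with 100000 active pages,
         * roughly 25 pages accrue per call, so scanning starts after a
         * couple of calls.
         */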

        zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
        nr_inactive = zone->nr_scan_inactive;
        if (nr_inactive >= sc->swap_cluster_max)
                zone->nr_scan_inactive = 0;
        else
                nr_inactive = 0;

        while (nr_active || nr_inactive) {
                if (nr_active) {
                        sc->nr_to_scan = min(nr_active,
                                        (unsigned long)sc->swap_cluster_max);
                        nr_active -= sc->nr_to_scan;
                        refill_inactive_zone(zone, sc);
                }

                if (nr_inactive) {
                        sc->nr_to_scan = min(nr_inactive,
                                        (unsigned long)sc->swap_cluster_max);
                        nr_inactive -= sc->nr_to_scan;
                        shrink_cache(zone, sc);
                }
        }

        throttle_vm_writeout();

        atomic_dec(&zone->reclaim_in_progress);
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * The number of reclaimed pages is accumulated in sc->nr_reclaimed.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void
shrink_caches(struct zone **zones, struct scan_control *sc)
{
        int i;

        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];

                if (!populated_zone(zone))
                        continue;

                if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
                        continue;

                zone->temp_priority = sc->priority;
                if (zone->prev_priority > sc->priority)
                        zone->prev_priority = sc->priority;

                if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */

                shrink_zone(zone, sc);
        }
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
        int priority;
        int ret = 0;
        int total_scanned = 0, total_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc;
        unsigned long lru_pages = 0;
        int i;

        sc.gfp_mask = gfp_mask;
        sc.may_writepage = 0;

        inc_page_state(allocstall);

        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];

                if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
                        continue;

                zone->temp_priority = DEF_PRIORITY;
                lru_pages += zone->nr_active + zone->nr_inactive;
        }

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc.nr_mapped = read_page_state(nr_mapped);
                sc.nr_scanned = 0;
                sc.nr_reclaimed = 0;
                sc.priority = priority;
                sc.swap_cluster_max = SWAP_CLUSTER_MAX;
                if (!priority)
                        disable_swap_token();
                shrink_caches(zones, &sc);
                shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
                if (reclaim_state) {
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        reclaim_state->reclaimed_slab = 0;
                }
                total_scanned += sc.nr_scanned;
                total_reclaimed += sc.nr_reclaimed;
                if (total_reclaimed >= sc.swap_cluster_max) {
                        ret = 1;
                        goto out;
                }

                /*
                 * Try to write back as many pages as we just scanned.  This
                 * tends to cause slow streaming writers to write data to the
                 * disk smoothly, at the dirtying rate, which is nice.   But
                 * that's undesirable in laptop mode, where we *want* lumpy
                 * writeout.  So in laptop mode, write out the whole world.
                 */
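                /* the threshold below is 1.5 * swap_cluster_max, kept in
                 * integer arithmetic */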
                if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
                        wakeup_pdflush(laptop_mode ? 0 : total_scanned);
                        sc.may_writepage = 1;
                }

                /* Take a nap, wait for some writeback to complete */
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
                        blk_congestion_wait(WRITE, HZ/10);
        }
out:
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];

                if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
                        continue;

                zone->prev_priority = zone->temp_priority;
        }
        return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
{
        int to_free = nr_pages;
        int all_zones_ok;
        int priority;
        int i;
        int total_scanned, total_reclaimed;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc;

loop_again:
        total_scanned = 0;
        total_reclaimed = 0;
        sc.gfp_mask = GFP_KERNEL;
        sc.may_writepage = 0;
        sc.nr_mapped = read_page_state(nr_mapped);

        inc_page_state(pageoutrun);

        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;

                zone->temp_priority = DEF_PRIORITY;
        }

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;

                /* The swap token gets in the way of swapout... */
                if (!priority)
                        disable_swap_token();

                all_zones_ok = 1;

                if (nr_pages == 0) {
                        /*
                         * Scan in the highmem->dma direction for the highest
                         * zone which needs scanning
                         */
                        for (i = pgdat->nr_zones - 1; i >= 0; i--) {
                                struct zone *zone = pgdat->node_zones + i;

                                if (!populated_zone(zone))
                                        continue;

                                if (zone->all_unreclaimable &&
                                                priority != DEF_PRIORITY)
                                        continue;

                                if (!zone_watermark_ok(zone, order,
                                                zone->pages_high, 0, 0)) {
                                        end_zone = i;
                                        goto scan;
                                }
                        }
                        goto out;
                } else {
                        end_zone = pgdat->nr_zones - 1;
                }
scan:
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;

                        lru_pages += zone->nr_active + zone->nr_inactive;
                }

                /*
                 * Now scan the zone in the dma->highmem direction, stopping
                 * at the last zone which needs scanning.
                 *
                 * We do this because the page allocator works in the opposite
                 * direction.  This prevents the page allocator from allocating
                 * pages behind kswapd's direction of progress, which would
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;

                        if (nr_pages == 0) {    /* Not software suspend */
                                if (!zone_watermark_ok(zone, order,
                                                zone->pages_high, end_zone, 0))
                                        all_zones_ok = 0;
                        }
                        zone->temp_priority = priority;
                        if (zone->prev_priority > priority)
                                zone->prev_priority = priority;
                        sc.nr_scanned = 0;
                        sc.nr_reclaimed = 0;
                        sc.priority = priority;
                        sc.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX;
                        atomic_inc(&zone->reclaim_in_progress);
                        shrink_zone(zone, &sc);
                        atomic_dec(&zone->reclaim_in_progress);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                lru_pages);
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_reclaimed += sc.nr_reclaimed;
                        total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
                                    (zone->nr_active + zone->nr_inactive) * 4)
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
                         * even in laptop mode
                         */
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
                            total_scanned > total_reclaimed + total_reclaimed / 2)
                                sc.may_writepage = 1;
                }
                if (nr_pages && to_free > total_reclaimed)
                        continue;       /* swsusp: need to do more work */
                if (all_zones_ok)
                        break;          /* kswapd: all done */
                /*
                 * OK, kswapd is getting into trouble.  Take a nap, then take
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
                        blk_congestion_wait(WRITE, HZ/10);

                /*
                 * We do this so kswapd doesn't build up large priorities for
                 * example when it is freeing in parallel with allocators. It
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
                if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
                        break;
        }
out:
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;

                zone->prev_priority = zone->temp_priority;
        }
        if (!all_zones_ok) {
                cond_resched();
                goto loop_again;
        }

        return total_reclaimed;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
        unsigned long order;
        pg_data_t *pgdat = (pg_data_t *)p;
        struct task_struct *tsk = current;
        DEFINE_WAIT(wait);
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
        cpumask_t cpumask;

        daemonize("kswapd%d", pgdat->node_id);
        cpumask = node_to_cpumask(pgdat->node_id);
        if (!cpus_empty(cpumask))
                set_cpus_allowed(tsk, cpumask);
        current->reclaim_state = &reclaim_state;

        /*
         * Tell the memory management that we're a "memory allocator",
         * and that if we need more memory we should get access to it
         * regardless (see "__alloc_pages()"). "kswapd" should
         * never get caught in the normal page freeing logic.
         *
         * (Kswapd normally doesn't need memory anyway, but sometimes
         * you need a small amount of memory in order to be able to
         * page out something else, and this flag essentially protects
         * us from recursively trying to free more memory as we're
         * trying to free the first piece of memory in the first place).
         */
        tsk->flags |= PF_MEMALLOC|PF_KSWAPD;

        order = 0;
        for ( ; ; ) {
                unsigned long new_order;

                try_to_freeze();

                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                new_order = pgdat->kswapd_max_order;
                pgdat->kswapd_max_order = 0;
                if (order < new_order) {
                        /*
                         * Don't sleep if someone wants a larger 'order'
                         * allocation
                         */
                        order = new_order;
                } else {
                        schedule();
                        order = pgdat->kswapd_max_order;
                }
                finish_wait(&pgdat->kswapd_wait, &wait);

                balance_pgdat(pgdat, 0, order);
        }
        return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
        pg_data_t *pgdat;

        if (!populated_zone(zone))
                return;

        pgdat = zone->zone_pgdat;
        if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
        if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
                return;
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
        wake_up_interruptible(&pgdat->kswapd_wait);
}

#ifdef CONFIG_PM
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
        pg_data_t *pgdat;
        int nr_to_free = nr_pages;
        int ret = 0;
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };

        current->reclaim_state = &reclaim_state;
        for_each_pgdat(pgdat) {
                int freed;
                freed = balance_pgdat(pgdat, nr_to_free, 0);
                ret += freed;
                nr_to_free -= freed;
                if (nr_to_free <= 0)
                        break;
        }
        current->reclaim_state = NULL;
        return ret;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        pg_data_t *pgdat;
        cpumask_t mask;

        if (action == CPU_ONLINE) {
                for_each_pgdat(pgdat) {
                        mask = node_to_cpumask(pgdat->node_id);
                        if (any_online_cpu(mask) != NR_CPUS)
                                /* One of our CPUs online: restore mask */
                                set_cpus_allowed(pgdat->kswapd, mask);
                }
        }
        return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __init kswapd_init(void)
{
        pg_data_t *pgdat;
        swap_setup();
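        /*
         * Start one kswapd per node: kernel_thread() returns the new
         * thread's pid, which find_task_by_pid() maps back to the
         * task_struct cached in pgdat->kswapd for later CPU rebinding.
         */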
        for_each_pgdat(pgdat)
                pgdat->kswapd = find_task_by_pid(
                                kernel_thread(kswapd, pgdat, CLONE_KERNEL));
        total_memory = nr_free_pagecache_pages();
        hotcpu_notifier(cpu_callback, 0);
        return 0;
}

module_init(kswapd_init)