[PATCH] VM: add __GFP_NORECLAIM
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
struct pglist_data *pgdat_list;
unsigned long totalram_pages;
unsigned long totalhigh_pages;
long nr_swap_pages;

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
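
/*
 * Worked example (editor's illustration, not from the original source):
 * with the default ratios above on the 1G configuration, a NORMAL
 * allocation keeps 784M/256 = ~3M of ZONE_DMA off limits to itself,
 * while a HIGHMEM allocation keeps 224M/32 = 7M of ZONE_NORMAL and
 * (224M+784M)/256 = ~4M of ZONE_DMA in reserve for lower-zone-only
 * callers.
 */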

EXPORT_SYMBOL(totalram_pages);
EXPORT_SYMBOL(nr_swap_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
int min_free_kbytes = 1024;

unsigned long __initdata nr_kernel_pages;
unsigned long __initdata nr_all_pages;

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
		return 1;
	if (page_to_pfn(page) < zone->zone_start_pfn)
		return 1;
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 1;
#endif
	if (zone != page_zone(page))
		return 1;
	return 0;
}

static void bad_page(const char *function, struct page *page)
{
	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
		function, current->comm, page);
	printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
		(int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
		page->mapping, page_mapcount(page), page_count(page));
	printk(KERN_EMERG "Backtrace:\n");
	dump_stack();
	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
	page->flags &= ~(1 << PG_private	|
			1 << PG_locked	|
			1 << PG_lru	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_swapcache |
			1 << PG_writeback);
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	tainted |= TAINT_BAD_PAGE;
}

#ifndef CONFIG_HUGETLB_PAGE
#define prep_compound_page(page, order) do { } while (0)
#define destroy_compound_page(page, order) do { } while (0)
#else
/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->mapping, if non-zero, holds the address of the
 * compound page's put_page() function.
 *
 * The order of the allocation is stored in the first tail page's ->index.
 * This is only for debug at present.  This usage means that zero-order pages
 * may not be compound.
 */
static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].mapping = NULL;
	page[1].index = order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		SetPageCompound(p);
		p->private = (unsigned long)page;
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (!PageCompound(page))
		return;

	if (page[1].index != order)
		bad_page(__FUNCTION__, page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (!PageCompound(p))
			bad_page(__FUNCTION__, page);
		if (p->private != (unsigned long)page)
			bad_page(__FUNCTION__, page);
		ClearPageCompound(p);
	}
}
#endif		/* CONFIG_HUGETLB_PAGE */

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't need
 * atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page->private;
}

static inline void set_page_order(struct page *page, int order)
{
	page->private = order;
	__SetPagePrivate(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPagePrivate(page);
	page->private = 0;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
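
/*
 * Worked example (editor's illustration, not from the original source):
 * for page_idx = 12 at order 2, __page_find_buddy() yields buddy index
 * 12 ^ (1 << 2) = 8, and __find_combined_index() yields
 * 12 & ~(1 << 2) = 8, i.e. the order-3 block starting at index 8 that
 * the two order-2 buddies would form if coalesced.
 */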

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is free &&
 * (b) the buddy is on the buddy system &&
 * (c) a page and its buddy have the same order.
 * For recording a page's order, we use page->private and PG_private.
 */
static inline int page_is_buddy(struct page *page, int order)
{
	if (PagePrivate(page)		&&
	    (page_order(page) == order) &&
	    !PageReserved(page)		&&
	     page_count(page) == 0)
		return 1;
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PG_private. A page's
 * order is recorded in the page->private field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_pages_bulk (struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(order))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	BUG_ON(page_idx & (order_size - 1));
	BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		combined_idx = __find_combined_index(page_idx, order);
		buddy = __page_find_buddy(page, page_idx, order);

		if (bad_range(zone, buddy))
			break;
		if (!page_is_buddy(buddy, order))
			break;		/* Move the buddy up one level. */
		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}
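
/*
 * Example walk-through (editor's illustration, not from the original
 * source): freeing page_idx 10 at order 0 when index 11 is free finds
 * buddy 10 ^ 1 = 11 and merges at combined index 10 & ~1 = 10; at
 * order 1 the buddy is 10 ^ 2 = 8, so if indices 8-9 are also free the
 * block merges again into an order-2 block at index 8 before the final
 * list_add().
 */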

static inline void free_pages_check(const char *function, struct page *page)
{
	if (	page_mapcount(page) ||
		page->mapping != NULL ||
		page_count(page) != 0 ||
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim |
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback )))
		bad_page(function, page);
	if (PageDirty(page))
		ClearPageDirty(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the maximum number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static int
free_pages_bulk(struct zone *zone, int count,
		struct list_head *list, unsigned int order)
{
	unsigned long flags;
	struct page *page = NULL;
	int ret = 0;

	spin_lock_irqsave(&zone->lock, flags);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (!list_empty(list) && count--) {
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_pages_bulk list manipulates */
		list_del(&page->lru);
		__free_pages_bulk(page, zone, order);
		ret++;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}

void __free_pages_ok(struct page *page, unsigned int order)
{
	LIST_HEAD(list);
	int i;

	arch_free_page(page, order);

	mod_page_state(pgfree, 1 << order);

#ifndef CONFIG_MMU
	if (order > 0)
		for (i = 1 ; i < (1 << order) ; ++i)
			__put_page(page + i);
#endif

	for (i = 0 ; i < (1 << order) ; ++i)
		free_pages_check(__FUNCTION__, page + i);
	list_add(&page->lru, &list);
	kernel_map_pages(page, 1<<order, 0);
	free_pages_bulk(page_zone(page), 1, &list, order);
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline struct page *
expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
	return page;
}
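
/*
 * Example (editor's illustration, not from the original source): to
 * satisfy an order-0 request from an order-3 block of 8 pages, expand()
 * returns the first page and hands an order-2 block (pages 4-7), an
 * order-1 block (pages 2-3), and an order-0 block (page 1) back to the
 * free lists on its way down from high = 3 to low = 0.
 */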

void set_page_refs(struct page *page, int order)
{
#ifdef CONFIG_MMU
	set_page_count(page, 1);
#else
	int i;

	/*
	 * We need to reference all the pages for this order, otherwise if
	 * anyone accesses one of the pages with (get/put) it will be freed.
	 * - eg: access_process_vm()
	 */
	for (i = 0; i < (1 << order); i++)
		set_page_count(page + i, 1);
#endif /* CONFIG_MMU */
}

/*
 * This page is about to be returned from the page allocator
 */
static void prep_new_page(struct page *page, int order)
{
	if (page->mapping || page_mapcount(page) ||
	    (page->flags & (
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_lru	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_swapcache |
			1 << PG_writeback )))
		bad_page(__FUNCTION__, page);

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	page->private = 0;
	set_page_refs(page, order);
	kernel_map_pages(page, 1 << order, 1);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		return expand(zone, page, order, current_order, area);
	}

	return NULL;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	unsigned long flags;
	int i;
	int allocated = 0;
	struct page *page;

	spin_lock_irqsave(&zone->lock, flags);
	for (i = 0; i < count; ++i) {
		page = __rmqueue(zone, order);
		if (page == NULL)
			break;
		allocated++;
		list_add_tail(&page->lru, list);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return allocated;
}

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = &zone->pageset[cpu];
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			pcp->count -= free_pages_bulk(zone, pcp->count,
						&pcp->list, 0);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i=0; i < (1<<order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn+i));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);
}
#endif /* CONFIG_PM */

static void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
#ifdef CONFIG_NUMA
	unsigned long flags;
	int cpu;
	pg_data_t *pg = z->zone_pgdat;
	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
	struct per_cpu_pageset *p;

	local_irq_save(flags);
	cpu = smp_processor_id();
	p = &z->pageset[cpu];
	if (pg == orig) {
		z->pageset[cpu].numa_hit++;
	} else {
		p->numa_miss++;
		zonelist->zones[0]->pageset[cpu].numa_foreign++;
	}
	if (pg == NODE_DATA(numa_node_id()))
		p->local_node++;
	else
		p->other_node++;
	local_irq_restore(flags);
#endif
}

/*
 * Free a 0-order page
 */
static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	kernel_map_pages(page, 1, 0);
	inc_page_state(pgfree);
	if (PageAnon(page))
		page->mapping = NULL;
	free_pages_check(__FUNCTION__, page);
	pcp = &zone->pageset[get_cpu()].pcp[cold];
	local_irq_save(flags);
	if (pcp->count >= pcp->high)
		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}
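
/*
 * Usage note (editor's illustration, not from the original source):
 * free hot when the page's contents were just touched and its
 * cachelines are likely still warm; free cold when the data is of no
 * further interest, as __pagevec_free() below does for bulk releases
 * via pvec->cold.
 */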

static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
{
	int i;

	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *
buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
{
	unsigned long flags;
	struct page *page = NULL;
	int cold = !!(gfp_flags & __GFP_COLD);

	if (order == 0) {
		struct per_cpu_pages *pcp;

		pcp = &zone->pageset[get_cpu()].pcp[cold];
		local_irq_save(flags);
		if (pcp->count <= pcp->low)
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
		if (pcp->count) {
			page = list_entry(pcp->list.next, struct page, lru);
			list_del(&page->lru);
			pcp->count--;
		}
		local_irq_restore(flags);
		put_cpu();
	}

	if (page == NULL) {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	if (page != NULL) {
		BUG_ON(bad_range(zone, page));
		mod_page_state_zone(zone, pgalloc, 1 << order);
		prep_new_page(page, order);

		if (gfp_flags & __GFP_ZERO)
			prep_zero_page(page, order, gfp_flags);

		if (order && (gfp_flags & __GFP_COMP))
			prep_compound_page(page, order);
	}
	return page;
}

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int can_try_harder, int gfp_high)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (gfp_high)
		min -= min / 2;
	if (can_try_harder)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
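
/*
 * Worked example (editor's illustration, not from the original source):
 * for an order-2 request against mark = 128 with gfp_high set, min
 * drops to 64; the zone must then hold more than 64 + lowmem_reserve
 * free pages overall, more than 32 once order-0 pages are discounted,
 * and more than 16 once order-1 pages are discounted as well.
 */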

static inline int
should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
{
	if (!z->reclaim_pages)
		return 0;
	if (gfp_mask & __GFP_NORECLAIM)
		return 0;
	return 1;
}
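
/*
 * Usage sketch (editor's illustration, not from the original patch): a
 * caller that would rather fall back to another zone than trigger early
 * zone reclaim, e.g. an opportunistic readahead allocation, could pass
 * the new flag:
 *
 *	page = alloc_pages(GFP_HIGHUSER | __GFP_NORECLAIM, 0);
 *	if (!page)
 *		return;		(readahead is best-effort, just skip it)
 */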

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const int wait = gfp_mask & __GFP_WAIT;
	struct zone **zones, *z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int i;
	int classzone_idx;
	int do_retry;
	int can_try_harder;
	int did_some_progress;

	might_sleep_if(wait);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has a realtime
	 * scheduling policy
	 */
	can_try_harder = (unlikely(rt_task(p)) && !in_interrupt()) || !wait;

	zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(zones[0] == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	classzone_idx = zone_idx(zones[0]);

restart:
	/* Go through the zonelist once, looking for a zone with enough free */
	for (i = 0; (z = zones[i]) != NULL; i++) {
		int do_reclaim = should_reclaim_zone(z, gfp_mask);

		if (!cpuset_zone_allowed(z))
			continue;

		/*
		 * If the zone is to attempt early page reclaim then this loop
		 * will try to reclaim pages and check the watermark a second
		 * time before giving up and falling back to the next zone.
		 */
zone_reclaim_retry:
		if (!zone_watermark_ok(z, order, z->pages_low,
				       classzone_idx, 0, 0)) {
			if (!do_reclaim)
				continue;
			else {
				zone_reclaim(z, gfp_mask, order);
				/* Only try reclaim once */
				do_reclaim = 0;
				goto zone_reclaim_retry;
			}
		}

		page = buffered_rmqueue(z, order, gfp_mask);
		if (page)
			goto got_pg;
	}

	for (i = 0; (z = zones[i]) != NULL; i++)
		wakeup_kswapd(z, order);

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 */
	for (i = 0; (z = zones[i]) != NULL; i++) {
		if (!zone_watermark_ok(z, order, z->pages_min,
				       classzone_idx, can_try_harder,
				       gfp_mask & __GFP_HIGH))
			continue;

		if (wait && !cpuset_zone_allowed(z))
			continue;

		page = buffered_rmqueue(z, order, gfp_mask);
		if (page)
			goto got_pg;
	}

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
			/* go through the zonelist yet again, ignoring mins */
			for (i = 0; (z = zones[i]) != NULL; i++) {
				if (!cpuset_zone_allowed(z))
					continue;
				page = buffered_rmqueue(z, order, gfp_mask);
				if (page)
					goto got_pg;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zones, gfp_mask, order);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		for (i = 0; (z = zones[i]) != NULL; i++) {
			if (!zone_watermark_ok(z, order, z->pages_min,
					       classzone_idx, can_try_harder,
					       gfp_mask & __GFP_HIGH))
				continue;

			if (!cpuset_zone_allowed(z))
				continue;

			page = buffered_rmqueue(z, order, gfp_mask);
			if (page)
				goto got_pg;
		}
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		for (i = 0; (z = zones[i]) != NULL; i++) {
			if (!zone_watermark_ok(z, order, z->pages_high,
					       classzone_idx, 0, 0))
				continue;

			if (!cpuset_zone_allowed(z))
				continue;

			page = buffered_rmqueue(z, order, gfp_mask);
			if (page)
				goto got_pg;
		}

		out_of_memory(gfp_mask);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		blk_congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
	}
	return NULL;
got_pg:
	zone_statistics(zonelist, z);
	return page;
}

EXPORT_SYMBOL(__alloc_pages);
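
/*
 * Usage sketch (editor's illustration, not from the original source):
 * most callers reach __alloc_pages() through the alloc_pages() wrapper,
 * which picks the zonelist from the gfp zone bits, e.g.:
 *
 *	struct page *page;
 *
 *	page = alloc_pages(GFP_KERNEL, 2);	(16K with 4K pages)
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 */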

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
{
	struct page * page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	BUG_ON(gfp_mask & __GFP_HIGHMEM);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);
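
/*
 * Usage sketch (editor's illustration, not from the original source):
 * these helpers return kernel virtual addresses rather than struct page
 * pointers, so they pair with free_pages()/free_page():
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page(addr);
 */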

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (!PageReserved(page) && put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int i, sum = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	pg_data_t *pgdat;
	unsigned int sum = 0;

	for_each_pgdat(pgdat) {
		struct zonelist *zonelist = pgdat->node_zonelists + offset;
		struct zone **zonep = zonelist->zones;
		struct zone *zone;

		for (zone = *zonep++; zone; zone = *zonep++) {
			unsigned long size = zone->present_pages;
			unsigned long high = zone->pages_high;
			if (size > high)
				sum += size - high;
		}
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
}

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
}

#ifdef CONFIG_HIGHMEM
unsigned int nr_free_highpages (void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_pgdat(pgdat)
		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;

	return pages;
}
#endif

#ifdef CONFIG_NUMA
static void show_node(struct zone *zone)
{
	printk("Node %d ", zone->zone_pgdat->node_id);
}
#else
#define show_node(zone) do { } while (0)
#endif

/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
static DEFINE_PER_CPU(struct page_state, page_states) = {0};

atomic_t nr_pagecache = ATOMIC_INIT(0);
EXPORT_SYMBOL(nr_pagecache);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif

void __get_page_state(struct page_state *ret, int nr)
{
	int cpu = 0;

	memset(ret, 0, sizeof(*ret));

	cpu = first_cpu(cpu_online_map);
	while (cpu < NR_CPUS) {
		unsigned long *in, *out, off;

		in = (unsigned long *)&per_cpu(page_states, cpu);

		cpu = next_cpu(cpu, cpu_online_map);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(page_states, cpu));

		out = (unsigned long *)ret;
		for (off = 0; off < nr; off++)
			*out++ += *in++;
	}
}

void get_page_state(struct page_state *ret)
{
	int nr;

	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
	nr /= sizeof(unsigned long);

	__get_page_state(ret, nr + 1);
}

void get_full_page_state(struct page_state *ret)
{
	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
}

unsigned long __read_page_state(unsigned offset)
{
	unsigned long ret = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long in;

		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
		ret += *((unsigned long *)in);
	}
	return ret;
}

void __mod_page_state(unsigned offset, unsigned long delta)
{
	unsigned long flags;
	void* ptr;

	local_irq_save(flags);
	ptr = &__get_cpu_var(page_states);
	*(unsigned long*)(ptr + offset) += delta;
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__mod_page_state);

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}

void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = nr_blockdev_pages();
#ifdef CONFIG_HIGHMEM
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = nr_free_pages_pgdat(pgdat);
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	struct page_state ps;
	int cpu, temperature;
	unsigned long active;
	unsigned long inactive;
	unsigned long free;
	struct zone *zone;

	for_each_zone(zone) {
		show_node(zone);
		printk("%s per-cpu:", zone->name);

		if (!zone->present_pages) {
			printk(" empty\n");
			continue;
		} else
			printk("\n");

		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
			struct per_cpu_pageset *pageset;

			if (!cpu_possible(cpu))
				continue;

			pageset = zone->pageset + cpu;

			for (temperature = 0; temperature < 2; temperature++)
				printk("cpu %d %s: low %d, high %d, batch %d\n",
					cpu,
					temperature ? "cold" : "hot",
					pageset->pcp[temperature].low,
					pageset->pcp[temperature].high,
					pageset->pcp[temperature].batch);
		}
	}

	get_page_state(&ps);
	get_zone_counts(&active, &inactive, &free);

	printk("\nFree pages: %11ukB (%ukB HighMem)\n",
		K(nr_free_pages()),
		K(nr_free_highpages()));

	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
		active,
		inactive,
		ps.nr_dirty,
		ps.nr_writeback,
		ps.nr_unstable,
		nr_free_pages(),
		ps.nr_slab,
		ps.nr_mapped,
		ps.nr_page_table_pages);

	for_each_zone(zone) {
		int i;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active:%lukB"
			" inactive:%lukB"
			" present:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone->free_pages),
			K(zone->pages_min),
			K(zone->pages_low),
			K(zone->pages_high),
			K(zone->nr_active),
			K(zone->nr_inactive),
			K(zone->present_pages),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_zone(zone) {
		unsigned long nr, flags, order, total = 0;

		show_node(zone);
		printk("%s: ", zone->name);
		if (!zone->present_pages) {
			printk("empty\n");
			continue;
		}

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr = zone->free_area[order].nr_free;
			total += nr << order;
			printk("%lu*%lukB ", nr, K(1UL) << order);
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		printk("= %lukB\n", K(total));
	}

	show_swap_cache_info();
}

/*
 * Builds allocation fallback zone lists.
 */
static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
{
	switch (k) {
		struct zone *zone;
	default:
		BUG();
	case ZONE_HIGHMEM:
		zone = pgdat->node_zones + ZONE_HIGHMEM;
		if (zone->present_pages) {
#ifndef CONFIG_HIGHMEM
			BUG();
#endif
			zonelist->zones[j++] = zone;
		}
	case ZONE_NORMAL:
		zone = pgdat->node_zones + ZONE_NORMAL;
		if (zone->present_pages)
			zonelist->zones[j++] = zone;
	case ZONE_DMA:
		zone = pgdat->node_zones + ZONE_DMA;
		if (zone->present_pages)
			zonelist->zones[j++] = zone;
	}

	return j;
}
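
/*
 * Note (editor's illustration, not from the original source): the case
 * fall-through above is deliberate. Starting at k = ZONE_HIGHMEM
 * appends HighMem, then Normal, then DMA, so a zonelist built for a
 * highmem request falls back through the lower zones in order, e.g.
 * HighMem -> Normal -> DMA on a typical i386 box.
 */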

#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
static int __initdata node_load[MAX_NUMNODES];
/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int i, n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(i) {
		cpumask_t tmp;

		/* Start from local node */
		n = (node+i) % num_online_nodes();

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the local node if we haven't already */
		if (!node_isset(node, *used_node_mask)) {
			best_node = node;
			break;
		}

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Give preference to headless and unused nodes */
		tmp = node_to_cpumask(n);
		if (!cpus_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}

static void __init build_zonelists(pg_data_t *pgdat)
{
	int i, j, k, node, local_node;
	int prev_node, load;
	struct zonelist *zonelist;
	nodemask_t used_mask;

	/* initialize zonelists */
	for (i = 0; i < GFP_ZONETYPES; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->zones[0] = NULL;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = num_online_nodes();
	prev_node = local_node;
	nodes_clear(used_mask);
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (node_distance(local_node, node) !=
				node_distance(local_node, prev_node))
			node_load[node] += load;
		prev_node = node;
		load--;
		for (i = 0; i < GFP_ZONETYPES; i++) {
			zonelist = pgdat->node_zonelists + i;
			for (j = 0; zonelist->zones[j] != NULL; j++);

			k = ZONE_NORMAL;
			if (i & __GFP_HIGHMEM)
				k = ZONE_HIGHMEM;
			if (i & __GFP_DMA)
				k = ZONE_DMA;

			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
			zonelist->zones[j] = NULL;
		}
	}
}

#else	/* CONFIG_NUMA */

static void __init build_zonelists(pg_data_t *pgdat)
{
	int i, j, k, node, local_node;

	local_node = pgdat->node_id;
	for (i = 0; i < GFP_ZONETYPES; i++) {
		struct zonelist *zonelist;

		zonelist = pgdat->node_zonelists + i;

		j = 0;
		k = ZONE_NORMAL;
		if (i & __GFP_HIGHMEM)
			k = ZONE_HIGHMEM;
		if (i & __GFP_DMA)
			k = ZONE_DMA;

		j = build_zonelists_node(pgdat, zonelist, j, k);
		/*
		 * Now we build the zonelist so that it contains the zones
		 * of all the other nodes.
		 * We don't want to pressure a particular node, so when
		 * building the zones for node N, we make sure that the
		 * zones coming right after the local ones are those from
		 * node N+1 (modulo N)
		 */
		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
		}
		for (node = 0; node < local_node; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
		}

		zonelist->zones[j] = NULL;
	}
}

#endif	/* CONFIG_NUMA */
1533
1534 void __init build_all_zonelists(void)
1535 {
1536         int i;
1537
1538         for_each_online_node(i)
1539                 build_zonelists(NODE_DATA(i));
1540         printk("Built %i zonelists\n", num_online_nodes());
1541         cpuset_init_current_mems_allowed();
1542 }
1543
1544 /*
1545  * Helper functions to size the waitqueue hash table.
1546  * Essentially these want to choose hash table sizes sufficiently
1547  * large so that collisions trying to wait on pages are rare.
1548  * But in fact, the number of active page waitqueues on typical
1549  * systems is ridiculously low, less than 200. So this is even
1550  * conservative, even though it seems large.
1551  *
1552  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
1553  * waitqueues, i.e. the size of the waitq table given the number of pages.
1554  */
1555 #define PAGES_PER_WAITQUEUE     256
1556
1557 static inline unsigned long wait_table_size(unsigned long pages)
1558 {
1559         unsigned long size = 1;
1560
1561         pages /= PAGES_PER_WAITQUEUE;
1562
1563         while (size < pages)
1564                 size <<= 1;
1565
1566         /*
1567          * Once we have dozens or even hundreds of threads sleeping
1568          * on IO we've got bigger problems than wait queue collision.
1569          * Limit the size of the wait table to a reasonable size.
1570          */
1571         size = min(size, 4096UL);
1572
1573         return max(size, 4UL);
1574 }
1575
1576 /*
1577  * This is an integer logarithm so that shifts can be used later
1578  * to extract the more random high bits from the multiplicative
1579  * hash function before the remainder is taken.
1580  */
1581 static inline unsigned long wait_table_bits(unsigned long size)
1582 {
1583         return ffz(~size);
1584 }
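/*
 * E.g. (illustrative): for size = 1024 only bit 10 is set, so ~size
 * has its first zero at bit 10 and ffz(~size) returns 10 == log2(1024).
 */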
1585
1586 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1587
1588 static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
1589                 unsigned long *zones_size, unsigned long *zholes_size)
1590 {
1591         unsigned long realtotalpages, totalpages = 0;
1592         int i;
1593
1594         for (i = 0; i < MAX_NR_ZONES; i++)
1595                 totalpages += zones_size[i];
1596         pgdat->node_spanned_pages = totalpages;
1597
1598         realtotalpages = totalpages;
1599         if (zholes_size)
1600                 for (i = 0; i < MAX_NR_ZONES; i++)
1601                         realtotalpages -= zholes_size[i];
1602         pgdat->node_present_pages = realtotalpages;
1603         printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1604 }
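/*
 * Worked example (illustrative): zones_size = { 4096, 126976, 0 }
 * with zholes_size = { 0, 1024, 0 } yields node_spanned_pages =
 * 131072 but node_present_pages = 130048.
 */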
1605
1606
1607 /*
1608  * Initially all pages are reserved - free ones are freed
1609  * up by free_all_bootmem() once the early boot process is
1610  * done. Non-atomic initialization, single-pass.
1611  */
1612 void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
1613                 unsigned long start_pfn)
1614 {
1615         struct page *start = pfn_to_page(start_pfn);
1616         struct page *page;
1617
1618         for (page = start; page < (start + size); page++) {
1619                 set_page_zone(page, NODEZONE(nid, zone));
1620                 set_page_count(page, 0);
1621                 reset_page_mapcount(page);
1622                 SetPageReserved(page);
1623                 INIT_LIST_HEAD(&page->lru);
1624 #ifdef WANT_PAGE_VIRTUAL
1625                 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1626                 if (!is_highmem_idx(zone))
1627                         set_page_address(page, __va(start_pfn << PAGE_SHIFT));
1628 #endif
1629                 start_pfn++;
1630         }
1631 }
1632
1633 void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
1634                                 unsigned long size)
1635 {
1636         int order;
1637         for (order = 0; order < MAX_ORDER ; order++) {
1638                 INIT_LIST_HEAD(&zone->free_area[order].free_list);
1639                 zone->free_area[order].nr_free = 0;
1640         }
1641 }
1642
1643 #ifndef __HAVE_ARCH_MEMMAP_INIT
1644 #define memmap_init(size, nid, zone, start_pfn) \
1645         memmap_init_zone((size), (nid), (zone), (start_pfn))
1646 #endif
1647
1648 /*
1649  * Set up the zone data structures:
1650  *   - mark all pages reserved
1651  *   - mark all memory queues empty
1652  *   - clear the memory bitmaps
1653  */
1654 static void __init free_area_init_core(struct pglist_data *pgdat,
1655                 unsigned long *zones_size, unsigned long *zholes_size)
1656 {
1657         unsigned long i, j;
1658         const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
1659         int cpu, nid = pgdat->node_id;
1660         unsigned long zone_start_pfn = pgdat->node_start_pfn;
1661
1662         pgdat->nr_zones = 0;
1663         init_waitqueue_head(&pgdat->kswapd_wait);
1664         pgdat->kswapd_max_order = 0;
1665
1666         for (j = 0; j < MAX_NR_ZONES; j++) {
1667                 struct zone *zone = pgdat->node_zones + j;
1668                 unsigned long size, realsize;
1669                 unsigned long batch;
1670
1671                 zone_table[NODEZONE(nid, j)] = zone;
1672                 realsize = size = zones_size[j];
1673                 if (zholes_size)
1674                         realsize -= zholes_size[j];
1675
1676                 if (j == ZONE_DMA || j == ZONE_NORMAL)
1677                         nr_kernel_pages += realsize;
1678                 nr_all_pages += realsize;
1679
1680                 zone->spanned_pages = size;
1681                 zone->present_pages = realsize;
1682                 zone->name = zone_names[j];
1683                 spin_lock_init(&zone->lock);
1684                 spin_lock_init(&zone->lru_lock);
1685                 zone->zone_pgdat = pgdat;
1686                 zone->free_pages = 0;
1687
1688                 zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
1689
1690                 /*
1691                  * The per-cpu-pages pools are set to around 1000th of the
1692                  * size of the zone.  But no more than 1/4 of a meg - there's
1693                  * no point in going beyond the size of L2 cache.
1694                  *
1695                  * OK, so we don't know how big the cache is.  So guess.
1696                  */
1697                 batch = zone->present_pages / 1024;
1698                 if (batch * PAGE_SIZE > 256 * 1024)
1699                         batch = (256 * 1024) / PAGE_SIZE;
1700                 batch /= 4;             /* We effectively *= 4 below */
1701                 if (batch < 1)
1702                         batch = 1;
1703
1704                 /*
1705                  * Clamp the batch to a 2^n - 1 value. Having a power
1706                  * of 2 value was found to be more likely to have
1707                  * suboptimal cache aliasing properties in some cases.
1708                  *
1709                  * For example if 2 tasks are alternately allocating
1710                  * batches of pages, one task can end up with a lot
1711                  * of pages of one half of the possible page colors
1712                  * and the other with pages of the other colors.
1713                  */
1714                 batch = (1 << fls(batch + batch/2)) - 1;
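                /*
                 * Worked example (illustrative): a 1GB zone with 4KB
                 * pages has 262144 present pages: 262144/1024 = 256,
                 * clipped to (256*1024)/4096 = 64, then /4 = 16, and
                 * the clamp gives (1 << fls(24)) - 1 = 31.
                 */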
1715
1716                 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1717                         struct per_cpu_pages *pcp;
1718
1719                         pcp = &zone->pageset[cpu].pcp[0];       /* hot */
1720                         pcp->count = 0;
1721                         pcp->low = 2 * batch;
1722                         pcp->high = 6 * batch;
1723                         pcp->batch = 1 * batch;
1724                         INIT_LIST_HEAD(&pcp->list);
1725
1726                         pcp = &zone->pageset[cpu].pcp[1];       /* cold */
1727                         pcp->count = 0;
1728                         pcp->low = 0;
1729                         pcp->high = 2 * batch;
1730                         pcp->batch = 1 * batch;
1731                         INIT_LIST_HEAD(&pcp->list);
1732                 }
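                /*
                 * Continuing the illustration above (batch = 31): a hot
                 * list refills 31 pages at a time once it drops below
                 * 62 and is trimmed once it grows past 186.
                 */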
1733                 printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
1734                                 zone_names[j], realsize, batch);
1735                 INIT_LIST_HEAD(&zone->active_list);
1736                 INIT_LIST_HEAD(&zone->inactive_list);
1737                 zone->nr_scan_active = 0;
1738                 zone->nr_scan_inactive = 0;
1739                 zone->nr_active = 0;
1740                 zone->nr_inactive = 0;
1741                 if (!size)
1742                         continue;
1743
1744                 /*
1745                  * The per-page waitqueue mechanism uses hashed waitqueues
1746                  * per zone.
1747                  */
1748                 zone->wait_table_size = wait_table_size(size);
1749                 zone->wait_table_bits =
1750                         wait_table_bits(zone->wait_table_size);
1751                 zone->wait_table = (wait_queue_head_t *)
1752                         alloc_bootmem_node(pgdat, zone->wait_table_size
1753                                                 * sizeof(wait_queue_head_t));
1754
1755                 for (i = 0; i < zone->wait_table_size; ++i)
1756                         init_waitqueue_head(zone->wait_table + i);
1757
1758                 pgdat->nr_zones = j+1;
1759
1760                 zone->zone_mem_map = pfn_to_page(zone_start_pfn);
1761                 zone->zone_start_pfn = zone_start_pfn;
1762
1763                 if ((zone_start_pfn) & (zone_required_alignment-1))
1764                         printk(KERN_CRIT "BUG: zone start pfn not aligned to 1<<(MAX_ORDER-1), the buddy allocator will break\n");
1765
1766                 memmap_init(size, nid, j, zone_start_pfn);
1767
1768                 zone_start_pfn += size;
1769
1770                 zone_init_free_lists(pgdat, zone, zone->spanned_pages);
1771         }
1772 }
1773
1774 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1775 {
1776         unsigned long size;
1777
1778         /* Skip empty nodes */
1779         if (!pgdat->node_spanned_pages)
1780                 return;
1781
1782         /* ia64 gets its own node_mem_map, before this, without bootmem */
1783         if (!pgdat->node_mem_map) {
1784                 size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
1785                 pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
1786         }
1787 #ifndef CONFIG_DISCONTIGMEM
1788         /*
1789          * With no DISCONTIG, the global mem_map is just set as node 0's
1790          */
1791         if (pgdat == NODE_DATA(0))
1792                 mem_map = NODE_DATA(0)->node_mem_map;
1793 #endif
1794 }
1795
1796 void __init free_area_init_node(int nid, struct pglist_data *pgdat,
1797                 unsigned long *zones_size, unsigned long node_start_pfn,
1798                 unsigned long *zholes_size)
1799 {
1800         pgdat->node_id = nid;
1801         pgdat->node_start_pfn = node_start_pfn;
1802         calculate_zone_totalpages(pgdat, zones_size, zholes_size);
1803
1804         alloc_node_mem_map(pgdat);
1805
1806         free_area_init_core(pgdat, zones_size, zholes_size);
1807 }
1808
1809 #ifndef CONFIG_DISCONTIGMEM
1810 static bootmem_data_t contig_bootmem_data;
1811 struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
1812
1813 EXPORT_SYMBOL(contig_page_data);
1814
1815 void __init free_area_init(unsigned long *zones_size)
1816 {
1817         free_area_init_node(0, &contig_page_data, zones_size,
1818                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
1819 }
1820 #endif
1821
1822 #ifdef CONFIG_PROC_FS
1823
1824 #include <linux/seq_file.h>
1825
1826 static void *frag_start(struct seq_file *m, loff_t *pos)
1827 {
1828         pg_data_t *pgdat;
1829         loff_t node = *pos;
1830
1831         for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
1832                 --node;
1833
1834         return pgdat;
1835 }
1836
1837 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1838 {
1839         pg_data_t *pgdat = (pg_data_t *)arg;
1840
1841         (*pos)++;
1842         return pgdat->pgdat_next;
1843 }
1844
1845 static void frag_stop(struct seq_file *m, void *arg)
1846 {
1847 }
1848
1849 /* 
1850  * This walks the free areas for each zone.
1851  */
1852 static int frag_show(struct seq_file *m, void *arg)
1853 {
1854         pg_data_t *pgdat = (pg_data_t *)arg;
1855         struct zone *zone;
1856         struct zone *node_zones = pgdat->node_zones;
1857         unsigned long flags;
1858         int order;
1859
1860         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1861                 if (!zone->present_pages)
1862                         continue;
1863
1864                 spin_lock_irqsave(&zone->lock, flags);
1865                 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1866                 for (order = 0; order < MAX_ORDER; ++order)
1867                         seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1868                 spin_unlock_irqrestore(&zone->lock, flags);
1869                 seq_putc(m, '\n');
1870         }
1871         return 0;
1872 }
1873
1874 struct seq_operations fragmentation_op = {
1875         .start  = frag_start,
1876         .next   = frag_next,
1877         .stop   = frag_stop,
1878         .show   = frag_show,
1879 };
1880
1881 /*
1882  * Output information about zones in @pgdat.
1883  */
1884 static int zoneinfo_show(struct seq_file *m, void *arg)
1885 {
1886         pg_data_t *pgdat = arg;
1887         struct zone *zone;
1888         struct zone *node_zones = pgdat->node_zones;
1889         unsigned long flags;
1890
1891         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
1892                 int i;
1893
1894                 if (!zone->present_pages)
1895                         continue;
1896
1897                 spin_lock_irqsave(&zone->lock, flags);
1898                 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1899                 seq_printf(m,
1900                            "\n  pages free     %lu"
1901                            "\n        min      %lu"
1902                            "\n        low      %lu"
1903                            "\n        high     %lu"
1904                            "\n        active   %lu"
1905                            "\n        inactive %lu"
1906                            "\n        scanned  %lu (a: %lu i: %lu)"
1907                            "\n        spanned  %lu"
1908                            "\n        present  %lu",
1909                            zone->free_pages,
1910                            zone->pages_min,
1911                            zone->pages_low,
1912                            zone->pages_high,
1913                            zone->nr_active,
1914                            zone->nr_inactive,
1915                            zone->pages_scanned,
1916                            zone->nr_scan_active, zone->nr_scan_inactive,
1917                            zone->spanned_pages,
1918                            zone->present_pages);
1919                 seq_printf(m,
1920                            "\n        protection: (%lu",
1921                            zone->lowmem_reserve[0]);
1922                 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1923                         seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
1924                 seq_printf(m,
1925                            ")"
1926                            "\n  pagesets");
1927                 for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) {
1928                         struct per_cpu_pageset *pageset;
1929                         int j;
1930
1931                         pageset = &zone->pageset[i];
1932                         for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
1933                                 if (pageset->pcp[j].count)
1934                                         break;
1935                         }
1936                         if (j == ARRAY_SIZE(pageset->pcp))
1937                                 continue;
1938                         for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
1939                                 seq_printf(m,
1940                                            "\n    cpu: %i pcp: %i"
1941                                            "\n              count: %i"
1942                                            "\n              low:   %i"
1943                                            "\n              high:  %i"
1944                                            "\n              batch: %i",
1945                                            i, j,
1946                                            pageset->pcp[j].count,
1947                                            pageset->pcp[j].low,
1948                                            pageset->pcp[j].high,
1949                                            pageset->pcp[j].batch);
1950                         }
1951 #ifdef CONFIG_NUMA
1952                         seq_printf(m,
1953                                    "\n            numa_hit:       %lu"
1954                                    "\n            numa_miss:      %lu"
1955                                    "\n            numa_foreign:   %lu"
1956                                    "\n            interleave_hit: %lu"
1957                                    "\n            local_node:     %lu"
1958                                    "\n            other_node:     %lu",
1959                                    pageset->numa_hit,
1960                                    pageset->numa_miss,
1961                                    pageset->numa_foreign,
1962                                    pageset->interleave_hit,
1963                                    pageset->local_node,
1964                                    pageset->other_node);
1965 #endif
1966                 }
1967                 seq_printf(m,
1968                            "\n  all_unreclaimable: %u"
1969                            "\n  prev_priority:     %i"
1970                            "\n  temp_priority:     %i"
1971                            "\n  start_pfn:         %lu",
1972                            zone->all_unreclaimable,
1973                            zone->prev_priority,
1974                            zone->temp_priority,
1975                            zone->zone_start_pfn);
1976                 spin_unlock_irqrestore(&zone->lock, flags);
1977                 seq_putc(m, '\n');
1978         }
1979         return 0;
1980 }
1981
1982 struct seq_operations zoneinfo_op = {
1983         .start  = frag_start, /* iterate over all zones. The same as in
1984                                * fragmentation. */
1985         .next   = frag_next,
1986         .stop   = frag_stop,
1987         .show   = zoneinfo_show,
1988 };
1989
1990 static char *vmstat_text[] = {
1991         "nr_dirty",
1992         "nr_writeback",
1993         "nr_unstable",
1994         "nr_page_table_pages",
1995         "nr_mapped",
1996         "nr_slab",
1997
1998         "pgpgin",
1999         "pgpgout",
2000         "pswpin",
2001         "pswpout",
2002         "pgalloc_high",
2003
2004         "pgalloc_normal",
2005         "pgalloc_dma",
2006         "pgfree",
2007         "pgactivate",
2008         "pgdeactivate",
2009
2010         "pgfault",
2011         "pgmajfault",
2012         "pgrefill_high",
2013         "pgrefill_normal",
2014         "pgrefill_dma",
2015
2016         "pgsteal_high",
2017         "pgsteal_normal",
2018         "pgsteal_dma",
2019         "pgscan_kswapd_high",
2020         "pgscan_kswapd_normal",
2021
2022         "pgscan_kswapd_dma",
2023         "pgscan_direct_high",
2024         "pgscan_direct_normal",
2025         "pgscan_direct_dma",
2026         "pginodesteal",
2027
2028         "slabs_scanned",
2029         "kswapd_steal",
2030         "kswapd_inodesteal",
2031         "pageoutrun",
2032         "allocstall",
2033
2034         "pgrotated",
2035         "nr_bounce",
2036 };
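/*
 * Note: the ordering of the names above must match the member layout
 * of struct page_state, since vmstat_start() below walks the structure
 * as a flat array of unsigned longs indexed in parallel with this table.
 */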
2037
2038 static void *vmstat_start(struct seq_file *m, loff_t *pos)
2039 {
2040         struct page_state *ps;
2041
2042         if (*pos >= ARRAY_SIZE(vmstat_text))
2043                 return NULL;
2044
2045         ps = kmalloc(sizeof(*ps), GFP_KERNEL);
2046         m->private = ps;
2047         if (!ps)
2048                 return ERR_PTR(-ENOMEM);
2049         get_full_page_state(ps);
2050         ps->pgpgin /= 2;                /* sectors -> kbytes */
2051         ps->pgpgout /= 2;
2052         return (unsigned long *)ps + *pos;
2053 }
2054
2055 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
2056 {
2057         (*pos)++;
2058         if (*pos >= ARRAY_SIZE(vmstat_text))
2059                 return NULL;
2060         return (unsigned long *)m->private + *pos;
2061 }
2062
2063 static int vmstat_show(struct seq_file *m, void *arg)
2064 {
2065         unsigned long *l = arg;
2066         unsigned long off = l - (unsigned long *)m->private;
2067
2068         seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
2069         return 0;
2070 }
2071
2072 static void vmstat_stop(struct seq_file *m, void *arg)
2073 {
2074         kfree(m->private);
2075         m->private = NULL;
2076 }
2077
2078 struct seq_operations vmstat_op = {
2079         .start  = vmstat_start,
2080         .next   = vmstat_next,
2081         .stop   = vmstat_stop,
2082         .show   = vmstat_show,
2083 };
2084
2085 #endif /* CONFIG_PROC_FS */
2086
2087 #ifdef CONFIG_HOTPLUG_CPU
2088 static int page_alloc_cpu_notify(struct notifier_block *self,
2089                                  unsigned long action, void *hcpu)
2090 {
2091         int cpu = (unsigned long)hcpu;
2092         long *count;
2093         unsigned long *src, *dest;
2094
2095         if (action == CPU_DEAD) {
2096                 int i;
2097
2098                 /* Drain local pagecache count. */
2099                 count = &per_cpu(nr_pagecache_local, cpu);
2100                 atomic_add(*count, &nr_pagecache);
2101                 *count = 0;
2102                 local_irq_disable();
2103                 __drain_pages(cpu);
2104
2105                 /* Add dead cpu's page_states to our own. */
2106                 dest = (unsigned long *)&__get_cpu_var(page_states);
2107                 src = (unsigned long *)&per_cpu(page_states, cpu);
2108
2109                 for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
2110                                 i++) {
2111                         dest[i] += src[i];
2112                         src[i] = 0;
2113                 }
2114
2115                 local_irq_enable();
2116         }
2117         return NOTIFY_OK;
2118 }
2119 #endif /* CONFIG_HOTPLUG_CPU */
2120
2121 void __init page_alloc_init(void)
2122 {
2123         hotcpu_notifier(page_alloc_cpu_notify, 0);
2124 }
2125
2126 /*
2127  * setup_per_zone_lowmem_reserve - called whenever
2128  *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
2129  *      has a correct pages reserved value, so an adequate number of
2130  *      pages are left in the zone after a successful __alloc_pages().
2131  */
2132 static void setup_per_zone_lowmem_reserve(void)
2133 {
2134         struct pglist_data *pgdat;
2135         int j, idx;
2136
2137         for_each_pgdat(pgdat) {
2138                 for (j = 0; j < MAX_NR_ZONES; j++) {
2139                         struct zone *zone = pgdat->node_zones + j;
2140                         unsigned long present_pages = zone->present_pages;
2141
2142                         zone->lowmem_reserve[j] = 0;
2143
2144                         for (idx = j-1; idx >= 0; idx--) {
2145                                 struct zone *lower_zone;
2146
2147                                 if (sysctl_lowmem_reserve_ratio[idx] < 1)
2148                                         sysctl_lowmem_reserve_ratio[idx] = 1;
2149
2150                                 lower_zone = pgdat->node_zones + idx;
2151                                 lower_zone->lowmem_reserve[j] = present_pages /
2152                                         sysctl_lowmem_reserve_ratio[idx];
2153                                 present_pages += lower_zone->present_pages;
2154                         }
2155                 }
2156         }
2157 }
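/*
 * Worked example (illustrative): for a node with 4096 DMA pages,
 * 229376 normal pages and 32768 highmem pages and the default ratios
 * { 256, 32 }, this gives DMA->lowmem_reserve[NORMAL] = 896,
 * NORMAL->lowmem_reserve[HIGHMEM] = 1024 and
 * DMA->lowmem_reserve[HIGHMEM] = (32768 + 229376) / 256 = 1024 pages.
 */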
2158
2159 /*
2160  * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures 
2161  *      that the pages_{min,low,high} values for each zone are set correctly 
2162  *      with respect to min_free_kbytes.
2163  */
2164 static void setup_per_zone_pages_min(void)
2165 {
2166         unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
2167         unsigned long lowmem_pages = 0;
2168         struct zone *zone;
2169         unsigned long flags;
2170
2171         /* Calculate total number of !ZONE_HIGHMEM pages */
2172         for_each_zone(zone) {
2173                 if (!is_highmem(zone))
2174                         lowmem_pages += zone->present_pages;
2175         }
2176
2177         for_each_zone(zone) {
2178                 spin_lock_irqsave(&zone->lru_lock, flags);
2179                 if (is_highmem(zone)) {
2180                         /*
2181                          * Often, highmem doesn't need to reserve any pages.
2182                          * But the pages_min/low/high values are also used for
2183                          * batching up page reclaim activity so we need a
2184                          * decent value here.
2185                          */
2186                         int min_pages;
2187
2188                         min_pages = zone->present_pages / 1024;
2189                         if (min_pages < SWAP_CLUSTER_MAX)
2190                                 min_pages = SWAP_CLUSTER_MAX;
2191                         if (min_pages > 128)
2192                                 min_pages = 128;
2193                         zone->pages_min = min_pages;
2194                 } else {
2195                         /* if it's a lowmem zone, reserve a number of pages
2196                          * proportionate to the zone's size.
2197                          */
2198                         zone->pages_min = (pages_min * zone->present_pages) /
2199                                            lowmem_pages;
2200                 }
2201
2202                 /*
2203                  * When interpreting these watermarks, keep in mind the
2204                  * ladder: pages_min = 4/4, pages_low = 5/4, pages_high = 6/4.
2205                  */
2206                 zone->pages_low   = (zone->pages_min * 5) / 4;
2207                 zone->pages_high  = (zone->pages_min * 6) / 4;
2208                 spin_unlock_irqrestore(&zone->lru_lock, flags);
2209         }
2210 }
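/*
 * Worked example (illustrative): with min_free_kbytes = 1024 and 4KB
 * pages, pages_min totals 1024 >> 2 = 256 pages; a lowmem zone holding
 * half of lowmem gets pages_min = 128, pages_low = 160, pages_high = 192.
 */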
2211
2212 /*
2213  * Initialise min_free_kbytes.
2214  *
2215  * For small machines we want it small (128k min).  For large machines
2216  * we want it large (64MB max).  But it is not linear, because network
2217  * bandwidth does not increase linearly with machine size.  We use
2218  *
2219  *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
2220  *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
2221  *
2222  * which yields
2223  *
2224  * 16MB:        512k
2225  * 32MB:        724k
2226  * 64MB:        1024k
2227  * 128MB:       1448k
2228  * 256MB:       2048k
2229  * 512MB:       2896k
2230  * 1024MB:      4096k
2231  * 2048MB:      5792k
2232  * 4096MB:      8192k
2233  * 8192MB:      11584k
2234  * 16384MB:     16384k
2235  */
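/*
 * Note (illustrative): int_sqrt(lowmem_kbytes * 16) equals
 * 4 * sqrt(lowmem_kbytes) but keeps two extra bits of precision in the
 * integer square root, e.g. 32MB: int_sqrt(32768 * 16) = 724.
 */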
2236 static int __init init_per_zone_pages_min(void)
2237 {
2238         unsigned long lowmem_kbytes;
2239
2240         lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
2241
2242         min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
2243         if (min_free_kbytes < 128)
2244                 min_free_kbytes = 128;
2245         if (min_free_kbytes > 65536)
2246                 min_free_kbytes = 65536;
2247         setup_per_zone_pages_min();
2248         setup_per_zone_lowmem_reserve();
2249         return 0;
2250 }
2251 module_init(init_per_zone_pages_min)
2252
2253 /*
2254  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
2255  *      that we can call two helper functions whenever min_free_kbytes
2256  *      changes.
2257  */
2258 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
2259         struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2260 {
2261         proc_dointvec(table, write, file, buffer, length, ppos);
2262         setup_per_zone_pages_min();
2263         return 0;
2264 }
2265
2266 /*
2267  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
2268  *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
2269  *      whenever sysctl_lowmem_reserve_ratio changes.
2270  *
2271  * The reserve ratio has no relation to the pages_min watermarks;
2272  * it is only meaningful relative to the zone sizes established
2273  * at boot time.
2274  */
2275 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
2276         struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2277 {
2278         proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2279         setup_per_zone_lowmem_reserve();
2280         return 0;
2281 }
2282
2283 __initdata int hashdist = HASHDIST_DEFAULT;
2284
2285 #ifdef CONFIG_NUMA
2286 static int __init set_hashdist(char *str)
2287 {
2288         if (!str)
2289                 return 0;
2290         hashdist = simple_strtoul(str, &str, 0);
2291         return 1;
2292 }
2293 __setup("hashdist=", set_hashdist);
2294 #endif
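/*
 * When hashdist is set on NUMA, alloc_large_system_hash() below uses
 * __vmalloc() so the table's pages can be spread across nodes instead
 * of coming from a single contiguous allocation.
 */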
2295
2296 /*
2297  * allocate a large system hash table from bootmem
2298  * - it is assumed that the hash table must contain an exact power-of-2
2299  *   quantity of entries
2300  * - limit is the number of hash buckets, not the total allocation size
2301  */
2302 void *__init alloc_large_system_hash(const char *tablename,
2303                                      unsigned long bucketsize,
2304                                      unsigned long numentries,
2305                                      int scale,
2306                                      int flags,
2307                                      unsigned int *_hash_shift,
2308                                      unsigned int *_hash_mask,
2309                                      unsigned long limit)
2310 {
2311         unsigned long long max = limit;
2312         unsigned long log2qty, size;
2313         void *table = NULL;
2314
2315         /* allow the kernel cmdline to have a say */
2316         if (!numentries) {
2317                 /* round applicable memory size up to nearest megabyte */
2318                 numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
2319                 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
2320                 numentries >>= 20 - PAGE_SHIFT;
2321                 numentries <<= 20 - PAGE_SHIFT;
2322
2323                 /* limit to 1 bucket per 2^scale bytes of low memory */
2324                 if (scale > PAGE_SHIFT)
2325                         numentries >>= (scale - PAGE_SHIFT);
2326                 else
2327                         numentries <<= (PAGE_SHIFT - scale);
2328         }
2329         /* round up to the next power of 2 (an exact power of 2 is doubled) */
2330         numentries = 1UL << (long_log2(numentries) + 1);
2331
2332         /* limit allocation size to 1/16 total memory by default */
2333         if (max == 0) {
2334                 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2335                 do_div(max, bucketsize);
2336         }
2337
2338         if (numentries > max)
2339                 numentries = max;
2340
2341         log2qty = long_log2(numentries);
2342
2343         do {
2344                 size = bucketsize << log2qty;
2345                 if (flags & HASH_EARLY)
2346                         table = alloc_bootmem(size);
2347                 else if (hashdist)
2348                         table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
2349                 else {
2350                         unsigned long order;
2351                         for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
2352                                 ;
2353                         table = (void*) __get_free_pages(GFP_ATOMIC, order);
2354                 }
2355         } while (!table && size > PAGE_SIZE && --log2qty);
2356
2357         if (!table)
2358                 panic("Failed to allocate %s hash table\n", tablename);
2359
2360         printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
2361                tablename,
2362                (1U << log2qty),
2363                long_log2(size) - PAGE_SHIFT,
2364                size);
2365
2366         if (_hash_shift)
2367                 *_hash_shift = log2qty;
2368         if (_hash_mask)
2369                 *_hash_mask = (1 << log2qty) - 1;
2370
2371         return table;
2372 }
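/*
 * Worked example (illustrative): with 1GB of lowmem (nr_kernel_pages =
 * 262144), scale = 15 and bucketsize = 16, numentries = 262144 >> 3 =
 * 32768, which the power-of-2 round-up doubles to 65536 buckets, so
 * log2qty = 16 and the table occupies 16 << 16 = 1MB.
 */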