[PATCH] FRV: Clean up bootmem allocator's page freeing algorithm
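
The main change adds a __free_pages_bootmem() entry point to mm/page_alloc.c so
that the bootmem allocator can hand memory back to the buddy allocator without
going through the per-page validation performed by __free_pages_ok().  Order-0
pages are pushed through free_hot_cold_page(); higher-order blocks have each
constituent page initialised (PG_reserved cleared, page count set to zero) and
are then released in a single free_pages_bulk() call.

The diff below also carries several page_alloc cleanups: the per-cpu pageset
'low' watermark is dropped (lists are refilled only when empty), expand()
becomes void and __rmqueue() returns the page directly, rmqueue_bulk() returns
its loop counter, and __get_page_state() becomes static and no longer masks
against cpu_online_map.
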
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 088712f..cdad324 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -53,6 +53,8 @@ unsigned long totalram_pages __read_mostly;
 unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
 
+static void fastcall free_hot_cold_page(struct page *page, int cold);
+
 /*
  * results with 256, 32 in the lowmem_reserve sysctl:
  *     1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
@@ -432,6 +434,39 @@ void __free_pages_ok(struct page *page, unsigned int order)
        local_irq_restore(flags);
 }
 
+/*
+ * permit the bootmem allocator to evade page validation on high-order frees
+ */
+void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+{
+       if (order == 0) {
+               __ClearPageReserved(page);
+               set_page_count(page, 0);
+
+               free_hot_cold_page(page, 0);
+       } else {
+               LIST_HEAD(list);
+               int loop;
+
+               for (loop = 0; loop < BITS_PER_LONG; loop++) {
+                       struct page *p = &page[loop];
+
+                       if (loop + 16 < BITS_PER_LONG)
+                               prefetchw(p + 16);
+                       __ClearPageReserved(p);
+                       set_page_count(p, 0);
+               }
+
+               arch_free_page(page, order);
+
+               mod_page_state(pgfree, 1 << order);
+
+               list_add(&page->lru, &list);
+               kernel_map_pages(page, 1 << order, 0);
+               free_pages_bulk(page_zone(page), 1, &list, order);
+       }
+}
+
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
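
The mm/bootmem.c side of the hunk above is not shown here.  As a rough sketch
only (the function and variable names below are illustrative, not taken from
the patch), the bootmem free pass can hand back a whole BITS_PER_LONG-page
block in one call when its bitmap word shows every page free, and fall back to
order-0 frees otherwise:

    /*
     * Illustrative caller sketch -- not part of this diff.  Assumes the usual
     * bootmem convention that a set bit in 'map_word' marks a page that is
     * still allocated/reserved.
     */
    static void __init release_bootmem_word(struct page *page, unsigned long map_word)
    {
            if (map_word == 0UL) {
                    /* every page in this word is free: one high-order free */
                    int order = ffs(BITS_PER_LONG) - 1;  /* 2^order == BITS_PER_LONG */

                    __free_pages_bootmem(page, order);
            } else {
                    int i;

                    for (i = 0; i < BITS_PER_LONG; i++)
                            if (!(map_word & (1UL << i)))
                                    __free_pages_bootmem(page + i, 0);
            }
    }

Note that the order != 0 path of __free_pages_bootmem() always initialises
exactly BITS_PER_LONG pages, so it is only meant to be called with order 0 or
with the order covering one whole bitmap word, as in the sketch.
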
@@ -447,8 +482,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
  *
  * -- wli
  */
-static inline struct page *
-expand(struct zone *zone, struct page *page,
+static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area)
 {
        unsigned long size = 1 << high;
@@ -462,7 +496,6 @@ expand(struct zone *zone, struct page *page,
                area->nr_free++;
                set_page_order(&page[size], high);
        }
-       return page;
 }
 
 /*
@@ -522,7 +555,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
                rmv_page_order(page);
                area->nr_free--;
                zone->free_pages -= 1UL << order;
-               return expand(zone, page, order, current_order, area);
+               expand(zone, page, order, current_order, area);
+               return page;
        }
 
        return NULL;
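
The expand()/__rmqueue() hunks above only show fragments of expand().  Roughly,
reconstructed from the context lines (so details may differ from the tree), the
splitter now looks like this after the change: it carves an order-'high' block
down to the requested order 'low', queueing the unused upper half of each split
on the appropriate free list, and leaves returning the page to __rmqueue():

    /* Approximate shape of expand() after this patch -- a reconstruction,
     * not copied from the tree. */
    static inline void expand(struct zone *zone, struct page *page,
            int low, int high, struct free_area *area)
    {
            unsigned long size = 1 << high;

            while (high > low) {
                    area--;
                    high--;
                    size >>= 1;
                    list_add(&page[size].lru, &area->free_list);
                    area->nr_free++;
                    set_page_order(&page[size], high);
            }
    }
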
@@ -537,19 +571,16 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list)
 {
        int i;
-       int allocated = 0;
-       struct page *page;
        
        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
-               page = __rmqueue(zone, order);
-               if (page == NULL)
+               struct page *page = __rmqueue(zone, order);
+               if (unlikely(page == NULL))
                        break;
-               allocated++;
                list_add_tail(&page->lru, list);
        }
        spin_unlock(&zone->lock);
-       return allocated;
+       return i;
 }
 
 #ifdef CONFIG_NUMA
@@ -675,7 +706,6 @@ static void zone_statistics(struct zonelist *zonelist, struct zone *z)
 /*
  * Free a 0-order page
  */
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
 static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
        struct zone *zone = page_zone(page);
@@ -740,7 +770,7 @@ again:
                page = NULL;
                pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
                local_irq_save(flags);
-               if (pcp->count <= pcp->low)
+               if (!pcp->count)
                        pcp->count += rmqueue_bulk(zone, 0,
                                                pcp->batch, &pcp->list);
                if (likely(pcp->count)) {
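
Note the changed refill condition in the allocation path above: the per-cpu
list is now topped up from the zone only when it is completely empty, rather
than when it falls to the old 'low' watermark.  The later hunks that remove
pcp->low from setup_pageset(), show_free_areas() and zoneinfo_show() are the
other half of that cleanup.
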
@@ -1169,12 +1199,11 @@ EXPORT_SYMBOL(nr_pagecache);
 DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
 #endif
 
-void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
+static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
        int cpu = 0;
 
        memset(ret, 0, sizeof(*ret));
-       cpus_and(*cpumask, *cpumask, cpu_online_map);
 
        cpu = first_cpu(*cpumask);
        while (cpu < NR_CPUS) {
@@ -1227,7 +1256,7 @@ unsigned long __read_page_state(unsigned long offset)
        unsigned long ret = 0;
        int cpu;
 
-       for_each_online_cpu(cpu) {
+       for_each_cpu(cpu) {
                unsigned long in;
 
                in = (unsigned long)&per_cpu(page_states, cpu) + offset;
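
With the cpus_and() against cpu_online_map gone from __get_page_state(), and
__read_page_state() switched from for_each_online_cpu() to for_each_cpu()
(which walks the possible-CPU map), the page-state sums are now taken over
every possible CPU rather than only the currently online ones, so counts
accumulated by a CPU before it went offline are no longer dropped.
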
@@ -1345,10 +1374,9 @@ void show_free_areas(void)
                        pageset = zone_pcp(zone, cpu);
 
                        for (temperature = 0; temperature < 2; temperature++)
-                               printk("cpu %d %s: low %d, high %d, batch %d used:%d\n",
+                               printk("cpu %d %s: high %d, batch %d used:%d\n",
                                        cpu,
                                        temperature ? "cold" : "hot",
-                                       pageset->pcp[temperature].low,
                                        pageset->pcp[temperature].high,
                                        pageset->pcp[temperature].batch,
                                        pageset->pcp[temperature].count);
@@ -1790,14 +1818,12 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 
        pcp = &p->pcp[0];               /* hot */
        pcp->count = 0;
-       pcp->low = 0;
        pcp->high = 6 * batch;
        pcp->batch = max(1UL, 1 * batch);
        INIT_LIST_HEAD(&pcp->list);
 
        pcp = &p->pcp[1];               /* cold*/
        pcp->count = 0;
-       pcp->low = 0;
        pcp->high = 2 * batch;
        pcp->batch = max(1UL, batch/2);
        INIT_LIST_HEAD(&pcp->list);
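
As a concrete example of the defaults set up above: a zone batch size of 16
gives the hot list high = 96 pages with batch = 16, and the cold list
high = 32 pages with batch = 8; the max(1UL, ...) guards only matter for very
small zones where the computed batch would otherwise round down to zero.
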
@@ -2193,12 +2219,10 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
                                seq_printf(m,
                                           "\n    cpu: %i pcp: %i"
                                           "\n              count: %i"
-                                          "\n              low:   %i"
                                           "\n              high:  %i"
                                           "\n              batch: %i",
                                           i, j,
                                           pageset->pcp[j].count,
-                                          pageset->pcp[j].low,
                                           pageset->pcp[j].high,
                                           pageset->pcp[j].batch);
                        }