include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 369cf06..1a8faf0 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -6,7 +6,6 @@
 #include <linux/swap.h>
 #include <linux/smp.h>
 #include <linux/highmem.h>
-#include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-void show_mem(void)
-{
-       int total = 0, reserved = 0;
-       int shared = 0, cached = 0;
-       int highmem = 0;
-       struct page *page;
-       pg_data_t *pgdat;
-       unsigned long i;
-       unsigned long flags;
-
-       printk(KERN_INFO "Mem-info:\n");
-       show_free_areas();
-       for_each_online_pgdat(pgdat) {
-               pgdat_resize_lock(pgdat, &flags);
-               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-                               touch_nmi_watchdog();
-                       page = pgdat_page_nr(pgdat, i);
-                       total++;
-                       if (PageHighMem(page))
-                               highmem++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (page_count(page))
-                               shared += page_count(page) - 1;
-               }
-               pgdat_resize_unlock(pgdat, &flags);
-       }
-       printk(KERN_INFO "%d pages of RAM\n", total);
-       printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
-       printk(KERN_INFO "%d reserved pages\n", reserved);
-       printk(KERN_INFO "%d pages shared\n", shared);
-       printk(KERN_INFO "%d pages swap cached\n", cached);
-
-       printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-       printk(KERN_INFO "%lu pages writeback\n",
-                                       global_page_state(NR_WRITEBACK));
-       printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-       printk(KERN_INFO "%lu pages slab\n",
-               global_page_state(NR_SLAB_RECLAIMABLE) +
-               global_page_state(NR_SLAB_UNRECLAIMABLE));
-       printk(KERN_INFO "%lu pages pagetables\n",
-                                       global_page_state(NR_PAGETABLE));
-}
+unsigned int __VMALLOC_RESERVE = 128 << 20;
 
 /*
  * Associate a virtual page frame with a given physical page frame 
  * and protection flags for that frame.
  */ 
-static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -94,8 +48,8 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
-       if (pgprot_val(flags))
-               set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
+       if (pte_val(pteval))
+               set_pte_at(&init_mm, vaddr, pte, pteval);
        else
                pte_clear(&init_mm, vaddr, pte);
 
@@ -141,34 +95,39 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
        __flush_tlb_one(vaddr);
 }
 
-static int fixmaps;
 unsigned long __FIXADDR_TOP = 0xfffff000;
 EXPORT_SYMBOL(__FIXADDR_TOP);
 
-void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the
+ * vmalloc area - the default is 128m.
+ */
+static int __init parse_vmalloc(char *arg)
 {
-       unsigned long address = __fix_to_virt(idx);
+       if (!arg)
+               return -EINVAL;
 
-       if (idx >= __end_of_fixed_addresses) {
-               BUG();
-               return;
-       }
-       set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-       fixmaps++;
+       /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/
+       __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
+       return 0;
 }
+early_param("vmalloc", parse_vmalloc);
 
-/**
- * reserve_top_address - reserves a hole in the top of kernel address space
- * @reserve - size of hole to reserve
- *
- * Can be used to relocate the fixmap area and poke a hole in the top
- * of kernel address space to make room for a hypervisor.
+/*
+ * reservetop=size reserves a hole at the top of the kernel address space which
+ * a hypervisor can load into later.  Needed for dynamically loaded hypervisors,
+ * so relocating the fixmap can be done before paging initialization.
  */
-void reserve_top_address(unsigned long reserve)
+static int __init parse_reservetop(char *arg)
 {
-       BUG_ON(fixmaps > 0);
-       printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
-              (int)-reserve);
-       __FIXADDR_TOP = -reserve - PAGE_SIZE;
-       __VMALLOC_RESERVE += reserve;
+       unsigned long address;
+
+       if (!arg)
+               return -EINVAL;
+
+       address = memparse(arg, &arg);
+       reserve_top_address(address);
+       return 0;
 }
+early_param("reservetop", parse_reservetop);
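
Both new early_param() handlers parse their argument with memparse(), which accepts an optional K/M/G suffix, so the options are passed on the kernel command line, e.g. "vmalloc=192M reservetop=64M". parse_vmalloc() adds VMALLOC_OFFSET for the guard hole before setting __VMALLOC_RESERVE, and parse_reservetop() forwards the parsed size to reserve_top_address(), which (as the removed hunk above shows) lowers __FIXADDR_TOP and grows __VMALLOC_RESERVE by the same amount. Below is a minimal userspace sketch of the vmalloc= arithmetic, not kernel code; the 8 MB value used for VMALLOC_OFFSET is an assumption for illustration, not something stated in this diff.

	/* vmalloc_calc.c - illustrative only; mimics memparse()-style suffixes */
	#include <stdio.h>
	#include <stdlib.h>

	#define VMALLOC_OFFSET (8UL * 1024 * 1024)	/* assumed 8 MB guard hole */

	static unsigned long parse_size(const char *arg)
	{
		char *end;
		unsigned long val = strtoul(arg, &end, 0);

		switch (*end) {		/* suffix handling as memparse() does it */
		case 'G': case 'g': val <<= 10; /* fall through */
		case 'M': case 'm': val <<= 10; /* fall through */
		case 'K': case 'k': val <<= 10;
		}
		return val;
	}

	int main(void)
	{
		/* "vmalloc=192M" -> requested area size plus the guard hole */
		unsigned long reserve = parse_size("192M") + VMALLOC_OFFSET;

		printf("__VMALLOC_RESERVE would become %lu MB\n", reserve >> 20);
		return 0;
	}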
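
A hedged illustration of the set_pte_vaddr() change earlier in the diff (the updated call site itself is not part of this diff): set_pte_pfn() took a pfn plus protection bits and combined them internally, whereas set_pte_vaddr() expects the caller to build the pte, typically via pfn_pte(). A former caller such as the removed __set_fixmap() would migrate roughly as follows:

	/* old interface: helper combined pfn and protection bits itself */
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);

	/* new interface: caller constructs the pte and hands it over;
	 * set_pte_at() replaces set_pte_present() underneath.
	 */
	set_pte_vaddr(address, pfn_pte(phys >> PAGE_SHIFT, flags));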