include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
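
The rule this series enforces: a file that calls the page allocator must include <linux/gfp.h> itself rather than inheriting it implicitly through slab.h or percpu.h, which is what the first hunk below spells out for this file. A minimal sketch of the pattern, with an illustrative helper that is not taken from this file:

    /* Explicit include: do not rely on slab.h/percpu.h dragging it in. */
    #include <linux/gfp.h>  /* GFP_KERNEL, __get_free_page() */

    static unsigned long alloc_scratch_page(void)
    {
            /* GFP_KERNEL may sleep; process context only. */
            return __get_free_page(GFP_KERNEL);
    }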
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 5e6da47..86425ab 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h> /* max_low_pfn */
 #include <linux/vmalloc.h>
+#include <linux/gfp.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -59,13 +60,6 @@ pgd_alloc(struct mm_struct *mm)
        return ret;
 }
 
-pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-       return pte;
-}
-
 
 /*
  * BAD_PAGE is the page that is used for page faults when linux
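
The open-coded pte_alloc_one_kernel() disappears here, presumably superseded by a common definition elsewhere in the tree. What it did is a single call: one zeroed page holds an entire third-level page table. A sketch of the equivalent logic, under an illustrative name:

    #include <linux/gfp.h>
    #include <asm/page.h>   /* pte_t */

    static pte_t *alloc_pte_page(void)
    {
            /* __GFP_ZERO: every slot starts out pte_none();
             * __GFP_REPEAT: retry harder instead of failing early. */
            return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
    }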
@@ -94,36 +88,6 @@ __bad_page(void)
        return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
 }
 
-#ifndef CONFIG_DISCONTIGMEM
-void
-show_mem(void)
-{
-       long i,free = 0,total = 0,reserved = 0;
-       long shared = 0, cached = 0;
-
-       printk("\nMem-info:\n");
-       show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-       i = max_mapnr;
-       while (i-- > 0) {
-               total++;
-               if (PageReserved(mem_map+i))
-                       reserved++;
-               else if (PageSwapCache(mem_map+i))
-                       cached++;
-               else if (!page_count(mem_map+i))
-                       free++;
-               else
-                       shared += page_count(mem_map + i) - 1;
-       }
-       printk("%ld pages of RAM\n",total);
-       printk("%ld free pages\n",free);
-       printk("%ld reserved pages\n",reserved);
-       printk("%ld pages shared\n",shared);
-       printk("%ld pages swap cached\n",cached);
-}
-#endif
-
 static inline unsigned long
 load_PCB(struct pcb_struct *pcb)
 {
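
The #ifndef CONFIG_DISCONTIGMEM copy of show_mem() is dropped as well; a common implementation produces the same report, so the per-arch walk over mem_map is redundant. The accounting rule it applied to each struct page is worth spelling out, since the "shared" bucket is subtle: a page with refcount N contributes N-1, counting references beyond the first user. A standalone sketch with placeholder names:

    #include <linux/mm.h>

    static void tally_page(struct page *p, long *reserved, long *cached,
                           long *free, long *shared)
    {
            if (PageReserved(p))
                    (*reserved)++;          /* never given to the allocator */
            else if (PageSwapCache(p))
                    (*cached)++;            /* lives in the swap cache */
            else if (!page_count(p))
                    (*free)++;              /* refcount 0: on the free lists */
            else
                    *shared += page_count(p) - 1;
    }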
@@ -226,16 +190,28 @@ callback_init(void * kernel_end)
 
        if (alpha_using_srm) {
                static struct vm_struct console_remap_vm;
-               unsigned long vaddr = VMALLOC_START;
+               unsigned long nr_pages = 0;
+               unsigned long vaddr;
                unsigned long i, j;
 
+               /* calculate needed size */
+               for (i = 0; i < crb->map_entries; ++i)
+                       nr_pages += crb->map[i].count;
+
+               /* register the vm area */
+               console_remap_vm.flags = VM_ALLOC;
+               console_remap_vm.size = nr_pages << PAGE_SHIFT;
+               vm_area_register_early(&console_remap_vm, PAGE_SIZE);
+
+               vaddr = (unsigned long)console_remap_vm.addr;
+
                /* Set up the third level PTEs and update the virtual
                   addresses of the CRB entries.  */
                for (i = 0; i < crb->map_entries; ++i) {
                        unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
                        crb->map[i].va = vaddr;
                        for (j = 0; j < crb->map[i].count; ++j) {
-                               /* Newer console's (especially on larger
+                               /* Newer consoles (especially on larger
                                   systems) may require more pages of
                                   PTEs. Grab additional pages as needed. */
                                if (pmd != pmd_offset(pgd, vaddr)) {
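
This hunk carries the substantive change: rather than assuming the console remap area begins at VMALLOC_START and patching vmlist after the fact (the tail removed in the next hunk), the code now sums the CRB map entries first and reserves the range through vm_area_register_early(), which chooses the address and links the area into the early vmlist. The calling pattern, reduced to its core with placeholder names:

    #include <linux/init.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* The vm_struct must have static lifetime; vmalloc keeps the pointer. */
    static struct vm_struct early_console_vm;

    static void __init reserve_console_va(unsigned long nr_pages)
    {
            early_console_vm.flags = VM_ALLOC;
            early_console_vm.size  = nr_pages << PAGE_SHIFT;

            /* Picks a PAGE_SIZE-aligned address, stores it in .addr,
             * and registers the area so later early users avoid it. */
            vm_area_register_early(&early_console_vm, PAGE_SIZE);
    }

The old tail set console_remap_vm.addr by hand and pointed vmlist at a single entry, which silently assumed the console mapping was the only early vmalloc user; registering up front removes that assumption.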
@@ -250,12 +226,6 @@ callback_init(void * kernel_end)
                                vaddr += PAGE_SIZE;
                        }
                }
-
-               /* Let vmalloc know that we've allocated some space.  */
-               console_remap_vm.flags = VM_ALLOC;
-               console_remap_vm.addr = (void *) VMALLOC_START;
-               console_remap_vm.size = vaddr - VMALLOC_START;
-               vmlist = &console_remap_vm;
        }
 
        callback_init_done = 1;
@@ -330,7 +300,7 @@ printk_memory_info(void)
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
        printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
-              (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+              nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
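
The cast comes off because nr_free_pages() already returns an unsigned long at this point in the tree, matching the %lu specifier. The << (PAGE_SHIFT - 10) idiom used throughout converts a page count to kibibytes: a page is 1 << PAGE_SHIFT bytes, i.e. 1 << (PAGE_SHIFT - 10) KiB. A worked example, assuming alpha's 8 KiB pages (PAGE_SHIFT == 13):

    #include <linux/kernel.h>
    #include <linux/swap.h>         /* nr_free_pages() */

    static void report_free(void)
    {
            /* pages -> KiB without a multiply:
             * 100 pages << (13 - 10) == 800 KiB. */
            unsigned long free_kib = nr_free_pages() << (PAGE_SHIFT - 10);
            printk("Free: %luk\n", free_kib);
    }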