include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ea28048..8ea3144 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/hugetlb.h>
+#include <linux/slab.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -27,12 +28,19 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+       if (slab_is_available())
+               return (void *)__get_free_pages(GFP_KERNEL, order);
+       return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
        pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-       pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+       pud = vmem_alloc_pages(2);
        if (!pud)
                return NULL;
        clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
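The new vmem_alloc_pages() helper replaces vmemmap_alloc_block() as the common allocator for all table levels: once slab_is_available() reports the page allocator is up it takes pages from __get_free_pages(GFP_KERNEL, order), and before that it falls back to bootmem. The __ref annotation allows the reference from non-init code into the __init bootmem path without a section-mismatch warning. The order-2 request matches the s390 table geometry; a sketch of the arithmetic (illustrative, not part of the patch):

	/* get_order() comes via <asm/page.h>; region and segment tables
	 * hold 2048 eight-byte entries: 16 KB, i.e. four 4 KB pages */
	unsigned int order = get_order(2048 * sizeof(unsigned long)); /* == 2 */

which is why vmem_alloc_pages(2) pairs with clear_table(..., PAGE_SIZE * 4) here and in vmem_pmd_alloc() below.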
@@ -40,12 +48,12 @@ static pud_t *vmem_pud_alloc(void)
        return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
        pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-       pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+       pmd = vmem_alloc_pages(2);
        if (!pmd)
                return NULL;
        clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -63,8 +71,12 @@ static pte_t __ref *vmem_pte_alloc(void)
                pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
        if (!pte)
                return NULL;
-       clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
-                   PTRS_PER_PTE * sizeof(pte_t));
+       if (MACHINE_HAS_HPAGE)
+               clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
+                           PTRS_PER_PTE * sizeof(pte_t));
+       else
+               clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+                           PTRS_PER_PTE * sizeof(pte_t));
        return pte;
 }
 
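vmem_pte_alloc() keeps its two-phase allocation (page_table_alloc() once slab is up, bootmem before); what changes is the empty-entry pattern: on machines with large-page support the invalid ptes now carry _PAGE_CO, the hardware change-bit override, so change recording is skipped for these kernel mappings. The two clear_table() arms differ only in that bit; an equivalent one-arm formulation, as a sketch rather than the patch's wording:

	unsigned long empty = _PAGE_TYPE_EMPTY |
			      (MACHINE_HAS_HPAGE ? _PAGE_CO : 0);

	clear_table((unsigned long *) pte, empty, PTRS_PER_PTE * sizeof(pte_t));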
@@ -105,7 +117,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
                    (address + HPAGE_SIZE <= start + size) &&
                    (address >= HPAGE_SIZE)) {
-                       pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+                       pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
+                                       _SEGMENT_ENTRY_CO;
                        pmd_val(*pm_dir) = pte_val(pte);
                        address += HPAGE_SIZE - PAGE_SIZE;
                        continue;
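The same override accompanies the 1 MB large-page path: a segment-aligned address with a full huge page still ahead gets a pmd written directly with _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO instead of a page table. The step arithmetic relies on the enclosing for loop (not shown in the hunk) adding PAGE_SIZE per iteration:

	address += HPAGE_SIZE - PAGE_SIZE;	/* loop adds the last PAGE_SIZE */
	continue;				/* resumes at address + HPAGE_SIZE */

so with HPAGE_SIZE = 1 MB a hit at 0x00100000 continues scanning at 0x00200000.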
@@ -207,13 +220,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                if (pte_none(*pt_dir)) {
                        unsigned long new_page;
 
-                       new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+                       new_page = __pa(vmem_alloc_pages(0));
                        if (!new_page)
                                goto out;
                        pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
                        *pt_dir = pte;
                }
        }
+       memset(start, 0, nr * sizeof(struct page));
        ret = 0;
 out:
        flush_tlb_kernel_range(start_addr, end_addr);
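The added memset() is the flip side of the allocator switch: vmemmap_alloc_block() returned zeroed memory, while __get_free_pages(GFP_KERNEL, 0) does not, so the struct page array is now cleared explicitly once the range is mapped. Zeroing at allocation time would be the obvious alternative, sketched here for contrast (not what the patch does):

	page = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);

but the single memset() over [start, start + nr) clears the whole populated range in one place, through the final virtual mapping, regardless of which allocator backed each page.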
@@ -228,7 +242,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
        struct memory_segment *tmp;
 
-       if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+       if (seg->start + seg->size > VMEM_MAX_PHYS ||
            seg->start + seg->size < seg->start)
                return -ERANGE;
 
@@ -323,6 +337,7 @@ void __init vmem_map_init(void)
        unsigned long start, end;
        int i;
 
+       spin_lock_init(&init_mm.context.list_lock);
        INIT_LIST_HEAD(&init_mm.context.crst_list);
        INIT_LIST_HEAD(&init_mm.context.pgtable_list);
        init_mm.context.noexec = 0;
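init_mm is set up statically rather than through init_new_context(), so its context.list_lock must be initialized by hand before the first kernel page table gets linked onto crst_list or pgtable_list. A sketch of the usage this prepares for, assuming the list handling in arch/s390/mm/pgtable.c of this vintage:

	spin_lock(&mm->context.list_lock);
	list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock(&mm->context.list_lock);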