[POWERPC] Fix warning in 32-bit builds with CONFIG_HIGHMEM
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7370f9f..92a1b16 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -12,7 +12,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/sysctl.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/tlb.h>
+#include <asm/spu.h>
 
 #include <linux/sysctl.h>
 
 #define NUM_LOW_AREAS  (0x100000000UL >> SID_SHIFT)
 #define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 
+#ifdef CONFIG_PPC_64K_PAGES
+#define HUGEPTE_INDEX_SIZE     (PMD_SHIFT-HPAGE_SHIFT)
+#else
+#define HUGEPTE_INDEX_SIZE     (PUD_SHIFT-HPAGE_SHIFT)
+#endif
+#define PTRS_PER_HUGEPTE       (1 << HUGEPTE_INDEX_SIZE)
+#define HUGEPTE_TABLE_SIZE     (sizeof(pte_t) << HUGEPTE_INDEX_SIZE)
+
+#define HUGEPD_SHIFT           (HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
+#define HUGEPD_SIZE            (1UL << HUGEPD_SHIFT)
+#define HUGEPD_MASK            (~(HUGEPD_SIZE-1))
+
+#define huge_pgtable_cache     (pgtable_cache[HUGEPTE_CACHE_NUM])
+
+/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
+ * will choke on pointers to hugepte tables, which is handy for
+ * catching screwups early. */
+#define HUGEPD_OK      0x1
+
+typedef struct { unsigned long pd; } hugepd_t;
+
+#define hugepd_none(hpd)       ((hpd).pd == 0)
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+       BUG_ON(!(hpd.pd & HUGEPD_OK));
+       return (pte_t *)(hpd.pd & ~HUGEPD_OK);
+}
+
+static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
+{
+       unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
+       pte_t *dir = hugepd_page(*hpdp);
+
+       return dir + idx;
+}
+
+static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
+                          unsigned long address)
+{
+       pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
+                                     GFP_KERNEL|__GFP_REPEAT);
+
+       if (! new)
+               return -ENOMEM;
+
+       spin_lock(&mm->page_table_lock);
+       if (!hugepd_none(*hpdp))
+               kmem_cache_free(huge_pgtable_cache, new);
+       else
+               hpdp->pd = (unsigned long)new | HUGEPD_OK;
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+}
+
 /* Modelled after find_linux_pte() */
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pg;
        pud_t *pu;
-       pmd_t *pm;
-       pte_t *pt;
 
-       BUG_ON(! in_hugepage_area(mm->context, addr));
+       BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
        addr &= HPAGE_MASK;
 
@@ -46,26 +99,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
        if (!pgd_none(*pg)) {
                pu = pud_offset(pg, addr);
                if (!pud_none(*pu)) {
-                       pm = pmd_offset(pu, addr);
 #ifdef CONFIG_PPC_64K_PAGES
-                       /* Currently, we use the normal PTE offset within full
-                        * size PTE pages, thus our huge PTEs are scattered in
-                        * the PTE page and we do waste some. We may change
-                        * that in the future, but the current mecanism keeps
-                        * things much simpler
-                        */
-                       if (!pmd_none(*pm)) {
-                               /* Note: pte_offset_* are all equivalent on
-                                * ppc64 as we don't have HIGHMEM
-                                */
-                               pt = pte_offset_kernel(pm, addr);
-                               return pt;
-                       }
-#else /* CONFIG_PPC_64K_PAGES */
-                       /* On 4k pages, we put huge PTEs in the PMD page */
-                       pt = (pte_t *)pm;
-                       return pt;
-#endif /* CONFIG_PPC_64K_PAGES */
+                       pmd_t *pm;
+                       pm = pmd_offset(pu, addr);
+                       if (!pmd_none(*pm))
+                               return hugepte_offset((hugepd_t *)pm, addr);
+#else
+                       return hugepte_offset((hugepd_t *)pu, addr);
+#endif
                }
        }
 
@@ -76,10 +117,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pg;
        pud_t *pu;
-       pmd_t *pm;
-       pte_t *pt;
+       hugepd_t *hpdp = NULL;
 
-       BUG_ON(! in_hugepage_area(mm->context, addr));
+       BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
        addr &= HPAGE_MASK;
 
@@ -87,236 +127,208 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
        pu = pud_alloc(mm, pg, addr);
 
        if (pu) {
-               pm = pmd_alloc(mm, pu, addr);
-               if (pm) {
 #ifdef CONFIG_PPC_64K_PAGES
-                       /* See comment in huge_pte_offset. Note that if we ever
-                        * want to put the page size in the PMD, we would have
-                        * to open code our own pte_alloc* function in order
-                        * to populate and set the size atomically
-                        */
-                       pt = pte_alloc_map(mm, pm, addr);
-#else /* CONFIG_PPC_64K_PAGES */
-                       pt = (pte_t *)pm;
-#endif /* CONFIG_PPC_64K_PAGES */
-                       return pt;
-               }
+               pmd_t *pm;
+               pm = pmd_alloc(mm, pu, addr);
+               if (pm)
+                       hpdp = (hugepd_t *)pm;
+#else
+               hpdp = (hugepd_t *)pu;
+#endif
        }
 
-       return NULL;
-}
+       if (! hpdp)
+               return NULL;
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                    pte_t *ptep, pte_t pte)
-{
-       if (pte_present(*ptep)) {
-               /* We open-code pte_clear because we need to pass the right
-                * argument to hpte_update (huge / !huge)
-                */
-               unsigned long old = pte_update(ptep, ~0UL);
-               if (old & _PAGE_HASHPTE)
-                       hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
-               flush_tlb_pending();
-       }
-       *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+       if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
+               return NULL;
+
+       return hugepte_offset(hpdp, addr);
 }
 
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                             pte_t *ptep)
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 {
-       unsigned long old = pte_update(ptep, ~0UL);
-
-       if (old & _PAGE_HASHPTE)
-               hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
-       *ptep = __pte(0);
-
-       return __pte(old);
+       return 0;
 }
 
-struct slb_flush_info {
-       struct mm_struct *mm;
-       u16 newareas;
-};
-
-static void flush_low_segments(void *parm)
+static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 {
-       struct slb_flush_info *fi = parm;
-       unsigned long i;
-
-       BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
-
-       if (current->active_mm != fi->mm)
-               return;
-
-       /* Only need to do anything if this CPU is working in the same
-        * mm as the one which has changed */
+       pte_t *hugepte = hugepd_page(*hpdp);
 
-       /* update the paca copy of the context struct */
-       get_paca()->context = current->active_mm->context;
-
-       asm volatile("isync" : : : "memory");
-       for (i = 0; i < NUM_LOW_AREAS; i++) {
-               if (! (fi->newareas & (1U << i)))
-                       continue;
-               asm volatile("slbie %0"
-                            : : "r" ((i << SID_SHIFT) | SLBIE_C));
-       }
-       asm volatile("isync" : : : "memory");
+       hpdp->pd = 0;
+       tlb->need_flush = 1;
+       pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
+                                                PGF_CACHENUM_MASK));
 }
 
-static void flush_high_segments(void *parm)
+#ifdef CONFIG_PPC_64K_PAGES
+static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+                                  unsigned long addr, unsigned long end,
+                                  unsigned long floor, unsigned long ceiling)
 {
-       struct slb_flush_info *fi = parm;
-       unsigned long i, j;
-
+       pmd_t *pmd;
+       unsigned long next;
+       unsigned long start;
 
-       BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
+       start = addr;
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = pmd_addr_end(addr, end);
+               if (pmd_none(*pmd))
+                       continue;
+               free_hugepte_range(tlb, (hugepd_t *)pmd);
+       } while (pmd++, addr = next, addr != end);
 
-       if (current->active_mm != fi->mm)
+       start &= PUD_MASK;
+       if (start < floor)
                return;
-
-       /* Only need to do anything if this CPU is working in the same
-        * mm as the one which has changed */
-
-       /* update the paca copy of the context struct */
-       get_paca()->context = current->active_mm->context;
-
-       asm volatile("isync" : : : "memory");
-       for (i = 0; i < NUM_HIGH_AREAS; i++) {
-               if (! (fi->newareas & (1U << i)))
-                       continue;
-               for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
-                       asm volatile("slbie %0"
-                                    :: "r" (((i << HTLB_AREA_SHIFT)
-                                             + (j << SID_SHIFT)) | SLBIE_C));
+       if (ceiling) {
+               ceiling &= PUD_MASK;
+               if (!ceiling)
+                       return;
        }
-       asm volatile("isync" : : : "memory");
-}
-
-static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
-       unsigned long start = area << SID_SHIFT;
-       unsigned long end = (area+1) << SID_SHIFT;
-       struct vm_area_struct *vma;
-
-       BUG_ON(area >= NUM_LOW_AREAS);
-
-       /* Check no VMAs are in the region */
-       vma = find_vma(mm, start);
-       if (vma && (vma->vm_start < end))
-               return -EBUSY;
+       if (end - 1 > ceiling - 1)
+               return;
 
-       return 0;
+       pmd = pmd_offset(pud, start);
+       pud_clear(pud);
+       pmd_free_tlb(tlb, pmd);
 }
+#endif
 
-static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+                                  unsigned long addr, unsigned long end,
+                                  unsigned long floor, unsigned long ceiling)
 {
-       unsigned long start = area << HTLB_AREA_SHIFT;
-       unsigned long end = (area+1) << HTLB_AREA_SHIFT;
-       struct vm_area_struct *vma;
-
-       BUG_ON(area >= NUM_HIGH_AREAS);
+       pud_t *pud;
+       unsigned long next;
+       unsigned long start;
 
-       /* Hack, so that each addresses is controlled by exactly one
-        * of the high or low area bitmaps, the first high area starts
-        * at 4GB, not 0 */
-       if (start == 0)
-               start = 0x100000000UL;
+       start = addr;
+       pud = pud_offset(pgd, addr);
+       do {
+               next = pud_addr_end(addr, end);
+#ifdef CONFIG_PPC_64K_PAGES
+               if (pud_none_or_clear_bad(pud))
+                       continue;
+               hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+#else
+               if (pud_none(*pud))
+                       continue;
+               free_hugepte_range(tlb, (hugepd_t *)pud);
+#endif
+       } while (pud++, addr = next, addr != end);
 
-       /* Check no VMAs are in the region */
-       vma = find_vma(mm, start);
-       if (vma && (vma->vm_start < end))
-               return -EBUSY;
+       start &= PGDIR_MASK;
+       if (start < floor)
+               return;
+       if (ceiling) {
+               ceiling &= PGDIR_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               return;
 
-       return 0;
+       pud = pud_offset(pgd, start);
+       pgd_clear(pgd);
+       pud_free_tlb(tlb, pud);
 }
 
-static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
+/*
+ * This function frees user-level page tables of a process.
+ *
+ * Must be called with pagetable lock held.
+ */
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+                           unsigned long addr, unsigned long end,
+                           unsigned long floor, unsigned long ceiling)
 {
-       unsigned long i;
-       struct slb_flush_info fi;
-
-       BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
-       BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
-
-       newareas &= ~(mm->context.low_htlb_areas);
-       if (! newareas)
-               return 0; /* The segments we want are already open */
-
-       for (i = 0; i < NUM_LOW_AREAS; i++)
-               if ((1 << i) & newareas)
-                       if (prepare_low_area_for_htlb(mm, i) != 0)
-                               return -EBUSY;
+       pgd_t *pgd;
+       unsigned long next;
+       unsigned long start;
 
-       mm->context.low_htlb_areas |= newareas;
-
-       /* the context change must make it to memory before the flush,
-        * so that further SLB misses do the right thing. */
-       mb();
+       /*
+        * Comments below are taken from the normal free_pgd_range().  They
+        * apply here too.  The tests against HUGEPD_MASK below are
+        * essential, because we *don't* test for this at the bottom
+        * level.  Without them we'll attempt to free a hugepte table
+        * when we unmap just part of it, even if there are other
+        * active mappings using it.
+        *
+        * The next few lines have given us lots of grief...
+        *
+        * Why are we testing HUGEPD* at this top level?  Because
+        * often there will be no work to do at all, and we'd prefer
+        * not to go all the way down to the bottom just to discover
+        * that.
+        *
+        * Why all these "- 1"s?  Because 0 represents both the bottom
+        * of the address space and the top of it (using -1 for the
+        * top wouldn't help much: the masks would do the wrong thing).
+        * The rule is that addr 0 and floor 0 refer to the bottom of
+        * the address space, but end 0 and ceiling 0 refer to the top.
+        * Comparisons need to use "end - 1" and "ceiling - 1" (though
+        * that end 0 case should be mythical).
+        *
+        * Wherever addr is brought up or ceiling brought down, we
+        * must be careful to reject "the opposite 0" before it
+        * confuses the subsequent tests.  But what about where end is
+        * brought down by HUGEPD_SIZE below? no, end can't go down to
+        * 0 there.
+        *
+        * Whereas we round start (addr) and ceiling down, by different
+        * masks at different levels, in order to test whether a table
+        * now has no other vmas using it, so can be freed, we don't
+        * bother to round floor or end up - the tests don't need that.
+        */
 
-       fi.mm = mm;
-       fi.newareas = newareas;
-       on_each_cpu(flush_low_segments, &fi, 0, 1);
+       addr &= HUGEPD_MASK;
+       if (addr < floor) {
+               addr += HUGEPD_SIZE;
+               if (!addr)
+                       return;
+       }
+       if (ceiling) {
+               ceiling &= HUGEPD_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               end -= HUGEPD_SIZE;
+       if (addr > end - 1)
+               return;
 
-       return 0;
+       start = addr;
+       pgd = pgd_offset((*tlb)->mm, addr);
+       do {
+               BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
+               next = pgd_addr_end(addr, end);
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+       } while (pgd++, addr = next, addr != end);
 }
 
-static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pte)
 {
-       struct slb_flush_info fi;
-       unsigned long i;
-
-       BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
-       BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
-                    != NUM_HIGH_AREAS);
-
-       newareas &= ~(mm->context.high_htlb_areas);
-       if (! newareas)
-               return 0; /* The areas we want are already open */
-
-       for (i = 0; i < NUM_HIGH_AREAS; i++)
-               if ((1 << i) & newareas)
-                       if (prepare_high_area_for_htlb(mm, i) != 0)
-                               return -EBUSY;
-
-       mm->context.high_htlb_areas |= newareas;
-
-       /* update the paca copy of the context struct */
-       get_paca()->context = mm->context;
-
-       /* the context change must make it to memory before the flush,
-        * so that further SLB misses do the right thing. */
-       mb();
-
-       fi.mm = mm;
-       fi.newareas = newareas;
-       on_each_cpu(flush_high_segments, &fi, 0, 1);
-
-       return 0;
+       if (pte_present(*ptep)) {
+               /* We open-code pte_clear because we need to pass the right
+                * argument to hpte_need_flush (huge / !huge). Might not be
+                * necessary anymore if we make hpte_need_flush() get the
+                * page size from the slices
+                */
+               pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
+       }
+       *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }
 
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep)
 {
-       int err = 0;
-
-       if ( (addr+len) < addr )
-               return -EINVAL;
-
-       if (addr < 0x100000000UL)
-               err = open_low_hpage_areas(current->mm,
-                                         LOW_ESID_MASK(addr, len));
-       if ((addr + len) > 0x100000000UL)
-               err = open_high_hpage_areas(current->mm,
-                                           HTLB_AREA_MASK(addr, len));
-       if (err) {
-               printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
-                      " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
-                      addr, len,
-                      LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
-               return err;
-       }
-
-       return 0;
+       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
+       return __pte(old);
 }
 
 struct page *
@@ -325,7 +337,7 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
        pte_t *ptep;
        struct page *page;
 
-       if (! in_hugepage_area(mm->context, address))
+       if (get_slice_psize(mm, address) != mmu_huge_psize)
                return ERR_PTR(-EINVAL);
 
        ptep = huge_pte_offset(mm, address);
@@ -349,338 +361,13 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
        return NULL;
 }
 
-/* Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions. */
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-                                    unsigned long len, unsigned long pgoff,
-                                    unsigned long flags)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long start_addr;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (((TASK_SIZE - len) >= addr)
-                   && (!vma || (addr+len) <= vma->vm_start)
-                   && !is_hugepage_only_range(mm, addr,len))
-                       return addr;
-       }
-       if (len > mm->cached_hole_size) {
-               start_addr = addr = mm->free_area_cache;
-       } else {
-               start_addr = addr = TASK_UNMAPPED_BASE;
-               mm->cached_hole_size = 0;
-       }
-
-full_search:
-       vma = find_vma(mm, addr);
-       while (TASK_SIZE - len >= addr) {
-               BUG_ON(vma && (addr >= vma->vm_end));
-
-               if (touches_hugepage_low_range(mm, addr, len)) {
-                       addr = ALIGN(addr+1, 1<<SID_SHIFT);
-                       vma = find_vma(mm, addr);
-                       continue;
-               }
-               if (touches_hugepage_high_range(mm, addr, len)) {
-                       addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
-                       vma = find_vma(mm, addr);
-                       continue;
-               }
-               if (!vma || addr + len <= vma->vm_start) {
-                       /*
-                        * Remember the place where we stopped the search:
-                        */
-                       mm->free_area_cache = addr + len;
-                       return addr;
-               }
-               if (addr + mm->cached_hole_size < vma->vm_start)
-                       mm->cached_hole_size = vma->vm_start - addr;
-               addr = vma->vm_end;
-               vma = vma->vm_next;
-       }
-
-       /* Make sure we didn't miss any holes */
-       if (start_addr != TASK_UNMAPPED_BASE) {
-               start_addr = addr = TASK_UNMAPPED_BASE;
-               mm->cached_hole_size = 0;
-               goto full_search;
-       }
-       return -ENOMEM;
-}
-
-/*
- * This mmap-allocator allocates new areas top-down from below the
- * stack's low limit (the base):
- *
- * Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions.
- */
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-                         const unsigned long len, const unsigned long pgoff,
-                         const unsigned long flags)
-{
-       struct vm_area_struct *vma, *prev_vma;
-       struct mm_struct *mm = current->mm;
-       unsigned long base = mm->mmap_base, addr = addr0;
-       unsigned long largest_hole = mm->cached_hole_size;
-       int first_time = 1;
-
-       /* requested length too big for entire address space */
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       /* dont allow allocations above current base */
-       if (mm->free_area_cache > base)
-               mm->free_area_cache = base;
-
-       /* requesting a specific address */
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start)
-                               && !is_hugepage_only_range(mm, addr,len))
-                       return addr;
-       }
-
-       if (len <= largest_hole) {
-               largest_hole = 0;
-               mm->free_area_cache = base;
-       }
-try_again:
-       /* make sure it can fit in the remaining address space */
-       if (mm->free_area_cache < len)
-               goto fail;
-
-       /* either no address requested or cant fit in requested address hole */
-       addr = (mm->free_area_cache - len) & PAGE_MASK;
-       do {
-hugepage_recheck:
-               if (touches_hugepage_low_range(mm, addr, len)) {
-                       addr = (addr & ((~0) << SID_SHIFT)) - len;
-                       goto hugepage_recheck;
-               } else if (touches_hugepage_high_range(mm, addr, len)) {
-                       addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
-                       goto hugepage_recheck;
-               }
-
-               /*
-                * Lookup failure means no vma is above this address,
-                * i.e. return with success:
-                */
-               if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
-                       return addr;
-
-               /*
-                * new region fits between prev_vma->vm_end and
-                * vma->vm_start, use it:
-                */
-               if (addr+len <= vma->vm_start &&
-                         (!prev_vma || (addr >= prev_vma->vm_end))) {
-                       /* remember the address as a hint for next time */
-                       mm->cached_hole_size = largest_hole;
-                       return (mm->free_area_cache = addr);
-               } else {
-                       /* pull free_area_cache down to the first hole */
-                       if (mm->free_area_cache == vma->vm_end) {
-                               mm->free_area_cache = vma->vm_start;
-                               mm->cached_hole_size = largest_hole;
-                       }
-               }
-
-               /* remember the largest hole we saw so far */
-               if (addr + largest_hole < vma->vm_start)
-                       largest_hole = vma->vm_start - addr;
-
-               /* try just below the current vma->vm_start */
-               addr = vma->vm_start-len;
-       } while (len <= vma->vm_start);
-
-fail:
-       /*
-        * if hint left us with no space for the requested
-        * mapping then try again:
-        */
-       if (first_time) {
-               mm->free_area_cache = base;
-               largest_hole = 0;
-               first_time = 0;
-               goto try_again;
-       }
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       mm->free_area_cache = TASK_UNMAPPED_BASE;
-       mm->cached_hole_size = ~0UL;
-       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-       /*
-        * Restore the topdown base:
-        */
-       mm->free_area_cache = base;
-       mm->cached_hole_size = ~0UL;
-
-       return addr;
-}
-
-static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
-{
-       struct vm_area_struct *vma;
-
-       vma = find_vma(current->mm, addr);
-       if (!vma || ((addr + len) <= vma->vm_start))
-               return 0;
-
-       return -ENOMEM;
-}
-
-static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
-{
-       unsigned long addr = 0;
-       struct vm_area_struct *vma;
-
-       vma = find_vma(current->mm, addr);
-       while (addr + len <= 0x100000000UL) {
-               BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
-               if (! __within_hugepage_low_range(addr, len, segmask)) {
-                       addr = ALIGN(addr+1, 1<<SID_SHIFT);
-                       vma = find_vma(current->mm, addr);
-                       continue;
-               }
-
-               if (!vma || (addr + len) <= vma->vm_start)
-                       return addr;
-               addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-               /* Depending on segmask this might not be a confirmed
-                * hugepage region, so the ALIGN could have skipped
-                * some VMAs */
-               vma = find_vma(current->mm, addr);
-       }
-
-       return -ENOMEM;
-}
-
-static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
-{
-       unsigned long addr = 0x100000000UL;
-       struct vm_area_struct *vma;
-
-       vma = find_vma(current->mm, addr);
-       while (addr + len <= TASK_SIZE_USER64) {
-               BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
-               if (! __within_hugepage_high_range(addr, len, areamask)) {
-                       addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
-                       vma = find_vma(current->mm, addr);
-                       continue;
-               }
-
-               if (!vma || (addr + len) <= vma->vm_start)
-                       return addr;
-               addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-               /* Depending on segmask this might not be a confirmed
-                * hugepage region, so the ALIGN could have skipped
-                * some VMAs */
-               vma = find_vma(current->mm, addr);
-       }
-
-       return -ENOMEM;
-}
 
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
 {
-       int lastshift;
-       u16 areamask, curareas;
-
-       if (HPAGE_SHIFT == 0)
-               return -EINVAL;
-       if (len & ~HPAGE_MASK)
-               return -EINVAL;
-
-       if (!cpu_has_feature(CPU_FTR_16M_PAGE))
-               return -EINVAL;
-
-       /* Paranoia, caller should have dealt with this */
-       BUG_ON((addr + len)  < addr);
-
-       if (test_thread_flag(TIF_32BIT)) {
-               /* Paranoia, caller should have dealt with this */
-               BUG_ON((addr + len) > 0x100000000UL);
-
-               curareas = current->mm->context.low_htlb_areas;
-
-               /* First see if we can use the hint address */
-               if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
-                       areamask = LOW_ESID_MASK(addr, len);
-                       if (open_low_hpage_areas(current->mm, areamask) == 0)
-                               return addr;
-               }
-
-               /* Next see if we can map in the existing low areas */
-               addr = htlb_get_low_area(len, curareas);
-               if (addr != -ENOMEM)
-                       return addr;
-
-               /* Finally go looking for areas to open */
-               lastshift = 0;
-               for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
-                    ! lastshift; areamask >>=1) {
-                       if (areamask & 1)
-                               lastshift = 1;
-
-                       addr = htlb_get_low_area(len, curareas | areamask);
-                       if ((addr != -ENOMEM)
-                           && open_low_hpage_areas(current->mm, areamask) == 0)
-                               return addr;
-               }
-       } else {
-               curareas = current->mm->context.high_htlb_areas;
-
-               /* First see if we can use the hint address */
-               /* We discourage 64-bit processes from doing hugepage
-                * mappings below 4GB (must use MAP_FIXED) */
-               if ((addr >= 0x100000000UL)
-                   && (htlb_check_hinted_area(addr, len) == 0)) {
-                       areamask = HTLB_AREA_MASK(addr, len);
-                       if (open_high_hpage_areas(current->mm, areamask) == 0)
-                               return addr;
-               }
-
-               /* Next see if we can map in the existing high areas */
-               addr = htlb_get_high_area(len, curareas);
-               if (addr != -ENOMEM)
-                       return addr;
-
-               /* Finally go looking for areas to open */
-               lastshift = 0;
-               for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
-                    ! lastshift; areamask >>=1) {
-                       if (areamask & 1)
-                               lastshift = 1;
-
-                       addr = htlb_get_high_area(len, curareas | areamask);
-                       if ((addr != -ENOMEM)
-                           && open_high_hpage_areas(current->mm, areamask) == 0)
-                               return addr;
-               }
-       }
-       printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
-              " enough areas\n");
-       return -ENOMEM;
+       return slice_get_unmapped_area(addr, len, flags,
+                                      mmu_huge_psize, 1, 0);
 }
 
 /*
@@ -809,7 +496,6 @@ repeat:
 
                /* Primary is full, try the secondary */
                if (unlikely(slot == -1)) {
-                       new_pte |= _PAGE_F_SECOND;
                        hpte_group = ((~hash & htab_hash_mask) *
                                      HPTES_PER_GROUP) & ~0x7UL; 
                        slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
@@ -828,7 +514,7 @@ repeat:
                if (unlikely(slot == -2))
                        panic("hash_huge_page: pte_insert failed\n");
 
-               new_pte |= (slot << 12) & _PAGE_F_GIX;
+               new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }
 
        /*
@@ -841,3 +527,26 @@ repeat:
  out:
        return err;
 }
+
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
+{
+       memset(addr, 0, kmem_cache_size(cache));
+}
+
+static int __init hugetlbpage_init(void)
+{
+       if (!cpu_has_feature(CPU_FTR_16M_PAGE))
+               return -ENODEV;
+
+       huge_pgtable_cache = kmem_cache_create("hugepte_cache",
+                                              HUGEPTE_TABLE_SIZE,
+                                              HUGEPTE_TABLE_SIZE,
+                                              0,
+                                              zero_ctor, NULL);
+       if (! huge_pgtable_cache)
+               panic("hugetlbpage_init(): could not create hugepte cache\n");
+
+       return 0;
+}
+
+module_init(hugetlbpage_init);
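
For reference, the hugepd scheme introduced above stores a pointer to a hugepte table in the pmd/pud slot with the low bit set (HUGEPD_OK), and locates a huge PTE by indexing that table with the address bits between HPAGE_SHIFT and HUGEPD_SHIFT. Below is a minimal standalone userspace sketch of that tagging and index arithmetic, not part of the patch: the shift constants are illustrative stand-ins for the kernel's HPAGE_SHIFT/PUD_SHIFT, and the helpers are simplified re-creations of hugepd_page() and hugepte_offset(), not the kernel implementations.

/* Illustrative sketch of hugepd pointer tagging and hugepte indexing. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define HPAGE_SHIFT        24                        /* 16M huge pages (illustrative) */
#define PUD_SHIFT          30                        /* illustrative stand-in */
#define HUGEPTE_INDEX_SIZE (PUD_SHIFT - HPAGE_SHIFT)
#define PTRS_PER_HUGEPTE   (1UL << HUGEPTE_INDEX_SIZE)
#define HUGEPD_OK          0x1UL

typedef unsigned long pte_t;                         /* simplified for the sketch */
typedef struct { unsigned long pd; } hugepd_t;

/* Recover the hugepte table pointer; the flag check catches stray values. */
static pte_t *hugepd_page(hugepd_t hpd)
{
	assert(hpd.pd & HUGEPD_OK);
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

/* Index the table with the address bits between HPAGE_SHIFT and HUGEPD_SHIFT. */
static pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = (addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE - 1);
	return hugepd_page(*hpdp) + idx;
}

int main(void)
{
	/* A hugepte table covering one hugepd-sized region, tagged with HUGEPD_OK. */
	pte_t *table = calloc(PTRS_PER_HUGEPTE, sizeof(pte_t));
	hugepd_t hpd = { .pd = (unsigned long)table | HUGEPD_OK };

	/* An address in the sixth huge page of the region maps to slot 5. */
	unsigned long addr = (5UL << HPAGE_SHIFT) + 0x1234;
	printf("slot index = %lu\n",
	       (unsigned long)(hugepte_offset(&hpd, addr) - table));

	free(table);
	return 0;
}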