sysctl: remove "struct file *" argument of ->proc_handler
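For context, the sysctl hunks near the bottom of this file drop the now-unused struct file * parameter from the ->proc_handler callbacks. Roughly (the authoritative typedef lives in include/linux/sysctl.h, which this excerpt does not show), the signature change is:

/* before */
int (*proc_handler)(struct ctl_table *table, int write, struct file *filp,
                    void __user *buffer, size_t *lenp, loff_t *ppos);
/* after */
int (*proc_handler)(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos);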
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0d8153e..6f048fc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7,27 +7,42 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/seq_file.h>
 #include <linux/sysctl.h>
 #include <linux/highmem.h>
+#include <linux/mmu_notifier.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
 #include <linux/cpuset.h>
 #include <linux/mutex.h>
+#include <linux/bootmem.h>
+#include <linux/sysfs.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/io.h>
 
 #include <linux/hugetlb.h>
 #include "internal.h"
 
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
-unsigned long max_huge_pages;
-unsigned long sysctl_overcommit_huge_pages;
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 
-struct hstate default_hstate;
+static int max_hstate;
+unsigned int default_hstate_idx;
+struct hstate hstates[HUGE_MAX_HSTATE];
+
+__initdata LIST_HEAD(huge_boot_pages);
+
+/* for command line parsing */
+static struct hstate * __initdata parsed_hstate;
+static unsigned long __initdata default_hstate_max_huge_pages;
+static unsigned long __initdata default_hstate_size;
+
+#define for_each_hstate(h) \
+       for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -205,6 +220,36 @@ static pgoff_t vma_hugecache_offset(struct hstate *h,
 }
 
 /*
+ * Return the size of the pages allocated when backing a VMA. In the majority
+ * of cases this will be the same size as that used by the page table entries.
+ */
+unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+       struct hstate *hstate;
+
+       if (!is_vm_hugetlb_page(vma))
+               return PAGE_SIZE;
+
+       hstate = hstate_vma(vma);
+
+       return 1UL << (hstate->order + PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
+
+/*
+ * Return the page size being used by the MMU to back a VMA. In the majority
+ * of cases, the page size used by the kernel matches the MMU size. On
+ * architectures where it differs, an architecture-specific version of this
+ * function is required.
+ */
+#ifndef vma_mmu_pagesize
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+       return vma_kernel_pagesize(vma);
+}
+#endif
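As a side note, a minimal sketch of the size arithmetic used by vma_kernel_pagesize() above, assuming the common x86_64 constants (PAGE_SHIFT of 12, a 2MB hstate of order 9); the helper name is illustrative only, and these helpers are the kind of thing /proc/<pid>/smaps' KernelPageSize/MMUPageSize lines are built on:

/* illustrative sketch, not part of the patch */
static inline unsigned long hpage_size_from_order(unsigned int order)
{
        return 1UL << (order + PAGE_SHIFT);     /* order 9, shift 12: 2MB */
}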
+
+/*
  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
  * bits of the reservation map pointer, which are always clear due to
  * alignment.
@@ -248,7 +293,7 @@ struct resv_map {
        struct list_head regions;
 };
 
-struct resv_map *resv_map_alloc(void)
+static struct resv_map *resv_map_alloc(void)
 {
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        if (!resv_map)
@@ -260,7 +305,7 @@ struct resv_map *resv_map_alloc(void)
        return resv_map;
 }
 
-void resv_map_release(struct kref *ref)
+static void resv_map_release(struct kref *ref)
 {
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 
@@ -272,16 +317,16 @@ void resv_map_release(struct kref *ref)
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       if (!(vma->vm_flags & VM_SHARED))
+       if (!(vma->vm_flags & VM_MAYSHARE))
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
-       return 0;
+       return NULL;
 }
 
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       VM_BUG_ON(vma->vm_flags & VM_SHARED);
+       VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
@@ -290,7 +335,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       VM_BUG_ON(vma->vm_flags & VM_SHARED);
+       VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
@@ -309,7 +354,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
        if (vma->vm_flags & VM_NORESERVE)
                return;
 
-       if (vma->vm_flags & VM_SHARED) {
+       if (vma->vm_flags & VM_MAYSHARE) {
                /* Shared mappings always use reserves */
                h->resv_huge_pages--;
        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -325,25 +370,42 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       if (!(vma->vm_flags & VM_SHARED))
+       if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
 }
 
 /* Returns true if the VMA has associated reserve pages */
-static int vma_has_private_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma)
 {
-       if (vma->vm_flags & VM_SHARED)
-               return 0;
-       if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-               return 0;
-       return 1;
+       if (vma->vm_flags & VM_MAYSHARE)
+               return 1;
+       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+               return 1;
+       return 0;
 }
 
+static void clear_gigantic_page(struct page *page,
+                       unsigned long addr, unsigned long sz)
+{
+       int i;
+       struct page *p = page;
+
+       might_sleep();
+       for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
+               cond_resched();
+               clear_user_highpage(p, addr + i * PAGE_SIZE);
+       }
+}
 static void clear_huge_page(struct page *page,
                        unsigned long addr, unsigned long sz)
 {
        int i;
 
+       if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
+               clear_gigantic_page(page, addr, sz);
+               return;
+       }
+
        might_sleep();
        for (i = 0; i < sz/PAGE_SIZE; i++) {
                cond_resched();
@@ -351,12 +413,34 @@ static void clear_huge_page(struct page *page,
        }
 }
 
+static void copy_gigantic_page(struct page *dst, struct page *src,
+                          unsigned long addr, struct vm_area_struct *vma)
+{
+       int i;
+       struct hstate *h = hstate_vma(vma);
+       struct page *dst_base = dst;
+       struct page *src_base = src;
+       might_sleep();
+       for (i = 0; i < pages_per_huge_page(h); ) {
+               cond_resched();
+               copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+               i++;
+               dst = mem_map_next(dst, dst_base, i);
+               src = mem_map_next(src, src_base, i);
+       }
+}
 static void copy_huge_page(struct page *dst, struct page *src,
                           unsigned long addr, struct vm_area_struct *vma)
 {
        int i;
        struct hstate *h = hstate_vma(vma);
 
+       if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+               copy_gigantic_page(dst, src, addr, vma);
+               return;
+       }
+
        might_sleep();
        for (i = 0; i < pages_per_huge_page(h); i++) {
                cond_resched();
@@ -372,24 +456,6 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        h->free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct hstate *h)
-{
-       int nid;
-       struct page *page = NULL;
-
-       for (nid = 0; nid < MAX_NUMNODES; ++nid) {
-               if (!list_empty(&h->hugepage_freelists[nid])) {
-                       page = list_entry(h->hugepage_freelists[nid].next,
-                                         struct page, lru);
-                       list_del(&page->lru);
-                       h->free_huge_pages--;
-                       h->free_huge_pages_node[nid]--;
-                       break;
-               }
-       }
-       return page;
-}
-
 static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve)
@@ -408,7 +474,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
         * have no page reserves. This check ensures that reservations are
         * not "stolen". The child may still get SIGKILLed
         */
-       if (!vma_has_private_reserves(vma) &&
+       if (!vma_has_reserves(vma) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                return NULL;
 
@@ -441,6 +507,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 {
        int i;
 
+       VM_BUG_ON(h->order >= MAX_ORDER);
+
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
@@ -454,13 +522,24 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        __free_pages(page, huge_page_order(h));
 }
 
+struct hstate *size_to_hstate(unsigned long size)
+{
+       struct hstate *h;
+
+       for_each_hstate(h) {
+               if (huge_page_size(h) == size)
+                       return h;
+       }
+       return NULL;
+}
+
 static void free_huge_page(struct page *page)
 {
        /*
         * Can't pass hstate in here because it is called from the
         * compound page destructor.
         */
-       struct hstate *h = &default_hstate;
+       struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
        struct address_space *mapping;
 
@@ -470,7 +549,7 @@ static void free_huge_page(struct page *page)
        INIT_LIST_HEAD(&page->lru);
 
        spin_lock(&hugetlb_lock);
-       if (h->surplus_huge_pages_node[nid]) {
+       if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
                update_and_free_page(h, page);
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
@@ -482,41 +561,6 @@ static void free_huge_page(struct page *page)
                hugetlb_put_quota(mapping, 1);
 }
 
-/*
- * Increment or decrement surplus_huge_pages.  Keep node-specific counters
- * balanced by operating on them in a round-robin fashion.
- * Returns 1 if an adjustment was made.
- */
-static int adjust_pool_surplus(struct hstate *h, int delta)
-{
-       static int prev_nid;
-       int nid = prev_nid;
-       int ret = 0;
-
-       VM_BUG_ON(delta != -1 && delta != 1);
-       do {
-               nid = next_node(nid, node_online_map);
-               if (nid == MAX_NUMNODES)
-                       nid = first_node(node_online_map);
-
-               /* To shrink on this node, there must be a surplus page */
-               if (delta < 0 && !h->surplus_huge_pages_node[nid])
-                       continue;
-               /* Surplus cannot exceed the total number of pages */
-               if (delta > 0 && h->surplus_huge_pages_node[nid] >=
-                                               h->nr_huge_pages_node[nid])
-                       continue;
-
-               h->surplus_huge_pages += delta;
-               h->surplus_huge_pages_node[nid] += delta;
-               ret = 1;
-               break;
-       } while (nid != prev_nid);
-
-       prev_nid = nid;
-       return ret;
-}
-
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
        set_compound_page_dtor(page, free_huge_page);
@@ -527,17 +571,48 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
        put_page(page); /* free it into the hugepage allocator */
 }
 
+static void prep_compound_gigantic_page(struct page *page, unsigned long order)
+{
+       int i;
+       int nr_pages = 1 << order;
+       struct page *p = page + 1;
+
+       /* we rely on prep_new_huge_page to set the destructor */
+       set_compound_order(page, order);
+       __SetPageHead(page);
+       for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+               __SetPageTail(p);
+               p->first_page = page;
+       }
+}
+
+int PageHuge(struct page *page)
+{
+       compound_page_dtor *dtor;
+
+       if (!PageCompound(page))
+               return 0;
+
+       page = compound_head(page);
+       dtor = get_compound_page_dtor(page);
+
+       return dtor == free_huge_page;
+}
+
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
        struct page *page;
 
-       page = alloc_pages_node(nid,
+       if (h->order >= MAX_ORDER)
+               return NULL;
+
+       page = alloc_pages_exact_node(nid,
                htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
                                                __GFP_REPEAT|__GFP_NOWARN,
                huge_page_order(h));
        if (page) {
                if (arch_prepare_hugepage(page)) {
-                       __free_pages(page, HUGETLB_PAGE_ORDER);
+                       __free_pages(page, huge_page_order(h));
                        return NULL;
                }
                prep_new_huge_page(h, page, nid);
@@ -546,6 +621,27 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
        return page;
 }
 
+/*
+ * Use a helper variable to find the next node and then
+ * copy it back to next_nid_to_alloc afterwards:
+ * otherwise there's a window in which a racer might
+ * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
+ * But we don't need to use a spin_lock here: it really
+ * doesn't matter if occasionally a racer chooses the
+ * same nid as we do.  Move nid forward in the mask even
+ * if we just successfully allocated a hugepage so that
+ * the next caller gets hugepages on the next node.
+ */
+static int hstate_next_node_to_alloc(struct hstate *h)
+{
+       int next_nid;
+       next_nid = next_node(h->next_nid_to_alloc, node_online_map);
+       if (next_nid == MAX_NUMNODES)
+               next_nid = first_node(node_online_map);
+       h->next_nid_to_alloc = next_nid;
+       return next_nid;
+}
+
 static int alloc_fresh_huge_page(struct hstate *h)
 {
        struct page *page;
@@ -553,28 +649,15 @@ static int alloc_fresh_huge_page(struct hstate *h)
        int next_nid;
        int ret = 0;
 
-       start_nid = h->hugetlb_next_nid;
+       start_nid = h->next_nid_to_alloc;
+       next_nid = start_nid;
 
        do {
-               page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
+               page = alloc_fresh_huge_page_node(h, next_nid);
                if (page)
                        ret = 1;
-               /*
-                * Use a helper variable to find the next node and then
-                * copy it back to hugetlb_next_nid afterwards:
-                * otherwise there's a window in which a racer might
-                * pass invalid nid MAX_NUMNODES to alloc_pages_node.
-                * But we don't need to use a spin_lock here: it really
-                * doesn't matter if occasionally a racer chooses the
-                * same nid as we do.  Move nid forward in the mask even
-                * if we just successfully allocated a hugepage so that
-                * the next caller gets hugepages on the next node.
-                */
-               next_nid = next_node(h->hugetlb_next_nid, node_online_map);
-               if (next_nid == MAX_NUMNODES)
-                       next_nid = first_node(node_online_map);
-               h->hugetlb_next_nid = next_nid;
-       } while (!page && h->hugetlb_next_nid != start_nid);
+               next_nid = hstate_next_node_to_alloc(h);
+       } while (!page && next_nid != start_nid);
 
        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -584,12 +667,70 @@ static int alloc_fresh_huge_page(struct hstate *h)
        return ret;
 }
 
+/*
+ * helper for free_pool_huge_page() - find next node
+ * from which to free a huge page
+ */
+static int hstate_next_node_to_free(struct hstate *h)
+{
+       int next_nid;
+       next_nid = next_node(h->next_nid_to_free, node_online_map);
+       if (next_nid == MAX_NUMNODES)
+               next_nid = first_node(node_online_map);
+       h->next_nid_to_free = next_nid;
+       return next_nid;
+}
+
+/*
+ * Free a huge page from the pool, taken from the next node to free from.
+ * Attempt to keep persistent huge pages more or less
+ * balanced over the allowed nodes.
+ * Called with hugetlb_lock held.
+ */
+static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
+{
+       int start_nid;
+       int next_nid;
+       int ret = 0;
+
+       start_nid = h->next_nid_to_free;
+       next_nid = start_nid;
+
+       do {
+               /*
+                * If we're returning unused surplus pages, only examine
+                * nodes with surplus pages.
+                */
+               if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
+                   !list_empty(&h->hugepage_freelists[next_nid])) {
+                       struct page *page =
+                               list_entry(h->hugepage_freelists[next_nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       h->free_huge_pages--;
+                       h->free_huge_pages_node[next_nid]--;
+                       if (acct_surplus) {
+                               h->surplus_huge_pages--;
+                               h->surplus_huge_pages_node[next_nid]--;
+                       }
+                       update_and_free_page(h, page);
+                       ret = 1;
+               }
+               next_nid = hstate_next_node_to_free(h);
+       } while (!ret && next_nid != start_nid);
+
+       return ret;
+}
+
 static struct page *alloc_buddy_huge_page(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
 {
        struct page *page;
        unsigned int nid;
 
+       if (h->order >= MAX_ORDER)
+               return NULL;
+
        /*
         * Assume we will successfully allocate the surplus page to
         * prevent racing processes from causing the surplus to exceed
@@ -627,6 +768,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
                                        __GFP_REPEAT|__GFP_NOWARN,
                                        huge_page_order(h));
 
+       if (page && arch_prepare_hugepage(page)) {
+               __free_pages(page, huge_page_order(h));
+               return NULL;
+       }
+
        spin_lock(&hugetlb_lock);
        if (page) {
                /*
@@ -747,47 +893,33 @@ free:
  * When releasing a hugetlb pool reservation, any surplus pages that were
  * allocated to satisfy the reservation must be explicitly freed if they were
  * never used.
+ * Called with hugetlb_lock held.
  */
 static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
 {
-       static int nid = -1;
-       struct page *page;
        unsigned long nr_pages;
 
+       /* Uncommit the reservation */
+       h->resv_huge_pages -= unused_resv_pages;
+
+       /* Cannot return gigantic pages currently */
+       if (h->order >= MAX_ORDER)
+               return;
+
+       nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
+
        /*
         * We want to release as many surplus pages as possible, spread
         * evenly across all nodes. Iterate across all nodes until we
         * can no longer free unreserved surplus pages. This occurs when
         * the nodes with surplus pages have no free pages.
+        * free_pool_huge_page() will balance the frees across the
+        * on-line nodes for us and will handle the hstate accounting.
         */
-       unsigned long remaining_iterations = num_online_nodes();
-
-       /* Uncommit the reservation */
-       h->resv_huge_pages -= unused_resv_pages;
-
-       nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
-
-       while (remaining_iterations-- && nr_pages) {
-               nid = next_node(nid, node_online_map);
-               if (nid == MAX_NUMNODES)
-                       nid = first_node(node_online_map);
-
-               if (!h->surplus_huge_pages_node[nid])
-                       continue;
-
-               if (!list_empty(&h->hugepage_freelists[nid])) {
-                       page = list_entry(h->hugepage_freelists[nid].next,
-                                         struct page, lru);
-                       list_del(&page->lru);
-                       update_and_free_page(h, page);
-                       h->free_huge_pages--;
-                       h->free_huge_pages_node[nid]--;
-                       h->surplus_huge_pages--;
-                       h->surplus_huge_pages_node[nid]--;
-                       nr_pages--;
-                       remaining_iterations = num_online_nodes();
-               }
+       while (nr_pages--) {
+               if (!free_pool_huge_page(h, 1))
+                       break;
        }
 }
 
@@ -800,13 +932,13 @@ static void return_unused_surplus_pages(struct hstate *h,
  * an instantiated the change should be committed via vma_commit_reservation.
  * No action is required on failure.
  */
-static int vma_needs_reservation(struct hstate *h,
+static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
 {
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
 
-       if (vma->vm_flags & VM_SHARED) {
+       if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                return region_chg(&inode->i_mapping->private_list,
                                                        idx, idx + 1);
@@ -815,7 +947,7 @@ static int vma_needs_reservation(struct hstate *h,
                return 1;
 
        } else  {
-               int err;
+               long err;
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);
 
@@ -831,7 +963,7 @@ static void vma_commit_reservation(struct hstate *h,
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
 
-       if (vma->vm_flags & VM_SHARED) {
+       if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                region_add(&inode->i_mapping->private_list, idx, idx + 1);
 
@@ -851,7 +983,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        struct page *page;
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
-       unsigned int chg;
+       long chg;
 
        /*
         * Processes that did not create the mapping will have no reserves and
@@ -887,60 +1019,120 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        return page;
 }
 
-static int __init hugetlb_init(void)
+int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
-       unsigned long i;
-       struct hstate *h = &default_hstate;
+       struct huge_bootmem_page *m;
+       int nr_nodes = nodes_weight(node_online_map);
 
-       if (HPAGE_SHIFT == 0)
-               return 0;
+       while (nr_nodes) {
+               void *addr;
 
-       if (!h->order) {
-               h->order = HPAGE_SHIFT - PAGE_SHIFT;
-               h->mask = HPAGE_MASK;
+               addr = __alloc_bootmem_node_nopanic(
+                               NODE_DATA(h->next_nid_to_alloc),
+                               huge_page_size(h), huge_page_size(h), 0);
+
+               hstate_next_node_to_alloc(h);
+               if (addr) {
+                       /*
+                        * Use the beginning of the huge page to store the
+                        * huge_bootmem_page struct (until gather_bootmem
+                        * puts them into the mem_map).
+                        */
+                       m = addr;
+                       goto found;
+               }
+               nr_nodes--;
        }
+       return 0;
 
-       for (i = 0; i < MAX_NUMNODES; ++i)
-               INIT_LIST_HEAD(&h->hugepage_freelists[i]);
+found:
+       BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
+       /* Put them into a private list first because mem_map is not up yet */
+       list_add(&m->list, &huge_boot_pages);
+       m->hstate = h;
+       return 1;
+}
+
+static void prep_compound_huge_page(struct page *page, int order)
+{
+       if (unlikely(order > (MAX_ORDER - 1)))
+               prep_compound_gigantic_page(page, order);
+       else
+               prep_compound_page(page, order);
+}
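To make the order threshold above concrete, an illustrative note only (not part of the patch), assuming the common x86_64 values PAGE_SHIFT == 12 and MAX_ORDER == 11:

/*
 * 2MB hstate: order = 21 - 12 =  9, order <= MAX_ORDER - 1 -> prep_compound_page()
 * 1GB hstate: order = 30 - 12 = 18, order >  MAX_ORDER - 1 -> prep_compound_gigantic_page()
 */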
 
-       h->hugetlb_next_nid = first_node(node_online_map);
+/* Put bootmem huge pages into the standard lists after mem_map is up */
+static void __init gather_bootmem_prealloc(void)
+{
+       struct huge_bootmem_page *m;
+
+       list_for_each_entry(m, &huge_boot_pages, list) {
+               struct page *page = virt_to_page(m);
+               struct hstate *h = m->hstate;
+               __ClearPageReserved(page);
+               WARN_ON(page_count(page) != 1);
+               prep_compound_huge_page(page, h->order);
+               prep_new_huge_page(h, page, page_to_nid(page));
+       }
+}
+
+static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
+{
+       unsigned long i;
 
-       for (i = 0; i < max_huge_pages; ++i) {
-               if (!alloc_fresh_huge_page(h))
+       for (i = 0; i < h->max_huge_pages; ++i) {
+               if (h->order >= MAX_ORDER) {
+                       if (!alloc_bootmem_huge_page(h))
+                               break;
+               } else if (!alloc_fresh_huge_page(h))
                        break;
        }
-       max_huge_pages = h->free_huge_pages = h->nr_huge_pages = i;
-       printk(KERN_INFO "Total HugeTLB memory allocated, %ld\n",
-                       h->free_huge_pages);
-       return 0;
+       h->max_huge_pages = i;
 }
-module_init(hugetlb_init);
 
-static int __init hugetlb_setup(char *s)
+static void __init hugetlb_init_hstates(void)
 {
-       if (sscanf(s, "%lu", &max_huge_pages) <= 0)
-               max_huge_pages = 0;
-       return 1;
+       struct hstate *h;
+
+       for_each_hstate(h) {
+               /* oversize hugepages were init'ed in early boot */
+               if (h->order < MAX_ORDER)
+                       hugetlb_hstate_alloc_pages(h);
+       }
 }
-__setup("hugepages=", hugetlb_setup);
 
-static unsigned int cpuset_mems_nr(unsigned int *array)
+static char * __init memfmt(char *buf, unsigned long n)
 {
-       int node;
-       unsigned int nr = 0;
+       if (n >= (1UL << 30))
+               sprintf(buf, "%lu GB", n >> 30);
+       else if (n >= (1UL << 20))
+               sprintf(buf, "%lu MB", n >> 20);
+       else
+               sprintf(buf, "%lu KB", n >> 10);
+       return buf;
+}
 
-       for_each_node_mask(node, cpuset_current_mems_allowed)
-               nr += array[node];
+static void __init report_hugepages(void)
+{
+       struct hstate *h;
 
-       return nr;
+       for_each_hstate(h) {
+               char buf[32];
+               printk(KERN_INFO "HugeTLB registered %s page size, "
+                                "pre-allocated %ld pages\n",
+                       memfmt(buf, huge_page_size(h)),
+                       h->free_huge_pages);
+       }
 }
 
-#ifdef CONFIG_SYSCTL
 #ifdef CONFIG_HIGHMEM
 static void try_to_free_low(struct hstate *h, unsigned long count)
 {
        int i;
 
+       if (h->order >= MAX_ORDER)
+               return;
+
        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                struct list_head *freel = &h->hugepage_freelists[i];
@@ -950,7 +1142,7 @@ static void try_to_free_low(struct hstate *h, unsigned long count)
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
-                       update_and_free_page(page);
+                       update_and_free_page(h, page);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[page_to_nid(page)]--;
                }
@@ -962,11 +1154,60 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
 }
 #endif
 
+/*
+ * Increment or decrement surplus_huge_pages.  Keep node-specific counters
+ * balanced by operating on them in a round-robin fashion.
+ * Returns 1 if an adjustment was made.
+ */
+static int adjust_pool_surplus(struct hstate *h, int delta)
+{
+       int start_nid, next_nid;
+       int ret = 0;
+
+       VM_BUG_ON(delta != -1 && delta != 1);
+
+       if (delta < 0)
+               start_nid = h->next_nid_to_alloc;
+       else
+               start_nid = h->next_nid_to_free;
+       next_nid = start_nid;
+
+       do {
+               int nid = next_nid;
+               if (delta < 0)  {
+                       next_nid = hstate_next_node_to_alloc(h);
+                       /*
+                        * To shrink on this node, there must be a surplus page
+                        */
+                       if (!h->surplus_huge_pages_node[nid])
+                               continue;
+               }
+               if (delta > 0) {
+                       next_nid = hstate_next_node_to_free(h);
+                       /*
+                        * Surplus cannot exceed the total number of pages
+                        */
+                       if (h->surplus_huge_pages_node[nid] >=
+                                               h->nr_huge_pages_node[nid])
+                               continue;
+               }
+
+               h->surplus_huge_pages += delta;
+               h->surplus_huge_pages_node[nid] += delta;
+               ret = 1;
+               break;
+       } while (next_nid != start_nid);
+
+       return ret;
+}
+
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static unsigned long set_max_huge_pages(unsigned long count)
+static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
 {
        unsigned long min_count, ret;
-       struct hstate *h = &default_hstate;
+
+       if (h->order >= MAX_ORDER)
+               return h->max_huge_pages;
 
        /*
         * Increase the pool size
@@ -1018,10 +1259,8 @@ static unsigned long set_max_huge_pages(unsigned long count)
        min_count = max(count, min_count);
        try_to_free_low(h, min_count);
        while (min_count < persistent_huge_pages(h)) {
-               struct page *page = dequeue_huge_page(h);
-               if (!page)
+               if (!free_pool_huge_page(h, 0))
                        break;
-               update_and_free_page(h, page);
        }
        while (count < persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, 1))
@@ -1033,20 +1272,295 @@ out:
        return ret;
 }
 
+#define HSTATE_ATTR_RO(_name) \
+       static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+#define HSTATE_ATTR(_name) \
+       static struct kobj_attribute _name##_attr = \
+               __ATTR(_name, 0644, _name##_show, _name##_store)
+
+static struct kobject *hugepages_kobj;
+static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
+
+static struct hstate *kobj_to_hstate(struct kobject *kobj)
+{
+       int i;
+       for (i = 0; i < HUGE_MAX_HSTATE; i++)
+               if (hstate_kobjs[i] == kobj)
+                       return &hstates[i];
+       BUG();
+       return NULL;
+}
+
+static ssize_t nr_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->nr_huge_pages);
+}
+static ssize_t nr_hugepages_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int err;
+       unsigned long input;
+       struct hstate *h = kobj_to_hstate(kobj);
+
+       err = strict_strtoul(buf, 10, &input);
+       if (err)
+               return 0;
+
+       h->max_huge_pages = set_max_huge_pages(h, input);
+
+       return count;
+}
+HSTATE_ATTR(nr_hugepages);
+
+static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
+}
+static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int err;
+       unsigned long input;
+       struct hstate *h = kobj_to_hstate(kobj);
+
+       err = strict_strtoul(buf, 10, &input);
+       if (err)
+               return 0;
+
+       spin_lock(&hugetlb_lock);
+       h->nr_overcommit_huge_pages = input;
+       spin_unlock(&hugetlb_lock);
+
+       return count;
+}
+HSTATE_ATTR(nr_overcommit_hugepages);
+
+static ssize_t free_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->free_huge_pages);
+}
+HSTATE_ATTR_RO(free_hugepages);
+
+static ssize_t resv_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->resv_huge_pages);
+}
+HSTATE_ATTR_RO(resv_hugepages);
+
+static ssize_t surplus_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->surplus_huge_pages);
+}
+HSTATE_ATTR_RO(surplus_hugepages);
+
+static struct attribute *hstate_attrs[] = {
+       &nr_hugepages_attr.attr,
+       &nr_overcommit_hugepages_attr.attr,
+       &free_hugepages_attr.attr,
+       &resv_hugepages_attr.attr,
+       &surplus_hugepages_attr.attr,
+       NULL,
+};
+
+static struct attribute_group hstate_attr_group = {
+       .attrs = hstate_attrs,
+};
+
+static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
+{
+       int retval;
+
+       hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
+                                                       hugepages_kobj);
+       if (!hstate_kobjs[h - hstates])
+               return -ENOMEM;
+
+       retval = sysfs_create_group(hstate_kobjs[h - hstates],
+                                                       &hstate_attr_group);
+       if (retval)
+               kobject_put(hstate_kobjs[h - hstates]);
+
+       return retval;
+}
+
+static void __init hugetlb_sysfs_init(void)
+{
+       struct hstate *h;
+       int err;
+
+       hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
+       if (!hugepages_kobj)
+               return;
+
+       for_each_hstate(h) {
+               err = hugetlb_sysfs_add_hstate(h);
+               if (err)
+                       printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
+                                                               h->name);
+       }
+}
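For orientation, a hedged sketch of the sysfs layout this creates, assuming mm_kobj corresponds to /sys/kernel/mm and a 2048kB hstate (the directory name comes from the "hugepages-%lukB" format used in hugetlb_add_hstate() below):

/*
 * /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 * /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 * /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 * /sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 * /sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */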
+
+static void __exit hugetlb_exit(void)
+{
+       struct hstate *h;
+
+       for_each_hstate(h) {
+               kobject_put(hstate_kobjs[h - hstates]);
+       }
+
+       kobject_put(hugepages_kobj);
+}
+module_exit(hugetlb_exit);
+
+static int __init hugetlb_init(void)
+{
+       /* Some platforms decide whether they support huge pages at boot
+        * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+        * there is no such support.
+        */
+       if (HPAGE_SHIFT == 0)
+               return 0;
+
+       if (!size_to_hstate(default_hstate_size)) {
+               default_hstate_size = HPAGE_SIZE;
+               if (!size_to_hstate(default_hstate_size))
+                       hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
+       }
+       default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
+       if (default_hstate_max_huge_pages)
+               default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+
+       hugetlb_init_hstates();
+
+       gather_bootmem_prealloc();
+
+       report_hugepages();
+
+       hugetlb_sysfs_init();
+
+       return 0;
+}
+module_init(hugetlb_init);
+
+/* Should be called on processing a hugepagesz=... option */
+void __init hugetlb_add_hstate(unsigned order)
+{
+       struct hstate *h;
+       unsigned long i;
+
+       if (size_to_hstate(PAGE_SIZE << order)) {
+               printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
+               return;
+       }
+       BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
+       BUG_ON(order == 0);
+       h = &hstates[max_hstate++];
+       h->order = order;
+       h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
+       h->nr_huge_pages = 0;
+       h->free_huge_pages = 0;
+       for (i = 0; i < MAX_NUMNODES; ++i)
+               INIT_LIST_HEAD(&h->hugepage_freelists[i]);
+       h->next_nid_to_alloc = first_node(node_online_map);
+       h->next_nid_to_free = first_node(node_online_map);
+       snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
+                                       huge_page_size(h)/1024);
+
+       parsed_hstate = h;
+}
+
+static int __init hugetlb_nrpages_setup(char *s)
+{
+       unsigned long *mhp;
+       static unsigned long *last_mhp;
+
+       /*
+        * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
+        * so this hugepages= parameter goes to the "default hstate".
+        */
+       if (!max_hstate)
+               mhp = &default_hstate_max_huge_pages;
+       else
+               mhp = &parsed_hstate->max_huge_pages;
+
+       if (mhp == last_mhp) {
+               printk(KERN_WARNING "hugepages= specified twice without "
+                       "interleaving hugepagesz=, ignoring\n");
+               return 1;
+       }
+
+       if (sscanf(s, "%lu", mhp) <= 0)
+               *mhp = 0;
+
+       /*
+        * Global state is always initialized later in hugetlb_init.
+        * But we need to allocate pages for hstates of order >= MAX_ORDER
+        * here, early enough to still use the bootmem allocator.
+        */
+       if (max_hstate && parsed_hstate->order >= MAX_ORDER)
+               hugetlb_hstate_alloc_pages(parsed_hstate);
+
+       last_mhp = mhp;
+
+       return 1;
+}
+__setup("hugepages=", hugetlb_nrpages_setup);
+
+static int __init hugetlb_default_setup(char *s)
+{
+       default_hstate_size = memparse(s, &s);
+       return 1;
+}
+__setup("default_hugepagesz=", hugetlb_default_setup);
+
+static unsigned int cpuset_mems_nr(unsigned int *array)
+{
+       int node;
+       unsigned int nr = 0;
+
+       for_each_node_mask(node, cpuset_current_mems_allowed)
+               nr += array[node];
+
+       return nr;
+}
+
+#ifdef CONFIG_SYSCTL
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
-                          struct file *file, void __user *buffer,
+                          void __user *buffer,
                           size_t *length, loff_t *ppos)
 {
-       proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
-       max_huge_pages = set_max_huge_pages(max_huge_pages);
+       struct hstate *h = &default_hstate;
+       unsigned long tmp;
+
+       if (!write)
+               tmp = h->max_huge_pages;
+
+       table->data = &tmp;
+       table->maxlen = sizeof(unsigned long);
+       proc_doulongvec_minmax(table, write, buffer, length, ppos);
+
+       if (write)
+               h->max_huge_pages = set_max_huge_pages(h, tmp);
+
        return 0;
 }
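As an aside, both sysctl handlers in this hunk now stage the value in a stack temporary instead of the removed globals (max_huge_pages, sysctl_overcommit_huge_pages). A minimal sketch of that pattern, with an illustrative helper name that is not part of the patch:

static int stage_ulong_sysctl(struct ctl_table *table, int write,
                              void __user *buffer, size_t *length,
                              loff_t *ppos, unsigned long *value)
{
        unsigned long tmp = *value;

        table->data = &tmp;                     /* point at the temporary */
        table->maxlen = sizeof(unsigned long);
        proc_doulongvec_minmax(table, write, buffer, length, ppos);

        if (write)                              /* commit only on write */
                *value = tmp;
        return 0;
}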
 
 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
-                       struct file *file, void __user *buffer,
+                       void __user *buffer,
                        size_t *length, loff_t *ppos)
 {
-       proc_dointvec(table, write, file, buffer, length, ppos);
+       proc_dointvec(table, write, buffer, length, ppos);
        if (hugepages_treat_as_movable)
                htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
        else
@@ -1055,28 +1569,39 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
 }
 
 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
-                       struct file *file, void __user *buffer,
+                       void __user *buffer,
                        size_t *length, loff_t *ppos)
 {
        struct hstate *h = &default_hstate;
-       proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
-       spin_lock(&hugetlb_lock);
-       h->nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
-       spin_unlock(&hugetlb_lock);
+       unsigned long tmp;
+
+       if (!write)
+               tmp = h->nr_overcommit_huge_pages;
+
+       table->data = &tmp;
+       table->maxlen = sizeof(unsigned long);
+       proc_doulongvec_minmax(table, write, buffer, length, ppos);
+
+       if (write) {
+               spin_lock(&hugetlb_lock);
+               h->nr_overcommit_huge_pages = tmp;
+               spin_unlock(&hugetlb_lock);
+       }
+
        return 0;
 }
 
 #endif /* CONFIG_SYSCTL */
 
-int hugetlb_report_meminfo(char *buf)
+void hugetlb_report_meminfo(struct seq_file *m)
 {
        struct hstate *h = &default_hstate;
-       return sprintf(buf,
-                       "HugePages_Total: %5lu\n"
-                       "HugePages_Free:  %5lu\n"
-                       "HugePages_Rsvd:  %5lu\n"
-                       "HugePages_Surp:  %5lu\n"
-                       "Hugepagesize:    %5lu kB\n",
+       seq_printf(m,
+                       "HugePages_Total:   %5lu\n"
+                       "HugePages_Free:    %5lu\n"
+                       "HugePages_Rsvd:    %5lu\n"
+                       "HugePages_Surp:    %5lu\n"
+                       "Hugepagesize:   %8lu kB\n",
                        h->nr_huge_pages,
                        h->free_huge_pages,
                        h->resv_huge_pages,
@@ -1177,8 +1702,10 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 
                kref_put(&reservations->refs, resv_map_release);
 
-               if (reserve)
+               if (reserve) {
                        hugetlb_acct_memory(h, -reserve);
+                       hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
+               }
        }
 }
 
@@ -1295,6 +1822,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        BUG_ON(start & ~huge_page_mask(h));
        BUG_ON(end & ~huge_page_mask(h));
 
+       mmu_notifier_invalidate_range_start(mm, start, end);
        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
@@ -1336,6 +1864,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
@@ -1345,19 +1874,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       /*
-        * It is undesirable to test vma->vm_file as it should be non-null
-        * for valid hugetlb area. However, vm_file will be NULL in the error
-        * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
-        * do_mmap_pgoff() nullifies vma->vm_file before calling this function
-        * to clean up. Since no pte has actually been setup, it is safe to
-        * do nothing in this case.
-        */
-       if (vma->vm_file) {
-               spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
-               __unmap_hugepage_range(vma, start, end, ref_page);
-               spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
-       }
+       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       __unmap_hugepage_range(vma, start, end, ref_page);
+       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 }
 
 /*
@@ -1366,11 +1885,10 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  * from other VMAs and let the children be SIGKILLed if they are faulting the
  * same region.
  */
-int unmap_ref_private(struct mm_struct *mm,
-                                       struct vm_area_struct *vma,
-                                       struct page *page,
-                                       unsigned long address)
+static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+                               struct page *page, unsigned long address)
 {
+       struct hstate *h = hstate_vma(vma);
        struct vm_area_struct *iter_vma;
        struct address_space *mapping;
        struct prio_tree_iter iter;
@@ -1380,7 +1898,7 @@ int unmap_ref_private(struct mm_struct *mm,
         * vm_pgoff is in PAGE_SIZE units, hence the different calculation
         * from page cache lookup which is in HPAGE_SIZE units.
         */
-       address = address & huge_page_mask(hstate_vma(vma));
+       address = address & huge_page_mask(h);
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
                + (vma->vm_pgoff >> PAGE_SHIFT);
        mapping = (struct address_space *)page_private(page);
@@ -1399,7 +1917,7 @@ int unmap_ref_private(struct mm_struct *mm,
                 */
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                        unmap_hugepage_range(iter_vma,
-                               address, address + HPAGE_SIZE,
+                               address, address + huge_page_size(h),
                                page);
        }
 
@@ -1435,7 +1953,7 @@ retry_avoidcopy:
         * at the time of fork() could consume its reserves on COW instead
         * of the full address range.
         */
-       if (!(vma->vm_flags & VM_SHARED) &&
+       if (!(vma->vm_flags & VM_MAYSHARE) &&
                        is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
                        old_page != pagecache_page)
                outside_reserve = 1;
@@ -1498,8 +2016,28 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
        return find_lock_page(mapping, idx);
 }
 
+/*
+ * Return whether there is a pagecache page backing the given address within
+ * the VMA. Caller follow_hugetlb_page() holds page_table_lock so we cannot
+ * lock_page.
+ */
+static bool hugetlbfs_pagecache_present(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long address)
+{
+       struct address_space *mapping;
+       pgoff_t idx;
+       struct page *page;
+
+       mapping = vma->vm_file->f_mapping;
+       idx = vma_hugecache_offset(h, vma, address);
+
+       page = find_get_page(mapping, idx);
+       if (page)
+               put_page(page);
+       return page != NULL;
+}
+
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                       unsigned long address, pte_t *ptep, int write_access)
+                       unsigned long address, pte_t *ptep, unsigned int flags)
 {
        struct hstate *h = hstate_vma(vma);
        int ret = VM_FAULT_SIGBUS;
@@ -1542,7 +2080,7 @@ retry:
                clear_huge_page(page, address, huge_page_size(h));
                __SetPageUptodate(page);
 
-               if (vma->vm_flags & VM_SHARED) {
+               if (vma->vm_flags & VM_MAYSHARE) {
                        int err;
                        struct inode *inode = mapping->host;
 
@@ -1561,6 +2099,18 @@ retry:
                        lock_page(page);
        }
 
+       /*
+        * If we are going to COW a private mapping later, we examine the
+        * pending reservations for this page now. This will ensure that
+        * any allocations necessary to record that reservation occur outside
+        * the spinlock.
+        */
+       if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
+               if (vma_needs_reservation(h, vma, address) < 0) {
+                       ret = VM_FAULT_OOM;
+                       goto backout_unlocked;
+               }
+
        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> huge_page_shift(h);
        if (idx >= size)
@@ -1574,7 +2124,7 @@ retry:
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);
 
-       if (write_access && !(vma->vm_flags & VM_SHARED)) {
+       if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
        }
@@ -1586,17 +2136,19 @@ out:
 
 backout:
        spin_unlock(&mm->page_table_lock);
+backout_unlocked:
        unlock_page(page);
        put_page(page);
        goto out;
 }
 
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-                       unsigned long address, int write_access)
+                       unsigned long address, unsigned int flags)
 {
        pte_t *ptep;
        pte_t entry;
        int ret;
+       struct page *pagecache_page = NULL;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
 
@@ -1612,35 +2164,77 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        mutex_lock(&hugetlb_instantiation_mutex);
        entry = huge_ptep_get(ptep);
        if (huge_pte_none(entry)) {
-               ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-               mutex_unlock(&hugetlb_instantiation_mutex);
-               return ret;
+               ret = hugetlb_no_page(mm, vma, address, ptep, flags);
+               goto out_mutex;
        }
 
        ret = 0;
 
+       /*
+        * If we are going to COW the mapping later, we examine the pending
+        * reservations for this page now. This will ensure that any
+        * allocations necessary to record that reservation occur outside the
+        * spinlock. For private mappings, we also lookup the pagecache
+        * page now as it is used to determine if a reservation has been
+        * consumed.
+        */
+       if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
+               if (vma_needs_reservation(h, vma, address) < 0) {
+                       ret = VM_FAULT_OOM;
+                       goto out_mutex;
+               }
+
+               if (!(vma->vm_flags & VM_MAYSHARE))
+                       pagecache_page = hugetlbfs_pagecache_page(h,
+                                                               vma, address);
+       }
+
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
-       if (likely(pte_same(entry, huge_ptep_get(ptep))))
-               if (write_access && !pte_write(entry)) {
-                       struct page *page;
-                       page = hugetlbfs_pagecache_page(h, vma, address);
-                       ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
-                       if (page) {
-                               unlock_page(page);
-                               put_page(page);
-                       }
+       if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+               goto out_page_table_lock;
+
+
+       if (flags & FAULT_FLAG_WRITE) {
+               if (!pte_write(entry)) {
+                       ret = hugetlb_cow(mm, vma, address, ptep, entry,
+                                                       pagecache_page);
+                       goto out_page_table_lock;
                }
+               entry = pte_mkdirty(entry);
+       }
+       entry = pte_mkyoung(entry);
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+                                               flags & FAULT_FLAG_WRITE))
+               update_mmu_cache(vma, address, entry);
+
+out_page_table_lock:
        spin_unlock(&mm->page_table_lock);
+
+       if (pagecache_page) {
+               unlock_page(pagecache_page);
+               put_page(pagecache_page);
+       }
+
+out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
 
        return ret;
 }
 
+/* Can be overridden by architectures */
+__attribute__((weak)) struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+              pud_t *pud, int write)
+{
+       BUG();
+       return NULL;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i,
-                       int write)
+                       unsigned int flags)
 {
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
@@ -1650,28 +2244,42 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
+               int absent;
                struct page *page;
 
                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
-                * each hugepage.  We have to make sure we get the
+                * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+               absent = !pte || huge_pte_none(huge_ptep_get(pte));
+
+               /*
+                * When coredumping, it suits get_dump_page if we just return
+                * an error where there's an empty slot with no huge pagecache
+                * to back it.  This way, we avoid allocating a hugepage, and
+                * the sparse dumpfile avoids allocating disk blocks, but its
+                * huge holes still show up with zeroes where they need to be.
+                */
+               if (absent && (flags & FOLL_DUMP) &&
+                   !hugetlbfs_pagecache_present(h, vma, vaddr)) {
+                       remainder = 0;
+                       break;
+               }
 
-               if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
-                   (write && !pte_write(huge_ptep_get(pte)))) {
+               if (absent ||
+                   ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
                        spin_unlock(&mm->page_table_lock);
-                       ret = hugetlb_fault(mm, vma, vaddr, write);
+                       ret = hugetlb_fault(mm, vma, vaddr,
+                               (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
                        spin_lock(&mm->page_table_lock);
                        if (!(ret & VM_FAULT_ERROR))
                                continue;
 
                        remainder = 0;
-                       if (!i)
-                               i = -EFAULT;
                        break;
                }
 
@@ -1679,8 +2287,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                page = pte_page(huge_ptep_get(pte));
 same_page:
                if (pages) {
-                       get_page(page);
-                       pages[i] = page + pfn_offset;
+                       pages[i] = mem_map_offset(page, pfn_offset);
+                       get_page(pages[i]);
                }
 
                if (vmas)
@@ -1703,7 +2311,7 @@ same_page:
        *length = remainder;
        *position = vaddr;
 
-       return i;
+       return i ? i : -EFAULT;
 }
 
 void hugetlb_change_protection(struct vm_area_struct *vma,
@@ -1740,12 +2348,18 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 
 int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
-                                       struct vm_area_struct *vma)
+                                       struct vm_area_struct *vma,
+                                       int acctflag)
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
 
-       if (vma && vma->vm_flags & VM_NORESERVE)
+       /*
+        * Only apply hugepage reservation if asked. At fault time, an
+        * attempt will be made for VM_NORESERVE mappings to allocate a page
+        * and take filesystem quota without using reserves.
+        */
+       if (acctflag & VM_NORESERVE)
                return 0;
 
        /*
@@ -1754,7 +2368,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * to reserve the full area even if read-only as mprotect() may be
         * called to make the mapping read-write. Assume !vma is a shm mapping
         */
-       if (!vma || vma->vm_flags & VM_SHARED)
+       if (!vma || vma->vm_flags & VM_MAYSHARE)
                chg = region_chg(&inode->i_mapping->private_list, from, to);
        else {
                struct resv_map *resv_map = resv_map_alloc();
@@ -1770,14 +2384,32 @@ int hugetlb_reserve_pages(struct inode *inode,
        if (chg < 0)
                return chg;
 
+       /* There must be enough filesystem quota for the mapping */
        if (hugetlb_get_quota(inode->i_mapping, chg))
                return -ENOSPC;
+
+       /*
+        * Check that enough hugepages are available for the reservation.
+        * Hand back the quota if there are not.
+        */
        ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
                hugetlb_put_quota(inode->i_mapping, chg);
                return ret;
        }
-       if (!vma || vma->vm_flags & VM_SHARED)
+
+       /*
+        * Account for the reservations made. Shared mappings record regions
+        * that have reservations as they are shared by multiple VMAs.
+        * When the last VMA disappears, the region map says how much
+        * the reservation was and the page cache tells how much of
+        * the reservation was consumed. Private mappings are per-VMA and
+        * only the consumed reservations are tracked. When the VMA
+        * disappears, the original reservation is the VMA size and the
+        * consumed reservations are stored in the map. Hence, nothing
+        * else has to be done for private mappings here
+        */
+       if (!vma || vma->vm_flags & VM_MAYSHARE)
                region_add(&inode->i_mapping->private_list, from, to);
        return 0;
 }
@@ -1788,7 +2420,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
 
        spin_lock(&inode->i_lock);
-       inode->i_blocks -= blocks_per_huge_page(h);
+       inode->i_blocks -= (blocks_per_huge_page(h) * freed);
        spin_unlock(&inode->i_lock);
 
        hugetlb_put_quota(inode->i_mapping, (chg - freed));