proc: clean the ip_misc_proc_init and ip_proc_init_net error paths
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8b809ec..ab17127 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
 static unsigned long surplus_huge_pages;
+static unsigned long nr_overcommit_huge_pages;
 unsigned long max_huge_pages;
+unsigned long sysctl_overcommit_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
 static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
-int hugetlb_dynamic_pool;
 static int hugetlb_next_nid;
 
 /*
@@ -70,19 +71,40 @@ static void enqueue_huge_page(struct page *page)
        free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page(void)
+{
+       int nid;
+       struct page *page = NULL;
+
+       for (nid = 0; nid < MAX_NUMNODES; ++nid) {
+               if (!list_empty(&hugepage_freelists[nid])) {
+                       page = list_entry(hugepage_freelists[nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       free_huge_pages--;
+                       free_huge_pages_node[nid]--;
+                       break;
+               }
+       }
+       return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
                                unsigned long address)
 {
        int nid;
        struct page *page = NULL;
        struct mempolicy *mpol;
+       nodemask_t *nodemask;
        struct zonelist *zonelist = huge_zonelist(vma, address,
-                                       htlb_alloc_mask, &mpol);
-       struct zone **z;
-
-       for (z = zonelist->zones; *z; z++) {
-               nid = zone_to_nid(*z);
-               if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
+                                       htlb_alloc_mask, &mpol, &nodemask);
+       struct zone *zone;
+       struct zoneref *z;
+
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                                               MAX_NR_ZONES - 1, nodemask) {
+               nid = zone_to_nid(zone);
+               if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
                    !list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                                          struct page, lru);
@@ -94,7 +116,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                        break;
                }
        }
-       mpol_free(mpol);        /* unref if mpol !NULL */
+       mpol_cond_put(mpol);
        return page;
 }
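
Note on the rewritten zone walk above: huge_zonelist() now also hands back a nodemask, and for_each_zone_zonelist_nodemask() applies both filters at once -- zones are visited in zonelist (policy) order up to the given highest zone index, and zones on nodes outside the nodemask are skipped. Passing MAX_NR_ZONES - 1 makes every zone type eligible. A minimal usage sketch, assuming a NULL nodemask means "no node restriction" (as it does for non-MPOL_BIND policies in this era):

    struct zoneref *z;
    struct zone *zone;

    /* visit every zone in zonelist order, any zone type, no node filter */
    for_each_zone_zonelist_nodemask(zone, z, zonelist, MAX_NR_ZONES - 1, NULL) {
            /* zone is a candidate for dequeueing a huge page */
    }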
 
@@ -110,13 +132,17 @@ static void update_and_free_page(struct page *page)
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
+       arch_release_hugepage(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
 }
 
 static void free_huge_page(struct page *page)
 {
        int nid = page_to_nid(page);
+       struct address_space *mapping;
 
+       mapping = (struct address_space *) page_private(page);
+       set_page_private(page, 0);
        BUG_ON(page_count(page));
        INIT_LIST_HEAD(&page->lru);
 
@@ -129,6 +155,8 @@ static void free_huge_page(struct page *page)
                enqueue_huge_page(page);
        }
        spin_unlock(&hugetlb_lock);
+       if (mapping)
+               hugetlb_put_quota(mapping, 1);
 }
 
 /*
@@ -171,9 +199,14 @@ static struct page *alloc_fresh_huge_page_node(int nid)
        struct page *page;
 
        page = alloc_pages_node(nid,
-               htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
+               htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+                                               __GFP_REPEAT|__GFP_NOWARN,
                HUGETLB_PAGE_ORDER);
        if (page) {
+               if (arch_prepare_hugepage(page)) {
+                       __free_pages(page, HUGETLB_PAGE_ORDER);
+                       return NULL;
+               }
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
@@ -215,6 +248,11 @@ static int alloc_fresh_huge_page(void)
                hugetlb_next_nid = next_nid;
        } while (!page && hugetlb_next_nid != start_nid);
 
+       if (ret)
+               count_vm_event(HTLB_BUDDY_PGALLOC);
+       else
+               count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
        return ret;
 }
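
The HTLB_BUDDY_PGALLOC and HTLB_BUDDY_PGALLOC_FAIL events counted here are defined elsewhere in this series; the vmstat_text strings below are reproduced from memory and should be verified against mm/vmstat.c, but they show where the counters surface in /proc/vmstat:

    /* assumed entries in vmstat_text[] (mm/vmstat.c), guarded by CONFIG_HUGETLB_PAGE */
    "htlb_buddy_alloc_success",
    "htlb_buddy_alloc_fail",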
 
@@ -222,22 +260,67 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
                                                unsigned long address)
 {
        struct page *page;
+       unsigned int nid;
 
-       /* Check if the dynamic pool is enabled */
-       if (!hugetlb_dynamic_pool)
+       /*
+        * Assume we will successfully allocate the surplus page to
+        * prevent racing processes from causing the surplus to exceed
+        * overcommit
+        *
+        * This however introduces a different race, where a process B
+        * tries to grow the static hugepage pool while alloc_pages() is
+        * called by process A. B will only examine the per-node
+        * counters in determining if surplus huge pages can be
+        * converted to normal huge pages in adjust_pool_surplus(). A
+        * won't be able to increment the per-node counter, until the
+        * lock is dropped by B, but B doesn't drop hugetlb_lock until
+        * no more huge pages can be converted from surplus to normal
+        * state (and doesn't try to convert again). Thus, we have a
+        * case where a surplus huge page exists, the pool is grown, and
+        * the surplus huge page still exists after, even though it
+        * should just have been converted to a normal huge page. This
+        * does not leak memory, though, as the hugepage will be freed
+        * once it is out of use. It also does not allow the counters to
+        * go out of whack in adjust_pool_surplus() as we don't modify
+        * the node values until we've gotten the hugepage and only the
+        * per-node value is checked there.
+        */
+       spin_lock(&hugetlb_lock);
+       if (surplus_huge_pages >= nr_overcommit_huge_pages) {
+               spin_unlock(&hugetlb_lock);
                return NULL;
+       } else {
+               nr_huge_pages++;
+               surplus_huge_pages++;
+       }
+       spin_unlock(&hugetlb_lock);
 
-       page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
+       page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+                                       __GFP_REPEAT|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
+
+       spin_lock(&hugetlb_lock);
        if (page) {
+               /*
+                * This page is now managed by the hugetlb allocator and has
+                * no users -- drop the buddy allocator's reference.
+                */
+               put_page_testzero(page);
+               VM_BUG_ON(page_count(page));
+               nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
-               spin_lock(&hugetlb_lock);
-               nr_huge_pages++;
-               nr_huge_pages_node[page_to_nid(page)]++;
-               surplus_huge_pages++;
-               surplus_huge_pages_node[page_to_nid(page)]++;
-               spin_unlock(&hugetlb_lock);
+               /*
+                * We incremented the global counters already
+                */
+               nr_huge_pages_node[nid]++;
+               surplus_huge_pages_node[nid]++;
+               __count_vm_event(HTLB_BUDDY_PGALLOC);
+       } else {
+               nr_huge_pages--;
+               surplus_huge_pages--;
+               __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
+       spin_unlock(&hugetlb_lock);
 
        return page;
 }
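
The long comment in alloc_buddy_huge_page() above refers to adjust_pool_surplus(), which is not part of this hunk. An approximate reconstruction is sketched below for context (illustrative only, not authoritative); the point is that it consults only the per-node surplus counters, so the globals pre-incremented above cannot make it misbehave:

    /* approximate sketch of adjust_pool_surplus(); delta is +1 or -1 */
    static int adjust_pool_surplus(int delta)
    {
            static int prev_nid;
            int nid = prev_nid;
            int ret = 0;

            do {
                    nid = next_node(nid, node_online_map);
                    if (nid == MAX_NUMNODES)
                            nid = first_node(node_online_map);

                    /* To shrink on this node, there must be a surplus page */
                    if (delta < 0 && !surplus_huge_pages_node[nid])
                            continue;
                    /* Surplus cannot exceed the total number of pages */
                    if (delta > 0 && surplus_huge_pages_node[nid] >=
                                            nr_huge_pages_node[nid])
                            continue;

                    surplus_huge_pages += delta;
                    surplus_huge_pages_node[nid] += delta;
                    ret = 1;
                    break;
            } while (nid != prev_nid);

            prev_nid = nid;
            return ret;
    }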
@@ -254,8 +337,10 @@ static int gather_surplus_pages(int delta)
        int needed, allocated;
 
        needed = (resv_huge_pages + delta) - free_huge_pages;
-       if (needed <= 0)
+       if (needed <= 0) {
+               resv_huge_pages += delta;
                return 0;
+       }
 
        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);
@@ -293,26 +378,37 @@ retry:
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
-        * allocator.
+        * allocator.  Commit the entire reservation here to prevent another
+        * process from stealing the pages as they are added to the pool but
+        * before they are reserved.
         */
        needed += allocated;
+       resv_huge_pages += delta;
        ret = 0;
 free:
+       /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+               if ((--needed) < 0)
+                       break;
                list_del(&page->lru);
-               if ((--needed) >= 0)
-                       enqueue_huge_page(page);
-               else {
+               enqueue_huge_page(page);
+       }
+
+       /* Free unnecessary surplus pages to the buddy allocator */
+       if (!list_empty(&surplus_list)) {
+               spin_unlock(&hugetlb_lock);
+               list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+                       list_del(&page->lru);
                        /*
-                        * Decrement the refcount and free the page using its
-                        * destructor.  This must be done with hugetlb_lock
+                        * The page has a reference count of zero already, so
+                        * call free_huge_page directly instead of using
+                        * put_page.  This must be done with hugetlb_lock
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
-                       spin_unlock(&hugetlb_lock);
-                       put_page(page);
-                       spin_lock(&hugetlb_lock);
+                       free_huge_page(page);
                }
+               spin_lock(&hugetlb_lock);
        }
 
        return ret;
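
The direct free_huge_page() call above relies on a reference-count invariant worth spelling out (illustrative, not part of the patch): every page still on surplus_list came from alloc_buddy_huge_page(), which already dropped the buddy allocator's reference via put_page_testzero(), so its count is zero by the time it is freed here.

    VM_BUG_ON(page_count(page) != 0);  /* ref dropped at allocation time  */
    free_huge_page(page);              /* call the destructor directly;   */
                                       /* put_page() would underflow the  */
                                       /* already-zero reference count    */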
@@ -323,15 +419,26 @@ free:
  * allocated to satisfy the reservation must be explicitly freed if they were
  * never used.
  */
-void return_unused_surplus_pages(unsigned long unused_resv_pages)
+static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 {
        static int nid = -1;
        struct page *page;
        unsigned long nr_pages;
 
+       /*
+        * We want to release as many surplus pages as possible, spread
+        * evenly across all nodes. Iterate across all nodes until we
+        * can no longer free unreserved surplus pages. This occurs when
+        * the nodes with surplus pages have no free pages.
+        */
+       unsigned long remaining_iterations = num_online_nodes();
+
+       /* Uncommit the reservation */
+       resv_huge_pages -= unused_resv_pages;
+
        nr_pages = min(unused_resv_pages, surplus_huge_pages);
 
-       while (nr_pages) {
+       while (remaining_iterations-- && nr_pages) {
                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);
@@ -349,39 +456,60 @@ void return_unused_surplus_pages(unsigned long unused_resv_pages)
                        surplus_huge_pages--;
                        surplus_huge_pages_node[nid]--;
                        nr_pages--;
+                       remaining_iterations = num_online_nodes();
                }
        }
 }
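
A worked example of the node cursor above, assuming nodes {0, 2, 5} are online: the static nid starts at -1, so next_node(-1, node_online_map) yields 0; next_node(2, ...) yields 5; next_node(5, ...) yields MAX_NUMNODES, and the wrap to first_node() returns 0, keeping the cursor cycling across online nodes. Because remaining_iterations is reset after every successful free, the loop only terminates once a full pass over all online nodes frees nothing, or nr_pages reaches zero.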
 
-static struct page *alloc_huge_page(struct vm_area_struct *vma,
-                                   unsigned long addr)
+
+static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
+                                               unsigned long addr)
 {
-       struct page *page = NULL;
-       int use_reserved_page = vma->vm_flags & VM_MAYSHARE;
+       struct page *page;
 
        spin_lock(&hugetlb_lock);
-       if (!use_reserved_page && (free_huge_pages <= resv_huge_pages))
-               goto fail;
+       page = dequeue_huge_page_vma(vma, addr);
+       spin_unlock(&hugetlb_lock);
+       return page ? page : ERR_PTR(-VM_FAULT_OOM);
+}
+
+static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
+                                               unsigned long addr)
+{
+       struct page *page = NULL;
 
-       page = dequeue_huge_page(vma, addr);
-       if (!page)
-               goto fail;
+       if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
+               return ERR_PTR(-VM_FAULT_SIGBUS);
 
+       spin_lock(&hugetlb_lock);
+       if (free_huge_pages > resv_huge_pages)
+               page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
-       set_page_refcounted(page);
+       if (!page) {
+               page = alloc_buddy_huge_page(vma, addr);
+               if (!page) {
+                       hugetlb_put_quota(vma->vm_file->f_mapping, 1);
+                       return ERR_PTR(-VM_FAULT_OOM);
+               }
+       }
        return page;
+}
 
-fail:
-       spin_unlock(&hugetlb_lock);
+static struct page *alloc_huge_page(struct vm_area_struct *vma,
+                                   unsigned long addr)
+{
+       struct page *page;
+       struct address_space *mapping = vma->vm_file->f_mapping;
 
-       /*
-        * Private mappings do not use reserved huge pages so the allocation
-        * may have failed due to an undersized hugetlb pool.  Try to grab a
-        * surplus huge page from the buddy allocator.
-        */
-       if (!use_reserved_page)
-               page = alloc_buddy_huge_page(vma, addr);
+       if (vma->vm_flags & VM_MAYSHARE)
+               page = alloc_huge_page_shared(vma, addr);
+       else
+               page = alloc_huge_page_private(vma, addr);
 
+       if (!IS_ERR(page)) {
+               set_page_refcounted(page);
+               set_page_private(page, (unsigned long) mapping);
+       }
        return page;
 }
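
The error-pointer convention introduced here packs a VM_FAULT code into the returned pointer. A quick illustration of the round trip (VM_FAULT_OOM and VM_FAULT_SIGBUS are small positive flags, so their negations stay inside the IS_ERR() errno range):

    page = alloc_huge_page(vma, address);
    if (IS_ERR(page)) {
            ret = -PTR_ERR(page);   /* e.g. ERR_PTR(-VM_FAULT_OOM) decodes
                                     * back to VM_FAULT_OOM */
            /* propagate the fault code to the caller */
    }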
 
@@ -461,6 +589,12 @@ static unsigned long set_max_huge_pages(unsigned long count)
         * Increase the pool size
         * First take pages out of surplus state.  Then make up the
         * remaining difference by allocating fresh huge pages.
+        *
+        * We might race with alloc_buddy_huge_page() here and be unable
+        * to convert a surplus huge page to a normal huge page. That is
+        * not critical, though, it just means the overall size of the
+        * pool might be one hugepage larger than it needs to be, but
+        * within all the constraints specified by the sysctls.
         */
        spin_lock(&hugetlb_lock);
        while (surplus_huge_pages && count > persistent_huge_pages) {
@@ -489,12 +623,20 @@ static unsigned long set_max_huge_pages(unsigned long count)
         * to keep enough around to satisfy reservations).  Then place
         * pages into surplus state as needed so the pool will shrink
         * to the desired size as pages become free.
+        *
+        * By placing pages into the surplus state independent of the
+        * overcommit value, we are allowing the surplus pool size to
+        * exceed overcommit. There are few sane options here. Since
+        * alloc_buddy_huge_page() is checking the global counter,
+        * though, we'll note that we're not allowed to exceed surplus
+        * and won't grow the pool anywhere else. Not until one of the
+        * sysctls is changed, or the surplus pages go out of use.
         */
        min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
        min_count = max(count, min_count);
        try_to_free_low(min_count);
        while (min_count < persistent_huge_pages) {
-               struct page *page = dequeue_huge_page(NULL, 0);
+               struct page *page = dequeue_huge_page();
                if (!page)
                        break;
                update_and_free_page(page);
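
persistent_huge_pages, used as the loop bound here, is not shown in this hunk; it is assumed to be the existing helper macro in mm/hugetlb.c (reconstructed below for reference), i.e. the count of huge pages that are not surplus:

    #define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)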
@@ -530,6 +672,17 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
        return 0;
 }
 
+int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+                       struct file *file, void __user *buffer,
+                       size_t *length, loff_t *ppos)
+{
+       proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+       spin_lock(&hugetlb_lock);
+       nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
+       spin_unlock(&hugetlb_lock);
+       return 0;
+}
+
 #endif /* CONFIG_SYSCTL */
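
The handler above only takes effect once it is wired into the vm sysctl table. The kernel/sysctl.c entry is not part of this file's diff; a sketch of what it would look like (field values assumed, verify against the companion patch):

    {
            .ctl_name       = CTL_UNNUMBERED,
            .procname       = "nr_overcommit_hugepages",
            .data           = &sysctl_overcommit_huge_pages,
            .maxlen         = sizeof(sysctl_overcommit_huge_pages),
            .mode           = 0644,
            .proc_handler   = &hugetlb_overcommit_handler,
    },

With that in place, writes to /proc/sys/vm/nr_overcommit_hugepages land in sysctl_overcommit_huge_pages and are copied to nr_overcommit_huge_pages under hugetlb_lock.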
 
 int hugetlb_report_meminfo(char *buf)
@@ -551,9 +704,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 {
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
-               "Node %d HugePages_Free:  %5u\n",
+               "Node %d HugePages_Free:  %5u\n"
+               "Node %d HugePages_Surp:  %5u\n",
                nid, nr_huge_pages_node[nid],
-               nid, free_huge_pages_node[nid]);
+               nid, free_huge_pages_node[nid],
+               nid, surplus_huge_pages_node[nid]);
 }
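
With the extra line, the per-node meminfo (e.g. /sys/devices/system/node/node0/meminfo) would read something like:

    Node 0 HugePages_Total:     8
    Node 0 HugePages_Free:      6
    Node 0 HugePages_Surp:      2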
 
 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
@@ -587,7 +742,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
-               entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+               entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);
@@ -600,8 +755,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
        pte_t entry;
 
-       entry = pte_mkwrite(pte_mkdirty(*ptep));
-       if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
                update_mmu_cache(vma, address, entry);
        }
 }
@@ -624,12 +779,17 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
+
+               /* If the pagetables are shared don't copy or take references */
+               if (dst_pte == src_pte)
+                       continue;
+
                spin_lock(&dst->page_table_lock);
-               spin_lock(&src->page_table_lock);
-               if (!pte_none(*src_pte)) {
+               spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
+               if (!huge_pte_none(huge_ptep_get(src_pte))) {
                        if (cow)
-                               ptep_set_wrprotect(src, addr, src_pte);
-                       entry = *src_pte;
+                               huge_ptep_set_wrprotect(src, addr, src_pte);
+                       entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
@@ -673,7 +833,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                        continue;
 
                pte = huge_ptep_get_and_clear(mm, address, ptep);
-               if (pte_none(pte))
+               if (huge_pte_none(pte))
                        continue;
 
                page = pte_page(pte);
@@ -726,18 +886,20 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        page_cache_get(old_page);
        new_page = alloc_huge_page(vma, address);
 
-       if (!new_page) {
+       if (IS_ERR(new_page)) {
                page_cache_release(old_page);
-               return VM_FAULT_OOM;
+               return -PTR_ERR(new_page);
        }
 
        spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
+       __SetPageUptodate(new_page);
        spin_lock(&mm->page_table_lock);
 
        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
-       if (likely(pte_same(*ptep, pte))) {
+       if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW */
+               huge_ptep_clear_flush(vma, address, ptep);
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
@@ -772,27 +934,29 @@ retry:
                size = i_size_read(mapping->host) >> HPAGE_SHIFT;
                if (idx >= size)
                        goto out;
-               if (hugetlb_get_quota(mapping))
-                       goto out;
                page = alloc_huge_page(vma, address);
-               if (!page) {
-                       hugetlb_put_quota(mapping);
-                       ret = VM_FAULT_OOM;
+               if (IS_ERR(page)) {
+                       ret = -PTR_ERR(page);
                        goto out;
                }
                clear_huge_page(page, address);
+               __SetPageUptodate(page);
 
                if (vma->vm_flags & VM_SHARED) {
                        int err;
+                       struct inode *inode = mapping->host;
 
                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
                        if (err) {
                                put_page(page);
-                               hugetlb_put_quota(mapping);
                                if (err == -EEXIST)
                                        goto retry;
                                goto out;
                        }
+
+                       spin_lock(&inode->i_lock);
+                       inode->i_blocks += BLOCKS_PER_HUGEPAGE;
+                       spin_unlock(&inode->i_lock);
                } else
                        lock_page(page);
        }
@@ -803,7 +967,7 @@ retry:
                goto backout;
 
        ret = 0;
-       if (!pte_none(*ptep))
+       if (!huge_pte_none(huge_ptep_get(ptep)))
                goto backout;
 
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -822,7 +986,6 @@ out:
 
 backout:
        spin_unlock(&mm->page_table_lock);
-       hugetlb_put_quota(mapping);
        unlock_page(page);
        put_page(page);
        goto out;
@@ -846,8 +1009,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
-       entry = *ptep;
-       if (pte_none(entry)) {
+       entry = huge_ptep_get(ptep);
+       if (huge_pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);
                return ret;
@@ -857,7 +1020,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
-       if (likely(pte_same(entry, *ptep)))
+       if (likely(pte_same(entry, huge_ptep_get(ptep))))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
@@ -868,7 +1031,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
-                       unsigned long *position, int *length, int i)
+                       unsigned long *position, int *length, int i,
+                       int write)
 {
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
@@ -886,11 +1050,12 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-               if (!pte || pte_none(*pte)) {
+               if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+                   (write && !pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
                        spin_unlock(&mm->page_table_lock);
-                       ret = hugetlb_fault(mm, vma, vaddr, 0);
+                       ret = hugetlb_fault(mm, vma, vaddr, write);
                        spin_lock(&mm->page_table_lock);
                        if (!(ret & VM_FAULT_ERROR))
                                continue;
@@ -902,7 +1067,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                }
 
                pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
-               page = pte_page(*pte);
+               page = pte_page(huge_ptep_get(pte));
 same_page:
                if (pages) {
                        get_page(page);
@@ -951,7 +1116,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
-               if (!pte_none(*ptep)) {
+               if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
@@ -1110,12 +1275,13 @@ static int hugetlb_acct_memory(long delta)
                if (gather_surplus_pages(delta) < 0)
                        goto out;
 
-               if (delta > cpuset_mems_nr(free_huge_pages_node))
+               if (delta > cpuset_mems_nr(free_huge_pages_node)) {
+                       return_unused_surplus_pages(delta);
                        goto out;
+               }
        }
 
        ret = 0;
-       resv_huge_pages += delta;
        if (delta < 0)
                return_unused_surplus_pages((unsigned long) -delta);
 
@@ -1132,9 +1298,13 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
        if (chg < 0)
                return chg;
 
+       if (hugetlb_get_quota(inode->i_mapping, chg))
+               return -ENOSPC;
        ret = hugetlb_acct_memory(chg);
-       if (ret < 0)
+       if (ret < 0) {
+               hugetlb_put_quota(inode->i_mapping, chg);
                return ret;
+       }
        region_add(&inode->i_mapping->private_list, from, to);
        return 0;
 }
@@ -1142,5 +1312,11 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
-       hugetlb_acct_memory(freed - chg);
+
+       spin_lock(&inode->i_lock);
+       inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
+       spin_unlock(&inode->i_lock);
+
+       hugetlb_put_quota(inode->i_mapping, (chg - freed));
+       hugetlb_acct_memory(-(chg - freed));
 }
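
BLOCKS_PER_HUGEPAGE, used for the i_blocks accounting above, is assumed to be the include/linux/hugetlb.h helper below; i_blocks counts 512-byte units, so a 2 MB huge page, for example, accounts for 4096 blocks:

    /* assumed definition -- verify in include/linux/hugetlb.h */
    #define BLOCKS_PER_HUGEPAGE     (HPAGE_SIZE / 512)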